repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
rosbag2
|
rosbag2-master/ros2bag/ros2bag/verb/reindex.py
|
# Copyright 2021 DCS Corporation, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DISTRIBUTION A. Approved for public release; distribution unlimited.
# OPSEC #4584.
#
# Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS
# Part 252.227-7013 or 7014 (Feb 2014).
#
# This notice must appear in all copies of this file and its derivatives.
import os
from ros2bag.api import add_standard_reader_args
from ros2bag.api import print_error
from ros2bag.verb import VerbExtension
from rosbag2_py import Reindexer, StorageOptions
class ReindexVerb(VerbExtension):
    """Reconstruct metadata file for a bag."""

    def add_arguments(self, parser, cli_name):
        # Standard reader arguments: bag path and storage-id selection.
        add_standard_reader_args(parser)

    def main(self, *, args):
        # A bag is a directory on disk; reject anything else up front.
        if not os.path.isdir(args.bag_path):
            return print_error('Must specify a bag directory')
        opts = StorageOptions(uri=args.bag_path, storage_id=args.storage)
        Reindexer().reindex(opts)
| 1,577
| 31.875
| 77
|
py
|
rosbag2
|
rosbag2-master/ros2bag/ros2bag/verb/burst.py
|
# Copyright 2022 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import FileType
from rclpy.qos import InvalidQoSProfileException
from ros2bag.api import add_standard_reader_args
from ros2bag.api import check_not_negative_int
from ros2bag.api import check_positive_float
from ros2bag.api import convert_yaml_to_qos_profile
from ros2bag.api import print_error
from ros2bag.verb import VerbExtension
from ros2cli.node import NODE_NAME_PREFIX
from rosbag2_py import Player
from rosbag2_py import PlayOptions
from rosbag2_py import StorageOptions
import yaml
class BurstVerb(VerbExtension):
    """Burst data from a bag."""

    def add_arguments(self, parser, cli_name):  # noqa: D102
        # Common reader arguments (bag path, storage id).
        add_standard_reader_args(parser)
        parser.add_argument(
            '--read-ahead-queue-size', type=int, default=1000,
            help='size of message queue rosbag tries to hold in memory to help deterministic '
                 'playback. Larger size will result in larger memory needs but might prevent '
                 'delay of message playback.')
        parser.add_argument(
            '--topics', type=str, default=[], nargs='+',
            help='topics to replay, separated by space. If none specified, all topics will be '
                 'replayed.')
        parser.add_argument(
            '--qos-profile-overrides-path', type=FileType('r'),
            help='Path to a yaml file defining overrides of the QoS profile for specific topics.')
        # NOTE(review): default is '' (a str), not a list; iterating it yields no
        # remap rules, so the default is effectively "no remapping".
        parser.add_argument(
            '--remap', '-m', default='', nargs='+',
            help='list of topics to be remapped: in the form '
                 '"old_topic1:=new_topic1 old_topic2:=new_topic2 etc." ')
        parser.add_argument(
            '--storage-config-file', type=FileType('r'),
            help='Path to a yaml file defining storage specific configurations. '
                 'See storage plugin documentation for the format of this file.')
        parser.add_argument(
            '--start-offset', type=check_positive_float, default=0.0,
            help='Start the playback player this many seconds into the bag file.')
        parser.add_argument(
            '-n', '--num-messages', type=check_not_negative_int, default=0,
            help='Burst the specified number of messages, then pause.')

    def main(self, *, args):  # noqa: D102
        # Burst = play the bag starting paused, then emit num-messages as fast
        # as possible. Returns an error string (via print_error) on bad input.
        qos_profile_overrides = {}  # Specify a valid default
        if args.qos_profile_overrides_path:
            qos_profile_dict = yaml.safe_load(args.qos_profile_overrides_path)
            try:
                qos_profile_overrides = convert_yaml_to_qos_profile(
                    qos_profile_dict)
            except (InvalidQoSProfileException, ValueError) as e:
                return print_error(str(e))

        storage_config_file = ''
        if args.storage_config_file:
            # FileType('r') already opened the file; only its path is needed here.
            storage_config_file = args.storage_config_file.name

        # Expand "old:=new" rules into ROS CLI remapping arguments.
        topic_remapping = ['--ros-args']
        for remap_rule in args.remap:
            topic_remapping.append('--remap')
            topic_remapping.append(remap_rule)

        storage_options = StorageOptions(
            uri=args.bag_path,
            storage_id=args.storage,
            storage_config_uri=storage_config_file,
        )
        # Fixed playback settings: real-time rate, no loop, no /clock publishing.
        play_options = PlayOptions()
        play_options.read_ahead_queue_size = args.read_ahead_queue_size
        play_options.node_prefix = NODE_NAME_PREFIX
        play_options.rate = 1.0
        play_options.topics_to_filter = args.topics
        play_options.topic_qos_profile_overrides = qos_profile_overrides
        play_options.loop = False
        play_options.topic_remapping_options = topic_remapping
        play_options.clock_publish_frequency = 0
        play_options.delay = 0.0
        play_options.disable_keyboard_controls = False  # Give the user control
        play_options.start_paused = True  # Important for allowing the burst
        play_options.start_offset = args.start_offset
        play_options.wait_acked_timeout = -1

        player = Player()
        player.burst(storage_options, play_options, args.num_messages)
| 4,632
| 43.12381
| 98
|
py
|
rosbag2
|
rosbag2-master/ros2bag/ros2bag/verb/record.py
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import FileType
import datetime
import os
from rclpy.qos import InvalidQoSProfileException
from ros2bag.api import add_writer_storage_plugin_extensions
from ros2bag.api import convert_yaml_to_qos_profile
from ros2bag.api import print_error
from ros2bag.api import SplitLineFormatter
from ros2bag.verb import VerbExtension
from ros2cli.node import NODE_NAME_PREFIX
from rosbag2_py import get_default_storage_id
from rosbag2_py import get_registered_compressors
from rosbag2_py import get_registered_serializers
from rosbag2_py import get_registered_writers
from rosbag2_py import Recorder
from rosbag2_py import RecordOptions
from rosbag2_py import StorageOptions
import yaml
class RecordVerb(VerbExtension):
    """Record ROS data to a bag."""

    def add_arguments(self, parser, cli_name):  # noqa: D102
        parser.formatter_class = SplitLineFormatter
        writer_choices = get_registered_writers()
        default_storage_id = get_default_storage_id()
        # Prefer the system default storage id; fall back to any registered writer.
        default_writer = default_storage_id if default_storage_id in writer_choices else \
            next(iter(writer_choices))

        # Registered serializers are named '<format>_converter'; expose '<format>'.
        serialization_choices = get_registered_serializers()
        converter_suffix = '_converter'
        serialization_choices = {
            f[:-len(converter_suffix)]
            for f in serialization_choices
            if f.endswith(converter_suffix)
        }

        # Base output
        parser.add_argument(
            '-o', '--output',
            help='Destination of the bagfile to create, \
            defaults to a timestamped folder in the current directory')
        parser.add_argument(
            '-s', '--storage', default=default_writer, choices=writer_choices,
            help="Storage identifier to be used, defaults to '%(default)s'")

        # Topic filter arguments
        parser.add_argument(
            'topics', nargs='*', default=None, help='List of topics to record.')
        parser.add_argument(
            '-a', '--all', action='store_true',
            help='Record all topics. Required if no explicit topic list or regex filters.')
        parser.add_argument(
            '-e', '--regex', default='',
            help='Record only topics containing provided regular expression. '
                 'Overrides --all, applies on top of topics list.')
        parser.add_argument(
            '-x', '--exclude', default='',
            help='Exclude topics containing provided regular expression. '
                 'Works on top of --all, --regex, or topics list.')

        # Discovery behavior
        parser.add_argument(
            '--include-unpublished-topics', action='store_true',
            help='Discover and record topics which have no publisher. '
                 'Subscriptions on such topics will be made with default QoS unless otherwise '
                 'specified in a QoS overrides file.')
        parser.add_argument(
            '--include-hidden-topics', action='store_true',
            help='Discover and record hidden topics as well. '
                 'These are topics used internally by ROS 2 implementation.')
        parser.add_argument(
            '--no-discovery', action='store_true',
            help='Disables topic auto discovery during recording: only topics present at '
                 'startup will be recorded.')
        parser.add_argument(
            '-p', '--polling-interval', type=int, default=100,
            help='Time in ms to wait between querying available topics for recording. '
                 'It has no effect if --no-discovery is enabled.')
        parser.add_argument(
            '--ignore-leaf-topics', action='store_true',
            help='Ignore topics without a publisher.')
        parser.add_argument(
            '--qos-profile-overrides-path', type=FileType('r'),
            help='Path to a yaml file defining overrides of the QoS profile for specific topics.')

        # Core config
        parser.add_argument(
            '-f', '--serialization-format', default='', choices=serialization_choices,
            help='The rmw serialization format in which the messages are saved, defaults to the '
                 'rmw currently in use')
        parser.add_argument(
            '-b', '--max-bag-size', type=int, default=0,
            help='Maximum size in bytes before the bagfile will be split. '
                 'Default: %(default)d, recording written in single bagfile and splitting '
                 'is disabled.')
        parser.add_argument(
            '-d', '--max-bag-duration', type=int, default=0,
            help='Maximum duration in seconds before the bagfile will be split. '
                 'Default: %(default)d, recording written in single bagfile and splitting '
                 'is disabled. If both splitting by size and duration are enabled, '
                 'the bag will split at whichever threshold is reached first.')
        # Help text fixed: the original concatenated fragments without separating
        # spaces ("cache.Default", "memoryis", "toabout", "volume.If") and had the
        # typo "magitude".
        parser.add_argument(
            '--max-cache-size', type=int, default=100*1024*1024,
            help='Maximum size (in bytes) of messages to hold in each buffer of cache. '
                 'Default: %(default)d. The cache is handled through double buffering, '
                 'which means that in pessimistic case up to twice the parameter value of memory '
                 'is needed. A rule of thumb is to cache an order of magnitude corresponding to '
                 'about one second of total recorded data volume. '
                 'If the value specified is 0, then every message is directly written to disk.')
        parser.add_argument(
            '--start-paused', action='store_true', default=False,
            help='Start the recorder in a paused state.')
        parser.add_argument(
            '--use-sim-time', action='store_true', default=False,
            help='Use simulation time.')
        parser.add_argument(
            '--node-name', type=str, default='rosbag2_recorder',
            help='Specify the recorder node name. Default is %(default)s.')
        parser.add_argument(
            '--custom-data', type=str, metavar='KEY=VALUE', nargs='*',
            help='Store the custom data in metadata.yaml '
                 'under "rosbag2_bagfile_information/custom_data". The key=value pair can '
                 'appear more than once. The last value will override the former ones.')
        parser.add_argument(
            '--snapshot-mode', action='store_true',
            help='Enable snapshot mode. Messages will not be written to the bagfile until '
                 'the "/rosbag2_recorder/snapshot" service is called.')

        # Storage configuration
        add_writer_storage_plugin_extensions(parser)

        # Core compression configuration
        # TODO(emersonknapp) this configuration will be moved down to implementing plugins
        parser.add_argument(
            '--compression-queue-size', type=int, default=1,
            help='Number of files or messages that may be queued for compression '
                 'before being dropped. Default is %(default)d.')
        parser.add_argument(
            '--compression-threads', type=int, default=0,
            help='Number of files or messages that may be compressed in parallel. '
                 'Default is %(default)d, which will be interpreted as the number of CPU cores.')
        parser.add_argument(
            '--compression-mode', type=str, default='none',
            choices=['none', 'file', 'message'],
            help='Choose mode of compression for the storage. Default: %(default)s')
        parser.add_argument(
            '--compression-format', type=str, default='',
            choices=get_registered_compressors(),
            help='Choose the compression format/algorithm. '
                 'Has no effect if no compression mode is chosen. Default: %(default)s')

    def main(self, *, args):  # noqa: D102
        # The topic selectors (--all, topic list, --regex) are mutually exclusive...
        if (args.all and (args.topics or args.regex)) or (args.topics and args.regex):
            return print_error('Must specify only one option out of topics, --regex or --all')
        # ...but at least one of them must be given.
        if not (args.all or args.topics or args.regex):
            return print_error('Invalid choice: Must specify topic(s), --regex or --all')
        if args.topics and args.exclude:
            return print_error('--exclude argument cannot be used when specifying a list '
                               'of topics explicitly')
        if args.exclude and not (args.regex or args.all):
            return print_error('--exclude argument requires either --all or --regex')

        uri = args.output or datetime.datetime.now().strftime('rosbag2_%Y_%m_%d-%H_%M_%S')
        if os.path.isdir(uri):
            return print_error("Output folder '{}' already exists.".format(uri))

        if args.compression_format and args.compression_mode == 'none':
            return print_error('Invalid choice: Cannot specify compression format '
                               'without a compression mode.')
        if args.compression_queue_size < 0:
            return print_error('Compression queue size must be at least 0.')
        args.compression_mode = args.compression_mode.upper()

        qos_profile_overrides = {}  # Specify a valid default
        if args.qos_profile_overrides_path:
            qos_profile_dict = yaml.safe_load(args.qos_profile_overrides_path)
            try:
                qos_profile_overrides = convert_yaml_to_qos_profile(
                    qos_profile_dict)
            except (InvalidQoSProfileException, ValueError) as e:
                return print_error(str(e))

        # Prepare custom_data dictionary. Split on the FIRST '=' only, so values
        # may themselves contain '='; a pair without '=' (or with an empty key)
        # is reported as an error instead of crashing with IndexError as before.
        custom_data = {}
        if args.custom_data:
            for pair in args.custom_data:
                key, sep, value = pair.partition('=')
                if not sep or not key:
                    return print_error(
                        "--custom-data expects KEY=VALUE pairs, got '{}'".format(pair))
                # Later pairs deliberately override earlier ones with the same key.
                custom_data[key] = value

        storage_config_file = ''
        if args.storage_config_file:
            # FileType('r') already opened the file; only its path is needed here.
            storage_config_file = args.storage_config_file.name

        storage_options = StorageOptions(
            uri=uri,
            storage_id=args.storage,
            max_bagfile_size=args.max_bag_size,
            max_bagfile_duration=args.max_bag_duration,
            max_cache_size=args.max_cache_size,
            storage_preset_profile=args.storage_preset_profile,
            storage_config_uri=storage_config_file,
            snapshot_mode=args.snapshot_mode,
            custom_data=custom_data
        )
        record_options = RecordOptions()
        record_options.all = args.all
        record_options.is_discovery_disabled = args.no_discovery
        record_options.topics = args.topics
        record_options.rmw_serialization_format = args.serialization_format
        record_options.topic_polling_interval = datetime.timedelta(
            milliseconds=args.polling_interval)
        record_options.regex = args.regex
        record_options.exclude = args.exclude
        record_options.node_prefix = NODE_NAME_PREFIX
        record_options.compression_mode = args.compression_mode
        record_options.compression_format = args.compression_format
        record_options.compression_queue_size = args.compression_queue_size
        record_options.compression_threads = args.compression_threads
        record_options.topic_qos_profile_overrides = qos_profile_overrides
        record_options.include_hidden_topics = args.include_hidden_topics
        record_options.include_unpublished_topics = args.include_unpublished_topics
        record_options.start_paused = args.start_paused
        record_options.ignore_leaf_topics = args.ignore_leaf_topics
        record_options.use_sim_time = args.use_sim_time

        recorder = Recorder()
        try:
            recorder.record(storage_options, record_options, args.node_name)
        except KeyboardInterrupt:
            pass

        # Ctrl-C before any data arrived leaves an empty output dir; clean it up.
        if os.path.isdir(uri) and not os.listdir(uri):
            os.rmdir(uri)
| 12,576
| 47.748062
| 98
|
py
|
rosbag2
|
rosbag2-master/ros2bag/ros2bag/verb/play.py
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import FileType
from rclpy.qos import InvalidQoSProfileException
from ros2bag.api import add_standard_reader_args
from ros2bag.api import check_not_negative_int
from ros2bag.api import check_positive_float
from ros2bag.api import convert_yaml_to_qos_profile
from ros2bag.api import print_error
from ros2bag.verb import VerbExtension
from ros2cli.node import NODE_NAME_PREFIX
from rosbag2_py import Player
from rosbag2_py import PlayOptions
from rosbag2_py import StorageOptions
import yaml
def positive_float(arg: str) -> float:
    """Argparse ``type`` helper: parse *arg* as a strictly positive float.

    Raises ValueError (which argparse reports as a usage error) for any
    value that is zero or negative.
    """
    number = float(arg)
    # Comparison kept as "<= 0" (not "not > 0") to preserve original behavior.
    if number <= 0:
        raise ValueError(f'Value {number} is less than or equal to zero.')
    return number
class PlayVerb(VerbExtension):
    """Play back ROS data from a bag."""

    def add_arguments(self, parser, cli_name):  # noqa: D102
        # Common reader arguments (bag path, storage id).
        add_standard_reader_args(parser)
        parser.add_argument(
            '--read-ahead-queue-size', type=int, default=1000,
            help='size of message queue rosbag tries to hold in memory to help deterministic '
                 'playback. Larger size will result in larger memory needs but might prevent '
                 'delay of message playback.')
        parser.add_argument(
            '-r', '--rate', type=check_positive_float, default=1.0,
            help='rate at which to play back messages. Valid range > 0.0.')
        parser.add_argument(
            '--topics', type=str, default=[], nargs='+',
            help='topics to replay, separated by space. If none specified, all topics will be '
                 'replayed.')
        parser.add_argument(
            '-e', '--regex', default='',
            help='filter topics by regular expression to replay, separated by space. If none '
                 'specified, all topics will be replayed.')
        parser.add_argument(
            '-x', '--exclude', default='',
            help='regular expressions to exclude topics from replay, separated by space. If none '
                 'specified, all topics will be replayed.')
        parser.add_argument(
            '--qos-profile-overrides-path', type=FileType('r'),
            help='Path to a yaml file defining overrides of the QoS profile for specific topics.')
        parser.add_argument(
            '-l', '--loop', action='store_true',
            help='enables loop playback when playing a bagfile: it starts back at the beginning '
                 'on reaching the end and plays indefinitely.')
        # NOTE(review): default is '' (a str), not a list; iterating it yields no
        # remap rules, so the default is effectively "no remapping".
        parser.add_argument(
            '--remap', '-m', default='', nargs='+',
            help='list of topics to be remapped: in the form '
                 '"old_topic1:=new_topic1 old_topic2:=new_topic2 etc." ')
        parser.add_argument(
            '--storage-config-file', type=FileType('r'),
            help='Path to a yaml file defining storage specific configurations. '
                 'See storage plugin documentation for the format of this file.')
        # The three /clock publishing behaviors are mutually exclusive.
        clock_args_group = parser.add_mutually_exclusive_group()
        # Bare "--clock" (no value) publishes at 40 Hz via const=40.
        clock_args_group.add_argument(
            '--clock', type=positive_float, nargs='?', const=40, default=0,
            help='Publish to /clock at a specific frequency in Hz, to act as a ROS Time Source. '
                 'Value must be positive. Defaults to not publishing.')
        clock_args_group.add_argument(
            '--clock-topics', type=str, default=[], nargs='+',
            help='List of topics separated by spaces that will trigger a /clock update '
                 'when a message is published on them'
        )
        clock_args_group.add_argument(
            '--clock-topics-all', default=False, action='store_true',
            help='Publishes an update on /clock immediately before each replayed message'
        )
        parser.add_argument(
            '-d', '--delay', type=positive_float, default=0.0,
            help='Sleep duration before play (each loop), in seconds. Negative durations invalid.')
        parser.add_argument(
            '--playback-duration', type=float, default=-1.0,
            help='Playback duration, in seconds. Negative durations mark an infinite playback. '
                 'Default is %(default)d. '
                 'When positive, the maximum effective time between `playback-until-*` '
                 'and this argument will determine when playback stops.')
        # "Until sec" and "until nsec" are two spellings of the same stop condition.
        playback_until_arg_group = parser.add_mutually_exclusive_group()
        playback_until_arg_group.add_argument(
            '--playback-until-sec', type=float, default=-1.,
            help='Playback until timestamp, expressed in seconds since epoch. '
                 'Mutually exclusive argument with `--playback-until-nsec`. '
                 'Use when floating point to integer conversion error is not a concern. '
                 'A negative value disables this feature. '
                 'Default is %(default)f. '
                 'When positive, the maximum effective time between `--playback-duration` '
                 'and this argument will determine when playback stops.')
        playback_until_arg_group.add_argument(
            '--playback-until-nsec', type=int, default=-1,
            help='Playback until timestamp, expressed in nanoseconds since epoch. '
                 'Mutually exclusive argument with `--playback-until-sec`. '
                 'Use when floating point to integer conversion error matters for your use case. '
                 'A negative value disables this feature. '
                 'Default is %(default)s. '
                 'When positive, the maximum effective time between `--playback-duration` '
                 'and this argument will determine when playback stops.')
        parser.add_argument(
            '--disable-keyboard-controls', action='store_true',
            help='disables keyboard controls for playback')
        parser.add_argument(
            '-p', '--start-paused', action='store_true', default=False,
            help='Start the playback player in a paused state.')
        parser.add_argument(
            '--start-offset', type=check_positive_float, default=0.0,
            help='Start the playback player this many seconds into the bag file.')
        parser.add_argument(
            '--wait-for-all-acked', type=check_not_negative_int, default=-1,
            help='Wait until all published messages are acknowledged by all subscribers or until '
                 'the timeout elapses in millisecond before play is terminated. '
                 'Especially for the case of sending message with big size in a short time. '
                 'Negative timeout is invalid. '
                 '0 means wait forever until all published messages are acknowledged by all '
                 'subscribers. '
                 "Note that this option is valid only if the publisher\'s QOS profile is "
                 'RELIABLE.',
            metavar='TIMEOUT')
        parser.add_argument(
            '--disable-loan-message', action='store_true', default=False,
            help='Disable to publish as loaned message. '
                 'By default, if loaned message can be used, messages are published as loaned '
                 'message. It can help to reduce the number of data copies, so there is a greater '
                 'benefit for sending big data.')

    def get_playback_until_from_arg_group(self, playback_until_sec, playback_until_nsec) -> int:
        """Collapse --playback-until-{sec,nsec} into one nanosecond timestamp.

        Returns -1 when neither limit is set (truthy and non-negative); the
        seconds variant wins if both are somehow present.
        """
        nano_scale = 1000 * 1000 * 1000
        if playback_until_sec and playback_until_sec >= 0.0:
            return int(playback_until_sec * nano_scale)
        if playback_until_nsec and playback_until_nsec >= 0:
            return playback_until_nsec
        return -1

    def main(self, *, args):  # noqa: D102
        qos_profile_overrides = {}  # Specify a valid default
        if args.qos_profile_overrides_path:
            qos_profile_dict = yaml.safe_load(args.qos_profile_overrides_path)
            try:
                qos_profile_overrides = convert_yaml_to_qos_profile(
                    qos_profile_dict)
            except (InvalidQoSProfileException, ValueError) as e:
                return print_error(str(e))

        storage_config_file = ''
        if args.storage_config_file:
            # FileType('r') already opened the file; only its path is needed here.
            storage_config_file = args.storage_config_file.name

        # Expand "old:=new" rules into ROS CLI remapping arguments.
        topic_remapping = ['--ros-args']
        for remap_rule in args.remap:
            topic_remapping.append('--remap')
            topic_remapping.append(remap_rule)

        storage_options = StorageOptions(
            uri=args.bag_path,
            storage_id=args.storage,
            storage_config_uri=storage_config_file,
        )
        play_options = PlayOptions()
        play_options.read_ahead_queue_size = args.read_ahead_queue_size
        play_options.node_prefix = NODE_NAME_PREFIX
        play_options.rate = args.rate
        play_options.topics_to_filter = args.topics
        play_options.topics_regex_to_filter = args.regex
        play_options.topics_regex_to_exclude = args.exclude
        play_options.topic_qos_profile_overrides = qos_profile_overrides
        play_options.loop = args.loop
        play_options.topic_remapping_options = topic_remapping
        play_options.clock_publish_frequency = args.clock
        # Topic-driven /clock publishing (either per-topic list or all topics).
        if args.clock_topics_all or len(args.clock_topics) > 0:
            play_options.clock_publish_on_topic_publish = True
            play_options.clock_topics = args.clock_topics
        play_options.delay = args.delay
        play_options.playback_duration = args.playback_duration
        play_options.playback_until_timestamp = self.get_playback_until_from_arg_group(
            args.playback_until_sec, args.playback_until_nsec)
        play_options.disable_keyboard_controls = args.disable_keyboard_controls
        play_options.start_paused = args.start_paused
        play_options.start_offset = args.start_offset
        play_options.wait_acked_timeout = args.wait_for_all_acked
        play_options.disable_loan_message = args.disable_loan_message

        player = Player()
        try:
            player.play(storage_options, play_options)
        except KeyboardInterrupt:
            # Ctrl-C is the normal way to stop playback; not an error.
            pass
| 10,706
| 49.985714
| 99
|
py
|
rosbag2
|
rosbag2-master/ros2bag/ros2bag/verb/info.py
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ros2bag.api import add_standard_reader_args
from ros2bag.verb import VerbExtension
from rosbag2_py._info import Info
class InfoVerb(VerbExtension):
    """Print information about a bag to the screen."""

    def add_arguments(self, parser, cli_name):  # noqa: D102
        add_standard_reader_args(parser)
        parser.add_argument(
            '-t', '--topic-name', action='store_true',
            help='Only display topic names.')

    def main(self, *, args):  # noqa: D102
        metadata = Info().read_metadata(args.bag_path, args.storage)
        if not args.topic_name:
            # Default mode: dump the full metadata summary.
            print(metadata)
            return
        # Topic-name-only mode: one topic name per line.
        for entry in metadata.topics_with_message_count:
            print(entry.topic_metadata.name)
| 1,336
| 35.135135
| 74
|
py
|
rosbag2
|
rosbag2-master/ros2bag/ros2bag/verb/convert.py
|
# Copyright 2021 Amazon.com Inc or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ros2bag.verb import VerbExtension
from rosbag2_py import bag_rewrite
from rosbag2_py import StorageOptions
class ConvertVerb(VerbExtension):
    """Given an input bag, write out a new bag with different settings."""

    def add_arguments(self, parser, cli_name):
        parser.add_argument(
            '-i', '--input',
            required=True,
            action='append', nargs='+',
            metavar=('uri', 'storage_id'),
            help='URI (and optional storage ID) of an input bag. May be provided more than once')
        parser.add_argument(
            '-o', '--output-options',
            type=str, required=True,
            help='YAML file with options for output bags. Must have one top-level key '
                 '"output_bags", which contains a sequence of StorageOptions/RecordOptions '
                 'objects. See README.md for some examples.')

    def main(self, *, args):
        # Each --input occurrence is a 1- or 2-element list: [uri] or [uri, storage_id].
        input_options = []
        for spec in args.input:
            if len(spec) > 2:
                raise argparse.ArgumentTypeError(
                    f'--input expects 1 or 2 arguments, {len(spec)} provided')
            uri, *maybe_storage = spec
            options = StorageOptions(uri=uri)
            if maybe_storage:
                options.storage_id = maybe_storage[0]
            input_options.append(options)
        bag_rewrite(input_options, args.output_options)
| 2,024
| 38.705882
| 97
|
py
|
rosbag2
|
rosbag2-master/ros2bag/ros2bag/verb/__init__.py
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ros2cli.plugin_system import PLUGIN_SYSTEM_VERSION
from ros2cli.plugin_system import satisfies_version
class VerbExtension:
    """
    The extension point for 'bag' verb extensions.

    The following properties must be defined:
    * `NAME` (will be set to the entry point name)

    The following methods must be defined:
    * `main`

    The following methods can be defined:
    * `add_arguments`
    """

    # Set by the plugin system to the entry point name of the verb.
    NAME = None
    EXTENSION_POINT_VERSION = '0.1'

    def __init__(self):
        # Modernized from Python-2-style super(VerbExtension, self).__init__().
        super().__init__()
        # Fail fast if the installed ros2cli plugin system is incompatible.
        satisfies_version(PLUGIN_SYSTEM_VERSION, '^0.1')

    def add_arguments(self, parser, cli_name):
        """Hook for subclasses to register verb-specific CLI arguments; default: none."""
        pass

    def main(self, *, args):
        """Entry point of the verb; subclasses must override."""
        raise NotImplementedError()
| 1,328
| 28.533333
| 74
|
py
|
rosbag2
|
rosbag2-master/ros2bag/ros2bag/verb/list.py
|
# Copyright 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from xml.dom import minidom
from ament_index_python import get_resource
from ament_index_python import get_resources
from ros2bag.verb import VerbExtension
class ListVerb(VerbExtension):
    """Print information about available plugins to the screen."""

    def add_arguments(self, parser, cli_name):  # noqa: D102
        parser.add_argument(
            'plugin_type',
            help='lists available plugins',
            choices=['storage', 'converter', 'compressor', 'decompressor'])
        parser.add_argument(
            '-v', '--verbose', help='output verbose information about the available plugin',
            action='store_true')

    def main(self, *, args):  # noqa: D102
        # the following is the resource index which is created when installing a pluginlib xml file
        if args.plugin_type == 'storage':
            pluginlib_resource_index = 'rosbag2_storage__pluginlib__plugin'
        elif args.plugin_type == 'compressor' or args.plugin_type == 'decompressor':
            pluginlib_resource_index = 'rosbag2_compression__pluginlib__plugin'
        else:
            pluginlib_resource_index = 'rosbag2_cpp__pluginlib__plugin'

        resources = get_resources(pluginlib_resource_index)
        if args.verbose:
            print('available %s plugins are:' % args.plugin_type)
        for resource in resources:
            plugin_xml_file_paths, base_folder = get_resource(pluginlib_resource_index, resource)
            # One ament resource may register several pluginlib XML files,
            # newline-separated; skip empty entries.
            for file_path in list(filter(None, plugin_xml_file_paths.split('\n'))):
                abs_path = os.path.join(base_folder, file_path)
                if not os.path.exists(abs_path):
                    # Returning a string signals an error to the ros2cli framework.
                    return 'path does not exist: %s' % abs_path
                xmldoc = minidom.parse(abs_path)
                # Each <class> element describes one exported plugin.
                for class_item in xmldoc.getElementsByTagName('class'):
                    class_name = class_item.attributes['name']
                    type_name = class_item.attributes['type']
                    base_class_name = class_item.attributes['base_class_type']
                    description = class_item.getElementsByTagName('description')[0]
                    # Compression and decompression plugins share the same resource index
                    # so they must be filtered using their base class
                    if args.plugin_type == 'compressor' and \
                            base_class_name.value != \
                            'rosbag2_compression::BaseCompressorInterface':
                        continue
                    elif args.plugin_type == 'decompressor' and \
                            base_class_name.value != \
                            'rosbag2_compression::BaseDecompressorInterface':
                        continue
                    print('%s%s' % (('name: ' if args.verbose else ''), class_name.value))
                    if args.verbose:
                        # Verbose mode adds the description, type and base class,
                        # tab-indented under the name.
                        print('\t%s' % description.childNodes[0].data)
                        print('\ttype: %s' % type_name.value)
                        print('\tbase_class: %s' % base_class_name.value)
| 3,688
| 45.696203
| 99
|
py
|
baryrat
|
baryrat-master/test.py
|
import numpy as np
import baryrat
import scipy.interpolate
import flamp
import gmpy2
import pytest
def test_init():
    """Direct construction of a BarycentricRational and pointwise evaluation."""
    nodes = [0, 1, 2]
    values = [1, 2, 0]
    weights = [0.5, -1, 0.5]
    r = baryrat.BarycentricRational(nodes, values, weights)
    X = np.linspace(0, 2, 100)
    # with these weights, r is the interpolating quadratic polynomial
    assert np.allclose(r(X), -3/2*X**2 + 5/2*X + 1)

def test_approx():
    """AAA approximation of a smooth function, passing values or a callable."""
    Z = np.linspace(0.0, 1.0, 101)
    def f(z): return np.exp(z)*np.sin(2*np.pi*z)
    F = f(Z)

    r = baryrat.aaa(Z, F, mmax=10)
    assert np.linalg.norm(r(Z) - F, np.inf) < 1e-10, 'insufficient approximation'

    # check invoking with functions
    r2 = baryrat.aaa(Z, f, mmax=10)
    assert np.linalg.norm(r(Z) - r2(Z), np.inf) < 1e-15

    # check that calling r works for scalars, vectors, matrices
    assert np.isscalar(r(0.45))
    assert r(np.ones(7)).shape == (7,)
    assert r(np.ones((3, 2))).shape == (3, 2)

def test_aaa_complex():
    """AAA approximation of a complex-valued function."""
    Z = np.linspace(0.0, 1.0, 101)
    def f(z): return np.exp(2j*np.pi*z)
    F = f(Z)
    r = baryrat.aaa(Z, F, mmax=8)
    assert np.linalg.norm(r(Z) - F, np.inf) < 1e-10, 'insufficient approximation'

def test_reproduction():
    """AAA reproduces a rational function exactly; poles/residues match."""
    p = [-1.0, -2.0, -3.0]
    def f(z):
        return (z**3 - 2*z**2 + 4*z - 7) / ((z - p[0])*(z - p[1])*(z - p[2]))
    nodes = np.arange(1, 8, dtype=float)
    r = baryrat.aaa(nodes, f(nodes))
    assert np.allclose(f(nodes), r(nodes))
    z = np.linspace(0, 1, 100)
    assert np.allclose(f(z), r(z))
    pol, res = r.polres()
    assert np.allclose(sorted(p), sorted(pol))
    ratfun = sum(res[j] / (z - pol[j]) for j in range(len(pol)))
    bias = ratfun - f(z)
    assert np.allclose(bias, bias[0])   # should be constant

    # same checks using extended precision for the pole/residue computation
    flamp.set_dps(100)
    pol, res = r.polres(use_mp=True)
    pol = np.real_if_close(np.array(pol, complex))
    assert np.allclose(sorted(p), sorted(pol))
    ratfun = sum(res[j] / (z - pol[j]) for j in range(len(pol)))
    bias = np.array(ratfun - f(z), complex)
    assert np.allclose(bias, bias[0])   # should be constant

def test_polres():
    """Poles and residues of an AAA approximant against reference values."""
    Z = np.linspace(0.0, 1.0, 101)
    F = np.exp(Z) * np.sin(2*np.pi*Z)
    r = baryrat.aaa(Z, F, mmax=6)
    pol, res = r.polres()
    assert np.allclose(pol,
            np.array([2.26333482+0.j, 0.2338428+0.90087977j,
                0.2338428-0.90087977j, 0.96472415+0.85470621j,
                0.96472415-0.85470621j]))
    assert np.allclose(res,
            np.array([69.08984183+0.j, 20.50747913-9.24908921j,
                20.50747913+9.24908921j, 23.24692682+23.94602455j,
                23.24692682-23.94602455j]))
    # evaluation at the poles should blow up
    polvals = r(pol)
    assert np.min(np.abs(polvals)) > 1e13
    # check that gain == r(inf)
    assert np.allclose(r.gain(), r(1e14))

def test_zeros():
    """Zeros of an AAA approximant, in double and extended precision."""
    Z = np.linspace(0.0, 1.0, 101)
    F = np.exp(Z) * np.sin(2*np.pi*Z)
    r = baryrat.aaa(Z, F, mmax=6)
    zer = r.zeros()
    assert np.allclose(zer,
            np.array([-0.38621461, 1.43052691, 0.49999907, 1., 0.]))
    assert np.allclose(r(zer), 0.0)
    zer2 = r.zeros(use_mp=True)
    zer2 = np.real_if_close(np.array(zer2, complex))
    assert np.allclose(sorted(zer), sorted(zer2))
def test_reciprocal():
    """reciprocal() evaluates to 1/r pointwise."""
    nodes = np.linspace(0, 1, 4)
    r = baryrat.floater_hormann(nodes, np.exp(-nodes), 2)
    rr = r.reciprocal()
    Z = np.linspace(0, 1, 100)
    assert np.allclose(1 / r(Z), rr(Z))

def test_interpolate_rat():
    """Rational interpolation: interpolation property, error, numerator/denominator."""
    Z = np.linspace(1, 5, 7)
    F = np.sin(Z)
    r = baryrat.interpolate_rat(Z, F)
    assert np.allclose(r(Z), F)
    X = np.linspace(1, 5, 100)
    err = np.linalg.norm(r(X) - np.sin(X), np.inf)
    assert err < 2e-3
    #
    p, q = r.numerator(), r.denominator()
    assert np.allclose(p(X) / q(X), r(X))
    assert r.degree() == (3, 3)

def test_interpolate_with_degree():
    """Interpolation with prescribed (m, n) degrees reproduces rational functions."""
    X = np.linspace(0, 1, 100)
    ##
    def f(x):
        return (x + 3) / ((x + 1) * (x + 2))
    Z = np.linspace(0, 1, 4)
    r = baryrat.interpolate_with_degree(Z, f(Z), (1, 2))
    assert np.allclose(f(X), r(X))
    assert r.order == 2
    assert r.degree() == (1, 2)
    ##
    def f(x):
        return (x * (x + 1) * (x + 2)) / (x + 3)
    Z = np.linspace(0, 1, 5)
    r = baryrat.interpolate_with_degree(Z, f(Z), (3, 1))
    assert np.allclose(f(X), r(X))
    assert r.order == 3
    assert r.degree() == (3, 1)

def test_interpolate_rat_mp():
    """interpolate_rat in extended precision, real and complex nodes."""
    flamp.set_dps(100)
    X = flamp.linspace(0, 1, 100)
    ##
    def f(x):
        return (x + 3) / ((x + 1) * (x + 2))
    Z = flamp.linspace(0, 1, 5)
    r = baryrat.interpolate_rat(Z, f(Z))
    assert np.linalg.norm(f(X) - r(X), np.inf) < 1e-90
    assert r.order == 2
    ##
    def f(x):
        return (x * (x + 1) * (x + 2)) / (x + 3)
    Z = flamp.linspace(0, 1, 7)
    r = baryrat.interpolate_rat(Z, f(Z))
    assert np.linalg.norm(f(X) - r(X), np.inf) < 1e-90
    assert r.order == 3

    ## test complex case
    n = 9
    Z = np.array([np.exp(2j * k / n * np.pi) for k in range(0, n)])
    r = baryrat.interpolate_rat(Z, np.exp(Z), use_mp=True)
    X = 1j * flamp.linspace(-1, 1, 100)
    assert abs(flamp.exp(X) - r(X)).max() < 1e-7

    ## same thing with Z already in mpc form
    Z = flamp.exp(1j * flamp.linspace(0, 2 * np.pi, n, endpoint=False))
    r = baryrat.interpolate_rat(Z, flamp.exp(Z))
    X = 1j * flamp.linspace(-1, 1, 100)
    assert abs(flamp.exp(X) - r(X)).max() < 1e-7

def test_interpolate_with_degree_mp():
    """interpolate_with_degree in extended precision."""
    flamp.set_dps(100)
    X = flamp.linspace(0, 1, 100)
    ##
    def f(x):
        return (x + 3) / ((x + 1) * (x + 2))
    Z = flamp.linspace(0, 1, 4)
    r = baryrat.interpolate_with_degree(Z, f(Z), (1, 2))
    assert np.linalg.norm(f(X) - r(X), np.inf) < 1e-90
    assert r.order == 2
    assert r.degree() == (1, 2)
    ##
    def f(x):
        return (x * (x + 1) * (x + 2)) / (x + 3)
    Z = flamp.linspace(0, 1, 5)
    r = baryrat.interpolate_with_degree(Z, f(Z), (3, 1))
    assert np.linalg.norm(f(X) - r(X), np.inf) < 1e-90
    assert r.order == 3
    assert r.degree() == (3, 1)

def test_reduce_order():
    """reduce_order() finds the minimal order while preserving the function."""
    nodes = np.linspace(0, 1, 11)
    r = baryrat.interpolate_rat(nodes, np.ones_like(nodes))
    assert r.order == 5
    r2 = r.reduce_order()
    assert r2.order == 0
    X = np.linspace(0, 1, 25)
    assert np.allclose(r2(X), 1.0)
    #
    # another test with full order (no reduction)
    r = baryrat.interpolate_rat(nodes, np.sin(nodes))
    assert r.order == 5
    r2 = r.reduce_order()
    assert r2.order == 5
    X = np.linspace(0, 1, 25)
    assert np.allclose(r2(X), r(X))
def test_interpolate_rat_complex():
    """Rational interpolation with complex values."""
    Z = np.linspace(0.0, 1.0, 9)
    def f(z): return np.exp(2j*np.pi*z)
    F = f(Z)
    r = baryrat.interpolate_rat(Z, F)
    assert np.allclose(r(Z), F)     # check interpolation property
    X = np.linspace(0.0, 1.0, 100)
    err = np.linalg.norm(r(X) - f(X), np.inf)
    assert err < 1e-4       # check interpolation error

def test_interpolate_poly():
    """Polynomial interpolation agrees with scipy's Lagrange interpolation."""
    Z = np.linspace(1, 5, 7)
    F = np.sin(Z)
    p = baryrat.interpolate_poly(Z, F)
    p1 = scipy.interpolate.lagrange(Z, F)
    X = np.linspace(1, 5, 100)
    assert np.allclose(p(X), p1(X))

def test_interpolate_poly_complex():
    """Polynomial interpolation with complex values."""
    Z = np.linspace(0.0, 1.0, 9)
    def f(z): return np.exp(2j*np.pi*z)
    F = f(Z)
    p = baryrat.interpolate_poly(Z, F)
    assert np.allclose(p(Z), F)     # check interpolation property
    X = np.linspace(0.0, 1.0, 100)
    err = np.linalg.norm(p(X) - f(X), np.inf)
    assert err < 2e-3       # check interpolation error

def test_interpolate_with_poles():
    """Interpolation with prescribed poles reproduces those poles."""
    Z = np.arange(1, 5)
    F = np.sin(Z)
    poles = [-1, -2, -3]
    r = baryrat.interpolate_with_poles(Z, F, poles)
    assert np.allclose(r(Z), F)
    pol, res = r.polres()
    assert np.allclose(sorted(pol), sorted(poles))
    pol1 = r.poles()
    pol2 = r.poles(use_mp=True)
    pol2 = np.real_if_close(np.array(pol2, complex))
    assert np.allclose(sorted(pol1), sorted(poles))
    assert np.allclose(sorted(pol2), sorted(poles))

def test_interpolate_with_poles_mp():
    """Interpolation with prescribed poles in extended precision."""
    flamp.set_dps(100)
    Z = flamp.linspace(1.0, 4.0, 4)
    F = flamp.sin(Z)
    poles = [-3, -2, -1]
    r = baryrat.interpolate_with_poles(Z, F, poles)
    assert r.uses_mp()
    assert np.array_equal(r(Z), F)
    pol, res = r.polres()
    pol = np.real_if_close(np.array(pol, complex))
    pol.sort()
    assert np.linalg.norm(pol - poles) < 1e-90

def test_interpolate_floater_hormann():
    """Floater-Hormann interpolation: weights and error from the FH2007 paper."""
    n = 10
    Z = np.linspace(-5, 5, n + 1)
    X = np.linspace(-5, 5, 200)
    def f(z): return 1.0 / (1 + z**2)   # Runge's example
    F = f(Z)

    # normalized weights for the equidistant case given in FH2007
    correct_abs_weights = [
            [1, 1, 1, 1],
            [1, 2, 2, 2],
            [1, 3, 4, 4],
            [1, 4, 7, 8]
    ]

    for d in range(4):
        r = baryrat.floater_hormann(Z, F, d)
        assert np.allclose(r(Z), F)
        w = abs(r.weights / r.weights[0])   # normalize
        assert np.allclose(w[:4], correct_abs_weights[d])
        if d == 3:
            err = np.linalg.norm(r(X) - f(X), np.inf)
            assert err < 6.9e-2     # published error in FH2007

    # check that d=n results in polynomial interpolant
    r = baryrat.floater_hormann(Z, F, n)
    p = scipy.interpolate.lagrange(Z, F)
    assert np.allclose(r(X), p(X))

def test_deriv():
    """First and second derivatives via eval_deriv against closed forms."""
    def f(x):
        return (x + 3) / ((x + 1) * (x + 2))
    def df(x):
        return -2 / (x + 1)**2 + 1 / (x + 2)**2
    def d2f(x):
        return 4 / (x + 1)**3 - 2 / (x + 2)**3
    # compute barycentric representation of f
    Z = np.linspace(0, 1, 4)
    r = baryrat.interpolate_with_degree(Z, f(Z), (1, 2))
    X = np.linspace(0, 1, 50)
    X[:len(r.nodes)] = r.nodes      # also test evaluation exactly on the nodes
    assert np.allclose(r.eval_deriv(X), df(X))
    assert np.allclose(r.eval_deriv(X, k=2), d2f(X))

def test_chebnodes():
    """Chebyshev nodes on a scaled interval are roots of the Chebyshev polynomial."""
    n = 7
    nodes = baryrat.chebyshev_nodes(n, (0, 4))
    nodes = (nodes / 2 - 1)     # scale back to (-1,1)
    import scipy.special
    values = scipy.special.eval_chebyt(7, nodes)
    assert abs(values).max() < 1e-14
def test_brasil():
    """BRASIL best rational approximation of sqrt on [0,1]."""
    r, info = baryrat.brasil(np.sqrt, [0, 1], 10, tol=1e-5, info=True)
    assert info.converged
    assert info.deviation <= 1e-5
    assert info.error <= 5e-6
    assert(len(info.errors) == info.iterations + 1)

def test_brasil_poly():
    """BRASIL polynomial approximation against published reference errors."""
    # problem with known error
    # http://www-solar.mcs.st-and.ac.uk/~clare/Lectures/num-analysis/Numan_chap4.pdf
    p, info = baryrat.brasil(np.exp, [0, 1], (1, 0), tol=1e-12, info=True)
    assert info.converged
    # closed-form minimax error for linear approximation of exp on [0,1]
    m = np.exp(1) - 1
    theta = np.log(m)
    c = (m + np.exp(1) - m*theta - m) / 2
    E = 1 - c
    assert np.allclose(info.error, E)

    # https://doi.org/10.1007/s10543-009-0240-1
    def f(x): return np.sin(np.exp(x))
    p, info = baryrat.brasil(f, [-1, 1], (10, 0), npi=-30, info=True)   # use golden section search
    assert np.allclose(info.error, 1.78623400e-6)
    #
    def f(x): return np.sqrt(x + 1)
    p, info = baryrat.brasil(f, [-1, 1], (10, 0), tol=1e-8, info=True)
    assert np.allclose(info.error, 1.978007008380e-2)

def test_brasil_deg():
    """BRASIL with unequal numerator/denominator degrees."""
    r, info = baryrat.brasil(np.sqrt, [0, 1], (10, 5), tol=1e-8, info=True)
    assert info.converged
    assert info.deviation <= 1e-8
    assert info.error <= 6e-5
    assert(len(info.errors) == info.iterations + 1)
    #
    r, info = baryrat.brasil(np.sqrt, [0, 1], (5, 10), tol=1e-8, info=True)
    assert info.converged
    assert info.deviation <= 1e-8
    assert info.error <= 8e-5
    assert(len(info.errors) == info.iterations + 1)

def test_jacobians():
    """Analytic Jacobians of r(x) versus finite-difference approximations."""
    Z = np.linspace(1, 5, 7)
    r = baryrat.interpolate_rat(Z, np.sin(Z))

    x = np.linspace(2, 4, 3)
    Dz, Df, Dw = r.jacobians(x)

    delta = 1e-6
    # compare to finite differences
    for k in range(len(r.nodes)):
        z_delta = r.nodes.copy()
        z_delta[k] += delta
        r_delta = baryrat.BarycentricRational(z_delta, r.values, r.weights)
        deriv = (r_delta(x) - r(x)) / delta
        assert np.allclose(Dz[:, k], deriv)

        f_delta = r.values.copy()
        f_delta[k] += delta
        r_delta = baryrat.BarycentricRational(r.nodes, f_delta, r.weights)
        deriv = (r_delta(x) - r(x)) / delta
        assert np.allclose(Df[:, k], deriv)

        w_delta = r.weights.copy()
        w_delta[k] += delta
        r_delta = baryrat.BarycentricRational(r.nodes, r.values, w_delta)
        deriv = (r_delta(x) - r(x)) / delta
        assert np.allclose(Dw[:, k], deriv)

def test_bpane():
    """Polynomial best approximation of abs(x) via bpane."""
    def f(x): return abs(x)
    def f_deriv(x): return np.sign(x)
    interval = (-1.0, 1.0)
    p, info = baryrat.bpane(f, f_deriv, interval, 11, info=True, verbose=0)
    assert info.error < 2.8e-2 and abs(info.lam) < 2.8e-2
    X = np.linspace(*interval, 100)
    assert abs(f(X) - p(X)).max() < info.error * (1 + 1e-6)     # allow some tolerance
    assert info.iterations == 11

    # try with numerical derivative
    p, info = baryrat.bpane(f, None, interval, 11, info=True, verbose=0)
    assert info.error < 2.8e-2 and abs(info.lam) < 2.8e-2
    assert info.iterations == 11

    # check that failure to converge (even degree for abs) is signaled
    with pytest.raises((RuntimeError, np.linalg.LinAlgError)):
        baryrat.bpane(f, f_deriv, interval, 10, maxiter=100, verbose=0)

def test_brane():
    """Rational best approximation of abs(x) via brane (extended precision)."""
    flamp.set_dps(100)
    def f(x): return abs(x)
    def f_deriv(x): return np.sign(x)
    interval = (-1.0, 1.0)
    r, info = baryrat.brane(f, f_deriv, interval, (11, 10), info=True, verbose=0)
    assert info.error < 2.7e-4 and abs(info.lam) < 2.7e-4
    X = np.linspace(*interval, 100)
    assert abs(f(X) - r(X)).max() < info.error * (1 + 1e-10)    # allow some tolerance
    assert info.iterations == 15

    # try with numerical derivative
    r, info = baryrat.brane(f, None, interval, (11, 10), info=True, verbose=0)
    assert info.error < 2.7e-4 and abs(info.lam) < 2.7e-4
    assert info.iterations == 15

    # check that failure to converge (even degree for abs) is signaled
    with pytest.raises(RuntimeError):
        baryrat.brane(f, f_deriv, interval, (10, 10), maxiter=10, verbose=0)
| 13,991
| 32.473684
| 99
|
py
|
baryrat
|
baryrat-master/setup.py
|
from setuptools import setup
import os
from io import open # Py2.7 compatibility
def readme():
    """Return the contents of README.md next to this setup script.

    Used as the package's long_description below.
    """
    with open(os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            'README.md'
        ), encoding='utf8') as fp:
        return fp.read()

# package metadata; the whole library lives in the single module baryrat.py
setup(
    name = 'baryrat',
    version = '2.1.0',
    description = 'A Python package for barycentric rational approximation',
    long_description = readme(),
    long_description_content_type = 'text/markdown',
    author = 'Clemens Hofreither',
    author_email = 'clemens.hofreither@ricam.oeaw.ac.at',
    url = 'https://github.com/c-f-h/baryrat',
    classifiers=[
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Mathematics',
        'License :: OSI Approved :: BSD License',
    ],
    py_modules = ['baryrat'],
    install_requires = [
        'numpy>=1.11',
        'scipy',
    ],
    tests_require = 'nose',
    test_suite = 'nose.collector'
)
| 1,067
| 28.666667
| 76
|
py
|
baryrat
|
baryrat-master/baryrat.py
|
"""A Python package for barycentric rational approximation.
"""
import numpy as np
import scipy.linalg
import math
try:
import gmpy2
import flamp
except ImportError:
gmpy2 = None
flamp = None
else:
from gmpy2 import mpfr, mpc
__version__ = '2.1.0'
def _is_mp_array(x):
    """Return a truthy value iff `x` is an object ndarray of gmpy2 mpfr/mpc scalars."""
    # short-circuiting on `gmpy2` keeps mpfr/mpc from being referenced when the
    # optional gmpy2 import failed (those names are only bound on success)
    return (gmpy2 and x.dtype == 'O' and len(x) > 0
            and isinstance(x.flat[0], (mpfr, mpc)))
def _q(z, f, w, x):
    """Evaluate the weighted partial-fraction sum ``sum_j f_j * w_j / (x - z_j)``.

    This is the 'upper' (f = values) or 'lower' (f = 1) part of a barycentric
    rational function; `x` may be a number or a column vector.
    """
    terms = (f * w) / (x - z)
    return terms.sum(axis=-1)
def _compute_roots(w, x, use_mp):
    """Compute roots via a standard eigenvalue problem (Knockaert's method).

    Called with (weights, nodes) to obtain poles and with (weights*values,
    nodes) to obtain zeros of a barycentric rational function. Falls back to
    extended precision automatically if `w` or `x` are gmpy2 object arrays.
    """
    # Cf.:
    # Knockaert, L. (2008). A simple and accurate algorithm for barycentric
    # rational interpolation. IEEE Signal processing letters, 15, 154-157.
    #
    # This version requires solving only a standard eigenvalue problem, but
    # has troubles when the polynomial has leading 0 coefficients.
    if _is_mp_array(w) or _is_mp_array(x):
        use_mp = True
    if use_mp:
        assert flamp, 'flamp package is not installed'
        ak = flamp.to_mp(w)     # TODO: this always copies!
        bk = flamp.to_mp(x)
        ak /= sum(ak)
        M = np.diag(bk) - np.outer(ak, x)
        lam = flamp.eig(M, left=False, right=False)
        # remove one simple root
        lam = np.delete(lam, np.argmin(abs(lam)))
        return lam
    else:
        # the same procedure in standard double precision
        ak = w / w.sum()
        M = np.diag(x) - np.outer(ak, x)
        lam = scipy.linalg.eigvals(M)
        # remove one simple root
        lam = np.delete(lam, np.argmin(abs(lam)))
        return np.real_if_close(lam)
def _compute_roots2(z, f, w):
    """Roots of ``sum_j f_j w_j / (x - z_j)`` via a companion matrix pair.

    Computation of roots/poles by generalized companion matrix pair; see:
    Fast Reduction of Generalized Companion Matrix Pairs for Barycentric
    Lagrange Interpolants, Piers W. Lawrence, SIAM J. Matrix Anal. Appl.,
    2013, https://doi.org/10.1137/130904508

    Unlike the standard-eigenvalue variant, this handles leading zero
    coefficients of the polynomial, but the generalized eigenvalue solve is
    currently not supported in mpmath/flamp.
    """
    n = len(w)
    # pencil (E, B): finite generalized eigenvalues are the desired roots
    B = np.eye(n + 1)
    B[0, 0] = 0
    E = np.block([[0, w],
                  [f[:, None], np.diag(z)]])
    lam = scipy.linalg.eigvals(E, B)
    finite = lam[np.isfinite(lam)]
    return np.real_if_close(finite)
def _mp_svd(A, full_matrices=True):
    """Convenience wrapper for high-precision SVD (requires the optional flamp package)."""
    assert flamp, 'flamp package is not installed'
    return flamp.svd(A, full_matrices=full_matrices)
def _mp_qr(A):
    """Convenience wrapper for high-precision full QR decomposition (requires flamp)."""
    assert flamp, 'flamp package is not installed'
    return flamp.qr(A, mode='full')
def _nullspace_vector(A, use_mp=False):
    """Return a vector (approximately) in the nullspace of `A`.

    Takes the last column of the full QR factorization of A.T, which is
    orthogonal to all rows of A, and returns its complex conjugate.
    Automatically switches to extended precision for gmpy2 object arrays.
    """
    if _is_mp_array(A):
        use_mp = True
    if use_mp:
        Q, _ = _mp_qr(A.T)
    else:
        if A.shape[0] == 0:
            # some LAPACK implementations have trouble with size 0 matrices
            result = np.zeros(A.shape[1])
            result[0] = 1.0
            return result
        Q, _ = scipy.linalg.qr(A.T, mode='full')
    return Q[:, -1].conj()
class BarycentricRational:
    """A class representing a rational function in barycentric representation.

    Args:
        z (array): the interpolation nodes
        f (array): the values at the interpolation nodes
        w (array): the weights

    The rational function has the interpolation property r(z_j) = f_j at all
    nodes where w_j != 0.
    """
    def __init__(self, z, f, w):
        if not (len(z) == len(f) == len(w)):
            raise ValueError('arrays z, f, and w must have the same length')
        # asanyarray keeps gmpy2 object arrays intact for extended precision
        self.nodes = np.asanyarray(z)
        self.values = np.asanyarray(f)
        self.weights = np.asanyarray(w)

    def __call__(self, x):
        """Evaluate rational function at all points of `x`."""
        zj,fj,wj = self.nodes, self.values, self.weights

        xv = np.asanyarray(x).ravel()
        if len(xv) == 0:
            return np.empty(np.shape(x), dtype=xv.dtype)

        D = xv[:,None] - zj[None,:]
        # find indices where x is exactly on a node
        (node_xi, node_zi) = np.nonzero(D == 0)

        one = xv[0] * 0 + 1     # for proper dtype when using MP

        with np.errstate(divide='ignore', invalid='ignore'):
            if len(node_xi) == 0:       # no zero divisors
                C = np.divide(one, D)
                r = C.dot(wj * fj) / C.dot(wj)
            else:
                # set divisor to 1 to avoid division by zero
                D[node_xi, node_zi] = one
                C = np.divide(one, D)
                r = C.dot(wj * fj) / C.dot(wj)
                # fix evaluation at nodes to corresponding fj
                # TODO: this is only correct if wj != 0
                r[node_xi] = fj[node_zi]

        if np.isscalar(x):
            return r[0]
        else:
            r.shape = np.shape(x)
            return r

    def uses_mp(self):
        """Checks whether any of the data of this rational function uses
        extended precision.
        """
        return _is_mp_array(self.nodes) or _is_mp_array(self.values) or _is_mp_array(self.weights)

    def eval_deriv(self, x, k=1):
        """Evaluate the `k`-th derivative of this rational function at a scalar
        node `x`, or at each point of an array `x`. Only the cases `k <= 2` are
        currently implemented.

        Note that this function may incur significant numerical error if `x` is
        very close (but not exactly equal) to a node of the barycentric
        rational function.

        References:
            https://doi.org/10.1090/S0025-5718-1986-0842136-8 (C. Schneider and
            W. Werner, 1986)
        """
        if k == 0:
            return self(x)
        # the implementation below assumes scalars, so use numpy to vectorize
        # if we got an array
        if not np.isscalar(x):
            return np.vectorize(lambda X: self.eval_deriv(X, k=k), otypes=[x.dtype])(x)

        # is x one of our nodes?
        nodeidx = np.nonzero(x == self.nodes)[0]
        if len(nodeidx) > 0:
            i = nodeidx[0]      # node index of x
            dx = self.nodes - x
            dx[i] = np.inf      # set i-th summand to 0
            if k == 1:
                # first-order divided differences
                dd = (self(self.nodes) - self(x)) / dx
            elif k == 2:
                # second-order divided differences with nodes (x, x, z_i)
                # (note that repeated nodes correspond to the first derivative)
                dd1 = (self(self.nodes) - self(x)) / dx
                dd = (dd1 - self.eval_deriv(x, k=1)) / dx
            else:
                raise NotImplementedError('derivatives higher than 2 not implemented')
            return -np.sum(dd * self.weights) / self.weights[i] * math.factorial(k)
        else:
            # x is not a node -- use divided differences
            if k == 1:
                # first-order divided differences
                dd = (self(self.nodes) - self(x)) / (self.nodes - x)
            elif k == 2:
                # second-order divided differences with nodes (x, x, z_i)
                # (note that repeated nodes correspond to the first derivative)
                dd1 = (self(self.nodes) - self(x)) / (self.nodes - x)
                dd = (dd1 - self.eval_deriv(x, k=1)) / (self.nodes - x)
            else:
                raise NotImplementedError('derivatives higher than 2 not implemented')
            return BarycentricRational(self.nodes, dd, self.weights)(x) * math.factorial(k)

    def jacobians(self, x):
        """Compute the Jacobians of `r(x)`, where `x` may be a vector of
        evaluation points, with respect to the node, value, and weight vectors.

        The evaluation points `x` may not lie on any of the barycentric nodes
        (unimplemented).

        Returns:
            A triple of arrays with as many rows as `x` has entries and as many
            columns as the barycentric function has nodes, representing the
            Jacobians with respect to :attr:`self.nodes`, :attr:`self.values`,
            and :attr:`self.weights`, respectively.
        """
        z, f, w = self.nodes, self.values, self.weights
        N1 = len(z)
        x_c = np.atleast_2d(x).T    # column vector

        dr_z, dr_f, dr_w = [], [], []
        # denominator sum q(x) = sum_j w_j / (x - z_j), evaluated at all x
        qz1 = _q(z, 1, w, x_c)

        # build matrices columnwise (j = node index)
        for j in range(N1):
            f_diff = np.subtract(f[j], f)
            x_minus_zj = np.subtract(x, z[j])
            dr_z.append(_q(z, f_diff * w[j], w, x_c) / (x_minus_zj * qz1)**2)
            dr_f.append(np.divide(w[j], (x_minus_zj * qz1)))
            dr_w.append(_q(z, f_diff, w, x_c) / (x_minus_zj * qz1**2))

        return np.column_stack(dr_z), np.column_stack(dr_f), np.column_stack(dr_w)

    @property
    def order(self):
        """The order of the barycentric rational function, that is, the maximum
        degree that its numerator and denominator may have, or the number of
        interpolation nodes minus one.
        """
        return len(self.nodes) - 1

    def poles(self, use_mp=False):
        """Return the poles of the rational function.

        If ``use_mp`` is ``True``, uses the ``flamp`` multiple precision
        package to compute the result. This option is automatically enabled if
        :meth:`uses_mp` is True.
        """
        if use_mp or self.uses_mp():
            return _compute_roots(self.weights, self.nodes, use_mp=True)
        else:
            return _compute_roots2(self.nodes, np.ones_like(self.values), self.weights)

    def polres(self, use_mp=False):
        """Return the poles and residues of the rational function.

        If ``use_mp`` is ``True``, uses the ``flamp`` multiple precision
        package to compute the result. This option is automatically enabled if
        :meth:`uses_mp` is True.
        """
        zj,fj,wj = self.nodes, self.values, self.weights
        m = len(wj)
        if self.uses_mp():
            use_mp = True

        # compute poles
        pol = self.poles(use_mp=use_mp)

        # compute residues via formula for simple poles of quotients of analytic functions
        C_pol = 1.0 / (pol[:,None] - zj[None,:])
        N_pol = C_pol.dot(fj*wj)
        Ddiff_pol = (-C_pol**2).dot(wj)
        res = N_pol / Ddiff_pol

        return pol, res

    def zeros(self, use_mp=False):
        """Return the zeros of the rational function.

        If ``use_mp`` is ``True``, uses the ``flamp`` multiple precision
        package to compute the result. This option is automatically enabled if
        :meth:`uses_mp` is True.
        """
        if use_mp or self.uses_mp():
            return _compute_roots(self.weights*self.values, self.nodes,
                    use_mp=True)
        else:
            return _compute_roots2(self.nodes, self.values, self.weights)

    def gain(self):
        """The gain in a poles-zeros-gain representation of the rational function,
        or equivalently, the value at infinity.
        """
        return np.sum(self.values * self.weights) / np.sum(self.weights)

    def reciprocal(self):
        """Return a new :class:`BarycentricRational` which is the reciprocal of this one."""
        return BarycentricRational(
                self.nodes.copy(),
                1 / self.values,
                self.weights * self.values)

    def numerator(self):
        """Return a new :class:`BarycentricRational` which represents the numerator polynomial."""
        weights = _polynomial_weights(self.nodes)
        return BarycentricRational(self.nodes.copy(), self.values * self.weights / weights, weights)

    def denominator(self):
        """Return a new :class:`BarycentricRational` which represents the denominator polynomial."""
        weights = _polynomial_weights(self.nodes)
        return BarycentricRational(self.nodes.copy(), self.weights / weights, weights)

    def degree_numer(self, tol=1e-12):
        """Compute the true degree of the numerator polynomial.

        Uses a result from [Berrut, Mittelmann 1997].
        """
        N = len(self.nodes) - 1
        for defect in range(N):
            if abs(np.sum(self.values * self.weights * (self.nodes ** defect))) > tol:
                return N - defect
        return 0

    def degree_denom(self, tol=1e-12):
        """Compute the true degree of the denominator polynomial.

        Uses a result from [Berrut, Mittelmann 1997].
        """
        N = len(self.nodes) - 1
        for defect in range(N):
            if abs(np.sum(self.weights * (self.nodes ** defect))) > tol:
                return N - defect
        return 0

    def degree(self, tol=1e-12):
        """Compute the pair `(m,n)` of true degrees of the numerator and denominator."""
        return (self.degree_numer(tol=tol), self.degree_denom(tol=tol))

    def reduce_order(self):
        """Return a new :class:`BarycentricRational` which represents the same rational
        function as this one, but with minimal possible order.

        See (Ionita 2013), PhD thesis.
        """
        # sample at intermediate nodes and compute Loewner matrix
        aux_nodes = (self.nodes[1:] + self.nodes[:-1]) / 2
        aux_v = self(aux_nodes)
        L = (aux_v[:, None] - self.values[None, :]) / (aux_nodes[:, None] - self.nodes[None, :])

        # determine the order as the rank of L (cf. (Ionita 2013))
        order = np.linalg.matrix_rank(L)
        if order == self.order:
            # already minimal; return a copy
            return BarycentricRational(self.nodes.copy(), self.values.copy(), self.weights.copy())

        n = order + 1       # number of nodes in new barycentric function
        scale = 1 if n==1 else int((len(self.nodes) - 1) / (n - 1))     # distribute new nodes over the old ones
        subset = np.arange(0, scale*n, scale)       # choose a subset of n nodes from self.nodes

        # compute Loewner matrix for new subset of nodes
        nodes = self.nodes[subset]
        values = self.values[subset]
        aux_nodes = (nodes[1:] + nodes[:-1]) / 2
        aux_v = self(aux_nodes)
        L = (aux_v[:, None] - values[None, :]) / (aux_nodes[:, None] - self.nodes[None, subset])

        # compute weight vector in nullspace
        w = _nullspace_vector(L)
        return BarycentricRational(nodes, values, w)
################################################################################
def aaa(Z, F, tol=1e-13, mmax=100, return_errors=False):
    """Compute a rational approximation of `F` over the points `Z` using the
    AAA algorithm.

    Arguments:
        Z (array): the sampling points of the function. Unlike for interpolation
            algorithms, where a small number of nodes is preferred, since the
            AAA algorithm chooses its support points adaptively, it is better
            to provide a finer mesh over the support.
        F: the function to be approximated; can be given as a function or as an
            array of function values over ``Z``.
        tol: the approximation tolerance
        mmax: the maximum number of iterations/degree of the resulting approximant
        return_errors: if `True`, also return the history of the errors over
            all iterations

    Returns:
        BarycentricRational: an object which can be called to evaluate the
        rational function, and can be queried for the poles, residues, and
        zeros of the function.

    For more information, see the paper

      | The AAA Algorithm for Rational Approximation
      | Yuji Nakatsukasa, Olivier Sete, and Lloyd N. Trefethen
      | SIAM Journal on Scientific Computing 2018 40:3, A1494-A1522
      | https://doi.org/10.1137/16M1106122

    as well as the Chebfun package <http://www.chebfun.org>. This code is an
    almost direct port of the Chebfun implementation of aaa to Python.
    """
    Z = np.asanyarray(Z).ravel()
    if callable(F):
        # allow functions to be passed
        F = F(Z)
    F = np.asanyarray(F).ravel()

    J = list(range(len(F)))     # indices of points not yet chosen as support points
    zj = np.empty(0, dtype=Z.dtype)     # chosen support points
    fj = np.empty(0, dtype=F.dtype)     # function values at the support points
    C = []
    errors = []

    # convergence tolerance relative to the magnitude of F
    reltol = tol * np.linalg.norm(F, np.inf)

    R = np.mean(F) * np.ones_like(F)    # initial approximation: the constant mean

    for m in range(mmax):
        # find largest residual
        jj = np.argmax(abs(F - R))
        zj = np.append(zj, (Z[jj],))
        fj = np.append(fj, (F[jj],))
        J.remove(jj)

        # Cauchy matrix containing the basis functions as columns
        C = 1.0 / (Z[J,None] - zj[None,:])
        # Loewner matrix
        A = (F[J,None] - fj[None,:]) * C

        # compute weights as right singular vector for smallest singular value
        _, _, Vh = np.linalg.svd(A)
        wj = Vh[-1, :].conj()

        # approximation: numerator / denominator
        N = C.dot(wj * fj)
        D = C.dot(wj)

        # update residual
        R = F.copy()
        R[J] = N / D

        # check for convergence
        errors.append(np.linalg.norm(F - R, np.inf))
        if errors[-1] <= reltol:
            break

    r = BarycentricRational(zj, fj, wj)
    return (r, errors) if return_errors else r
def interpolate_rat(nodes, values, use_mp=False):
    """Compute a rational function which interpolates the given nodes/values.

    Args:
        nodes (array): the interpolation nodes; must have odd length and
            be passed in strictly increasing or decreasing order
        values (array): the values at the interpolation nodes
        use_mp (bool): whether to use ``gmpy2`` for extended precision. Is
            automatically enabled if `nodes` or `values` use ``gmpy2``.

    Returns:
        BarycentricRational: the rational interpolant. If there are `2n + 1` nodes,
        both the numerator and denominator have degree at most `n`.

    References:
        https://doi.org/10.1109/LSP.2007.913583
    """
    # ref: (Knockaert 2008), doi:10.1109/LSP.2007.913583
    # see also: (Ionita 2013), PhD thesis, Rice U
    values = np.asanyarray(values)
    nodes = np.asanyarray(nodes)
    n = len(values) // 2 + 1
    m = n - 1
    if not len(values) == n + m or not len(nodes) == n + m:
        raise ValueError('number of nodes should be odd')
    # the even-indexed nodes (xa, va) become the support points of the
    # interpolant; the odd-indexed ones enter only through the Loewner matrix
    xa, xb = nodes[0::2], nodes[1::2]
    va, vb = values[0::2], values[1::2]
    # compute the Loewner matrix
    B = (vb[:, None] - va[None, :]) / (xb[:, None] - xa[None, :])
    # choose a weight vector in the nullspace of B
    weights = _nullspace_vector(B, use_mp=use_mp)
    return BarycentricRational(xa, va, weights)
def _pseudo_equi_nodes(n, k):
    """Return `k` indices chosen quasi-equispaced from ``range(n)``."""
    if k > n:
        raise ValueError("k must not be larger than n")
    # spread k real positions evenly over [0, n-1], then round to indices
    positions = np.linspace(0.0, n - 1, k)
    return np.rint(positions).astype(int)
def _defect_matrix(x, i0, iend, f=None):
    """Matrix whose rows are x**p for p in range(i0, iend), optionally scaled
    columnwise by `f`."""
    exponents = np.arange(i0, iend)[:, None]
    W = x[None, :] ** exponents
    if f is None:
        return W
    return W * f[None, :]
def _defect_matrix_arnoldi(x, m, f=None):
    """Orthonormalized defect matrix built by Arnoldi-style Gram-Schmidt.

    Returns an (m, len(x)) matrix with orthonormal rows spanning
    span{f, f*x, ..., f*x^(m-1)}. Based on an idea from Filip et al., 2018,
    p. A2431 (doi: 10.1137/17M1132409).
    """
    if m == 0:
        return np.empty((0, len(x)), dtype=x.dtype)
    if f is None:
        f = 0 * x + 1   # vector of ones with the proper dtype (also for MP)
    # slight hack: mpfr objects have no sqrt() method, so object arrays must
    # use flamp's norm instead of numpy's
    is_obj = (f.dtype == 'O' or x.dtype == 'O')
    norm = flamp.vector_norm if is_obj else np.linalg.norm
    rows = [f / norm(f)]
    for _ in range(1, m):
        q = rows[-1] * x
        # orthogonalize against all previously computed rows
        for prev in rows:
            q -= prev * np.inner(q, prev)
        q /= norm(q)
        rows.append(q)
    return np.array(rows)
def interpolate_with_degree(nodes, values, deg, use_mp=False):
    """Compute a rational function which interpolates the given nodes/values
    with given degree `m` of the numerator and `n` of the denominator.

    Args:
        nodes (array): the interpolation nodes
        values (array): the values at the interpolation nodes
        deg: a pair `(m, n)` of the degrees of the interpolating rational
            function. The number of interpolation nodes must be `m + n + 1`.
        use_mp (bool): whether to use ``gmpy2`` for extended precision. Is
            automatically enabled if `nodes` or `values` use ``gmpy2``.

    Returns:
        BarycentricRational: the rational interpolant

    References:
        https://doi.org/10.1016/S0377-0427(96)00163-X
    """
    m, n = deg
    nn = m + n + 1
    if len(nodes) != nn or len(values) != nn:
        raise ValueError('number of interpolation nodes must be m + n + 1')
    if n == 0:
        # constant denominator: plain polynomial interpolation suffices
        return interpolate_poly(nodes, values)
    elif m == n:
        # diagonal case handled by the dedicated routine
        return interpolate_rat(nodes, values, use_mp=use_mp)
    else:
        N = max(m, n)   # order of barycentric rational function
        # split given values into primary and secondary nodes
        primary_indices = _pseudo_equi_nodes(nn, N + 1)
        secondary_indices = np.setdiff1d(np.arange(nn), primary_indices, assume_unique=True)
        xp, vp = nodes[primary_indices], values[primary_indices]
        xs, vs = nodes[secondary_indices], values[secondary_indices]
        # compute Loewner matrix - shape: (m + n - N) x (N + 1)
        L = (vs[:, None] - vp[None, :]) / (xs[:, None] - xp[None, :])
        # add weight constraints for denominator and numerator degree; see (Berrut, Mittelmann 1997)
        # B has shape N x (N + 1)
        B = np.vstack((
            L,
            _defect_matrix_arnoldi(xp, N - n),      # reduce maximum denominator degree by N - n
            _defect_matrix_arnoldi(xp, N - m, vp)   # reduce maximum numerator degree by N - m
        ))
        # choose a weight vector in the nullspace of B
        weights = _nullspace_vector(B, use_mp=use_mp)
        return BarycentricRational(xp, vp, weights)
def _polynomial_weights(x):
n = len(x)
w = np.array([
1.0 / np.prod([x[i] - x[j] for j in range(n) if j != i])
for i in range(n)
])
return w / np.abs(w).max()
def interpolate_poly(nodes, values):
    """Compute the interpolating polynomial for the given nodes and values in
    barycentric form.

    Args:
        nodes (array): the interpolation nodes
        values (array): the function values at the interpolation nodes

    Returns:
        BarycentricRational: the polynomial interpolant
    """
    if len(nodes) != len(values):
        raise ValueError('input arrays should have the same length')
    # a polynomial is the special rational whose weights are the classical
    # barycentric weights of the node set
    return BarycentricRational(nodes, values, _polynomial_weights(nodes))
def interpolate_with_poles(nodes, values, poles, use_mp=False):
    """Compute a rational function which interpolates the given values at the
    given nodes and which has the given poles.

    Args:
        nodes (array): the interpolation nodes (length `n`)
        values (array): the function values at the interpolation nodes (length `n`)
        poles (array): the locations of the poles of the rational function (length `n-1`)
        use_mp (bool): whether to use ``gmpy2`` for extended precision

    Returns:
        BarycentricRational: the rational interpolant with the given poles
    """
    # ref: (Knockaert 2008), doi:10.1109/LSP.2007.913583
    num_nodes = len(nodes)
    if len(values) != num_nodes or len(poles) + 1 != num_nodes:
        raise ValueError('invalid length of arrays')
    nodes = np.asanyarray(nodes)
    values = np.asanyarray(values)
    poles = np.asanyarray(poles)
    # Cauchy matrix of the pole/node configuration; a weight vector in its
    # nullspace yields a barycentric rational with exactly the given poles
    C = 1.0 / (poles[:, None] - nodes[None, :])
    weights = _nullspace_vector(C, use_mp=use_mp)
    return BarycentricRational(nodes, values, weights)
def floater_hormann(nodes, values, blending):
    """Compute the Floater-Hormann rational interpolant for the given nodes and
    values.

    Args:
        nodes (array): the interpolation nodes (length `n`)
        values (array): the function values at the interpolation nodes (length `n`)
        blending (int): the blending parameter (usually called `d` in the literature),
            an integer between 0 and `n-1` (inclusive). For functions with
            higher smoothness, the blending parameter may be chosen higher. For
            `d=n-1`, the result is the polynomial interpolant.

    Returns:
        BarycentricRational: the rational interpolant

    References:
        (Floater, Hormann 2007): https://doi.org/10.1007/s00211-007-0093-y
    """
    n = len(values) - 1
    if n != len(nodes) - 1:
        raise ValueError('input arrays should have the same length')
    if not (0 <= blending <= n):
        raise ValueError('blending parameter should be between 0 and n')
    weights = np.zeros(n + 1)
    # abbreviations to match the formulas in the literature
    d = blending
    x = nodes
    for i in range(n + 1):
        # indices k of the local interpolants whose node window contains i
        Ji = range(max(0, i-d), min(i, n-d) + 1)
        weight = 0.0
        for k in Ji:
            # product of inverse distances over the d+1 nodes of the k-th
            # local window, skipping node i itself
            weight += np.prod([1.0 / abs(x[i] - x[j])
                    for j in range(k, k+d+1)
                    if j != i])
        # alternating sign (-1)^(i-d) as in the Floater-Hormann construction
        weights[i] = (-1.0)**(i-d) * weight
    return BarycentricRational(nodes, values, weights)
def _piecewise_mesh(nodes, n):
"""Build a mesh over an interval with subintervals described by the array
``nodes``. Each subinterval has ``n`` points spaced uniformly between the
two neighboring nodes. The final mesh has ``(len(nodes) - 1) * n`` points.
"""
#z = np.concatenate(([z0], nodes, [z1]))
M = len(nodes)
return np.concatenate(tuple(
np.linspace(nodes[i], nodes[i+1], n, endpoint=(i==M-2))
for i in range(M - 1)))
def local_maxima_bisect(g, nodes, num_iter=10):
    """Approximate the local maximum of `g` inside each interval between
    consecutive entries of `nodes` by iteratively shrinking a three-point
    bracket; the two boundary intervals are handled by `_boundary_search`.

    Returns:
        (Z, gZ): arrays of the per-interval maximum abscissae and values,
        of length ``len(nodes) - 1``.
    """
    L, R = nodes[1:-2], nodes[2:-1]
    # compute 3 x m array of endpoints and midpoints
    z = np.vstack((L, (L + R) / 2, R))
    values = g(z[1])
    m = z.shape[1]
    for k in range(num_iter):
        # compute quarter points
        q = np.vstack(((z[0] + z[1]) / 2, (z[1] + z[2])/ 2))
        qval = g(q)
        # move triple of points to be centered on the maximum
        for j in range(m):
            maxk = np.argmax([qval[0,j], values[j], qval[1,j]])
            if maxk == 0:
                # left quarter point is largest: new triple (z0, q0, z1)
                z[1,j], z[2,j] = q[0,j], z[1,j]
                values[j] = qval[0,j]
            elif maxk == 1:
                # midpoint still largest: halve the bracket around it, (q0, z1, q1)
                z[0,j], z[2,j] = q[0,j], q[1,j]
            else:
                # right quarter point is largest: new triple (z1, q1, z2)
                z[0,j], z[1,j] = z[1,j], q[1,j]
                values[j] = qval[1,j]
    # find maximum per column (usually the midpoint)
    #maxidx = values.argmax(axis=0)
    # select abscissae and values at maxima
    #Z, gZ = z[maxidx, np.arange(m)], values[np.arange(m)]
    Z, gZ = np.empty(m+2), np.empty(m+2)
    Z[1:-1] = z[1, :]
    gZ[1:-1] = values
    # treat the boundary intervals specially since usually the maximum is at the boundary
    Z[0], gZ[0] = _boundary_search(g, nodes[0], nodes[1], num_iter=3)
    Z[-1], gZ[-1] = _boundary_search(g, nodes[-2], nodes[-1], num_iter=3)
    return Z, gZ
def local_maxima_golden(g, nodes, num_iter):
    """Approximate the local maximum of `g` inside each interval between
    consecutive entries of `nodes` by a vectorized golden section search;
    the two boundary intervals are handled by `_boundary_search`.

    Returns:
        (Z, gZ): arrays of the per-interval maximum abscissae and values,
        of length ``len(nodes) - 1``.
    """
    # vectorized version of golden section search
    golden_mean = (3.0 - np.sqrt(5.0)) / 2   # 0.381966...
    L, R = nodes[1:-2], nodes[2:-1]     # skip boundary intervals (treated below)
    # compute 3 x m array of endpoints and midpoints
    z = np.vstack((L, L + (R-L)*golden_mean, R))
    m = z.shape[1]
    all_m = np.arange(m)
    gB = g(z[1])
    for k in range(num_iter):
        # z[1] = midpoints
        mids = (z[0] + z[2]) / 2
        # compute new nodes according to golden section: probe inside the
        # larger of the two subintervals around the current center
        farther_idx = (z[1] <= mids).astype(int) * 2        # either 0 or 2
        X = z[1] + golden_mean * (z[farther_idx, all_m] - z[1])
        gX = g(X)
        for j in range(m):
            x = X[j]
            gx = gX[j]
            b = z[1,j]
            if gx > gB[j]:
                # probe beats the current center: it becomes the new center
                if x > b:
                    z[0,j] = z[1,j]
                else:
                    z[2,j] = z[1,j]
                z[1,j] = x
                gB[j] = gx
            else:
                # probe is worse: it becomes the new bracket boundary
                if x < b:
                    z[0,j] = x
                else:
                    z[2,j] = x
    # prepare output arrays
    Z, gZ = np.empty(m+2, dtype=z.dtype), np.empty(m+2, dtype=gB.dtype)
    Z[1:-1] = z[1, :]
    gZ[1:-1] = gB
    # treat the boundary intervals specially since usually the maximum is at the boundary
    # (no bracket available!)
    Z[0], gZ[0] = _boundary_search(g, nodes[0], nodes[1], num_iter=3)
    Z[-1], gZ[-1] = _boundary_search(g, nodes[-2], nodes[-1], num_iter=3)
    return Z, gZ
def _boundary_search(g, a, c, num_iter):
    """Search for a maximum of `g` on `[a, c]` when no interior bracket is
    known; typically the maximum lies at one of the endpoints.
    Returns the abscissa and value of the (approximate) maximum."""
    X = [a, c]
    Xvals = [g(a), g(c)]
    max_side = 0 if (Xvals[0] >= Xvals[1]) else 1
    other_side = 1 - max_side
    for k in range(num_iter):
        xm = (X[0] + X[1]) / 2
        gm = g(xm)
        if gm < Xvals[max_side]:
            # no new maximum found; shrink interval and iterate
            X[other_side] = xm
            Xvals[other_side] = gm
        else:
            # the midpoint is at least as large as both endpoints: we found a
            # bracket for the maximum and can switch to golden section search
            return _golden_search(g, X[0], X[1], num_iter=num_iter-k)
    return X[max_side], Xvals[max_side]
def _golden_search(g, a, c, num_iter=20):
    """Golden section search for a local maximum of `g` on `[a, c]`.
    Falls back to `_boundary_search` if the midpoint does not bracket a
    maximum. Returns the abscissa and value of the (approximate) maximum."""
    golden_mean = 0.5 * (3.0 - np.sqrt(5.0))
    b = (a + c) / 2
    gb = g(b)
    ga, gc = g(a), g(c)
    if not (gb >= ga and gb >= gc):
        # not bracketed - maximum may be at the boundary
        return _boundary_search(g, a, c, num_iter)
    for k in range(num_iter):
        mid = (a + c) / 2
        # probe inside the larger of the two subintervals around b
        if b > mid:
            x = b + golden_mean * (a - b)
        else:
            x = b + golden_mean * (c - b)
        gx = g(x)
        if gx > gb:
            # found a larger point, use it as center
            if x > b:
                a = b
            else:
                c = b
            b = x
            gb = gx
        else:
            # point is smaller, use it as boundary
            if x < b:
                a = x
            else:
                c = x
    return b, gb
def local_maxima_sample(g, nodes, N):
    """Approximate the local maximum of `g` in each interval between
    consecutive entries of `nodes` by sampling `N` uniform points per
    subinterval. Returns arrays of the per-interval maximum abscissae and
    values."""
    # build the piecewise-uniform mesh inline: N samples per subinterval,
    # the right endpoint included only on the very last subinterval
    M = len(nodes)
    mesh = np.concatenate(tuple(
        np.linspace(nodes[i], nodes[i+1], N, endpoint=(i == M-2))
        for i in range(M - 1)))
    Z = mesh.reshape((-1, N))
    vals = g(Z)
    best = vals.argmax(axis=1)
    rows = np.arange(Z.shape[0])
    return Z[rows, best], vals[rows, best]
def chebyshev_nodes(num_nodes, interval=(-1.0, 1.0), use_mp=False):
    """Compute `num_nodes` Chebyshev nodes of the first kind in the given interval."""
    a, b = interval
    if use_mp:
        # extended-precision variant via gmpy2/flamp
        k = flamp.to_mp(np.arange(1, num_nodes + 1))
        nodes = (1 - flamp.cos((2*k - 1) / (2*num_nodes) * gmpy2.const_pi()))
        a, b = gmpy2.mpfr(a), gmpy2.mpfr(b)
    else:
        # standard double-precision nodes in (0, 2)
        k = np.arange(1, num_nodes + 1)
        nodes = (1 - np.cos((2*k - 1) / (2*num_nodes) * np.pi))
    # affine map of the raw nodes from (0, 2) onto the target interval (a, b)
    return nodes * ((b - a) / 2) + a
def brasil(f, interval, deg, tol=1e-4, maxiter=1000, max_step_size=0.1,
        step_factor=0.1, npi=-30, init_steps=100, info=False):
    """Best Rational Approximation by Successive Interval Length adjustment.

    Computes best rational or polynomial approximations in the maximum norm by
    the BRASIL algorithm (see reference below).

    References:
        https://doi.org/10.1007/s11075-020-01042-0

    Arguments:
        f: the scalar function to be approximated. Must be able to operate
            on arrays of arguments.
        interval: the bounds `(a, b)` of the approximation interval
        deg: the degree of the numerator `m` and denominator `n` of the
            rational approximation; either an integer (`m=n`) or a pair `(m, n)`.
            If `n = 0`, a polynomial best approximation is computed.
        tol: the maximum allowed deviation from equioscillation
        maxiter: the maximum number of iterations
        max_step_size: the maximum allowed step size
        step_factor: factor for adaptive step size choice
        npi: points per interval for error calculation. If `npi < 0`,
            golden section search with `-npi` iterations is used instead of
            sampling. For high-accuracy results, `npi=-30` is typically a good
            choice.
        init_steps: how many steps of the initialization iteration to run
        info: whether to return an additional object with details

    Returns:
        BarycentricRational: the computed rational approximation. If `info` is
        True, instead returns a pair containing the approximation and an
        object with additional information (see below).

    The `info` object returned along with the approximation if `info=True` has
    the following members:

    * **converged** (bool): whether the method converged to the desired tolerance **tol**
    * **error** (float): the maximum error of the approximation
    * **deviation** (float): the relative error between the smallest and the largest
      equioscillation peak. The convergence criterion is **deviation** <= **tol**.
    * **nodes** (array): the abscissae of the interpolation nodes (2*deg + 1)
    * **iterations** (int): the number of iterations used, including the initialization phase
    * **errors** (array): the history of the maximum error over all iterations
    * **deviations** (array): the history of the deviation over all iterations
    * **stepsizes** (array): the history of the adaptive step size over all iterations

    Additional information about the resulting rational function, such as poles,
    residues and zeroes, can be queried from the :class:`BarycentricRational` object
    itself.

    Note:
        This function supports ``gmpy2`` for extended precision. To enable
        this, specify the interval `(a, b)` as `mpfr` numbers, e.g.,
        ``interval=(mpfr(0), mpfr(1))``. Also make sure that the function `f`
        consumes and outputs arrays of `mpfr` numbers; the Numpy function
        :func:`numpy.vectorize` may help with this.
    """
    a, b = interval
    assert a < b, 'Invalid interval'
    if np.isscalar(deg):
        m = n = deg
    else:
        if len(deg) != 2:
            raise TypeError("'deg' must be an integer or pair of integers")
        m, n = deg
    nn = m + n + 1      # number of interpolation nodes
    errors = []
    stepsize = np.nan
    # start with Chebyshev nodes
    nodes = chebyshev_nodes(nn, (a, b))
    # choose proper interpolation routine
    if n == 0:
        interp = interpolate_poly
    elif m == n:
        interp = interpolate_rat
    else:
        interp = lambda x,f: interpolate_with_degree(x, f, (m, n))
    for k in range(init_steps + maxiter):
        r = interp(nodes, f(nodes))
        # determine local maxima per interval
        all_nodes = np.concatenate(([a], nodes, [b]))
        errfun = lambda x: abs(f(x) - r(x))
        if npi > 0:
            # equispaced error sampling in each interval
            local_max_x, local_max = local_maxima_sample(errfun, all_nodes, npi)
        else:
            # golden section search for the error maximum in each interval
            local_max_x, local_max = local_maxima_golden(errfun, all_nodes, num_iter=-npi)
        max_err = local_max.max()
        # relative spread between largest and smallest local error peak
        deviation = max_err / local_max.min() - 1
        errors.append((max_err, deviation, stepsize))
        converged = deviation <= tol
        if converged or k == init_steps + maxiter - 1:
            # convergence or maxiter reached -- return result
            if not converged:
                print('warning: BRASIL did not converge; dev={0:.3}, err={1:.3}'.format(deviation, max_err))
            else:
                # Until now, we have only equilibrated the absolute errors.
                # Check equioscillation property for the signed errors to make
                # sure we actually found the best approximation.
                signed_errors = f(local_max_x) - r(local_max_x)
                # normalize them so that they are all 1 in case of equioscillation
                signed_errors /= (-1)**np.arange(len(signed_errors)) * np.sign(signed_errors[0]) * max_err
                equi_err = abs(1.0 - signed_errors).max()
                if equi_err > tol:
                    print('warning: equioscillation property not satisfied, deviation={0:.3}'.format(equi_err))
            if info:
                from collections import namedtuple
                Info = namedtuple('Info',
                        'converged error deviation nodes iterations ' +
                        'errors deviations stepsizes')
                errors = np.array(errors)
                return r, Info(
                        converged, max_err, deviation, nodes, k,
                        errors[:,0], errors[:,1], errors[:,2],
                )
            else:
                return r
        if k < init_steps:
            # PHASE 1:
            # move an interpolation node to the point of largest error
            max_intv_i = local_max.argmax()
            max_err_x = local_max_x[max_intv_i]
            # we can't move a node to the boundary, so check for that case
            # and move slightly inwards
            if max_err_x == a:
                max_err_x = (3 * a + nodes[0]) / 4
            elif max_err_x == b:
                max_err_x = (nodes[-1] + 3 * b) / 4
            # find the node to move (neighboring the interval with smallest error)
            min_k = local_max.argmin()
            if min_k == 0:
                min_j = 0
            elif min_k == nn:
                min_j = nn - 1
            else:
                # of the two nodes on this interval, choose the farther one
                if abs(max_err_x - nodes[min_k-1]) < abs(max_err_x - nodes[min_k]):
                    min_j = min_k
                else:
                    min_j = min_k - 1
            # move the node and re-sort the array
            nodes[min_j] = max_err_x
            nodes.sort()
        else:
            # PHASE 2:
            # global interval size adjustment
            intv_lengths = np.diff(all_nodes)
            mean_err = np.mean(local_max)
            max_dev = abs(local_max - mean_err).max()
            normalized_dev = (local_max - mean_err) / max_dev
            stepsize = min(max_step_size, step_factor * max_dev / mean_err)
            # shrink intervals with above-average error, grow the others
            scaling = (1.0 - stepsize)**normalized_dev
            intv_lengths *= scaling
            # rescale so that they add up to b-a again
            intv_lengths *= (b - a) / intv_lengths.sum()
            nodes = np.cumsum(intv_lengths)[:-1] + a
################################################################################
# Newton approximation algorithms
################################################################################
def _omega(x, X):
"""Nodal polynomial with nodes `x` evaluated at `X`."""
return np.prod(X[:, None] - x[None, :], axis=-1)
def _om_j(j, x, X):
"""Nodal polynomial with nodes `x` without j-th term evaluated at `X`."""
X = np.atleast_1d(X)
indices = np.delete(np.arange(len(x)), j)
return np.prod(X[:, None] - x[None, indices], axis=-1)
def _p_gradient(x, fx, f_deriv_x, X):
    """Derivatives of interpolating polynomial through nodes `x` w.r.t. all interpolating nodes
    evaluated at the abscissae `X`.

    Args:
        x: the interpolation nodes
        fx: the function values at the nodes
        f_deriv_x: the derivatives of the function at the nodes
        X: the evaluation abscissae

    Returns:
        array whose i-th row is the derivative w.r.t. node `x[i]`, evaluated
        at the points `X`.
    """
    n = len(x)
    # diagonal entries of Q: contribution of moving node i itself
    Q_diag = [
        f_deriv_x[i] - fx[i] * sum(1 / (x[i] - x[k]) for k in range(n) if k != i)
        for i in range(n)]
    # ww[i]: nodal polynomial with node i omitted, evaluated at x[i]
    ww = [_om_j(i, x, x[i])[0] for i in range(n)]
    # off-diagonal entries: cross-terms between nodes i and j
    Q_ij = [[ 0 if (i==j) else fx[j] * ww[i] / (ww[j] * (x[j] - x[i]))
            for j in range(n) ]
            for i in range(n) ]
    Q = np.diag(Q_diag) + Q_ij
    omg = _omega(x, X)
    return np.array([Q[i].sum() * omg / ((X - x[i]) * ww[i]) for i in range(n)])
def _finite_diff(all_nodes, f, eps=1e-8):
# use finite differences to approximate first derivatives in the interior nodes
intv_lengths = np.diff(all_nodes)
delta = eps * np.minimum(intv_lengths[:-1], intv_lengths[1:]) # smaller of the two neighboring intervals
a, b = all_nodes[0], all_nodes[-1]
x = all_nodes[1:-1] # skip beginning and end (a,b) of interval
xplus = np.minimum(x + delta, b)
xminus = np.maximum(x - delta, a)
return (f(xplus) - f(xminus)) / (xplus - xminus)
def bpane(f, f_deriv, interval, deg, tol=1e-8, maxiter=1000, verbose=0, info=False):
    """Best polynomial approximation using Newton's algorithm.

    Compute the best uniform polynomial approximation of degree `deg` of the
    function `f` with derivative `f_deriv` in the given `interval`.

    References:
        https://www.ricam.oeaw.ac.at/files/reports/21/rep21-46.pdf

    Arguments:
        f: the scalar function to be approximated. Must be able to operate
            on arrays of arguments.
        f_deriv: the derivative of `f`. If `None` is passed, a central
            finite difference quotient is used to approximate the derivative.
        interval: the bounds `(a, b)` of the approximation interval
        deg: the degree of the approximating polynomial
        tol: the maximum allowed deviation from equioscillation
        maxiter: the maximum number of iterations
        verbose: if greater than 0, the progress is printed in each iteration
        info: whether to return an additional object with details

    Returns:
        BarycentricRational: the computed polynomial approximation. If `info` is
        True, instead returns a pair containing the approximation and an
        object with additional information (see below).

    The `info` object returned along with the approximation if `info=True` has
    the following members:

    * **error** (float): the maximum error of the approximation
    * **lam** (float): the quantity lambda (signed error)
    * **deviation** (float): the relative error between the smallest and the largest
      equioscillation peak. The convergence criterion is **deviation** <= **tol**.
    * **nodes** (array): the abscissae of the interpolation nodes (`deg` + 1)
    * **iterations** (int): the number of iterations used
    """
    a, b = interval
    nn = deg + 1
    # start with Chebyshev nodes
    x = chebyshev_nodes(nn, (a, b))
    # alternating sign pattern for the nn + 1 equioscillation extrema
    w = (-1)**np.arange(nn + 1)
    lam = None
    # closure over p, which is rebound in every iteration below
    def errfun(X): return abs(f(X) - p(X))
    for it in range(maxiter):
        # interpolate at current nodes x
        p = interpolate_poly(x, f(x))
        # determine the local error maxima
        all_nodes = np.concatenate(([a], x, [b]))
        local_max_x, local_max = local_maxima_golden(errfun, all_nodes, num_iter=30)
        signed_errs = f(local_max_x) - p(local_max_x)
        equalized_errs = signed_errs / w
        if lam is None:     # in first iteration, make a guess for lambda
            lam = np.mean(equalized_errs)
        # compute the Jacobian of the nonlinear system of equations
        if f_deriv:
            derivs = f_deriv(x)
        else:
            # no derivative given: fall back to finite differences
            derivs = _finite_diff(all_nodes, f, eps=1e-8)
        Jac_p = -_p_gradient(x, f(x), derivs, local_max_x).T
        Jac = np.hstack((Jac_p, -w[:, None]))
        # compute the residual
        # NOTE(review): xx appears unused below -- confirm and remove
        xx = np.concatenate((x, [lam]))
        rhs = f(local_max_x) - p(local_max_x) - lam * w
        # direction for the Newton step
        dxl = -np.linalg.solve(Jac, rhs)
        if any(np.isnan(dxl)):
            raise RuntimeError('breakdown in computing Newton step')
        # determine permissible step size tau: the damped step must keep the
        # nodes inside (a, b) and in strictly increasing order
        tau = 1.0
        while True:
            x_new = x + tau * dxl[:-1]
            lam_new = lam + tau * dxl[-1]
            if all(a < x_new) and all(x_new < b) and all(np.diff(x_new) > 0):
                break
            else:
                tau *= 0.5
        x, lam = x_new, lam_new
        # check if the error have correct signs
        equal_signs = (all(equalized_errs >= 0) or all(equalized_errs <= 0))
        # deviation from equioscillation
        dev = local_max.max() / local_max.min() - 1
        if verbose > 0:
            print(f' tau = {tau:8.2g} res = {np.linalg.norm(rhs):8.2g} dev = {dev:8.2g} lambda = {lam:8.2g}')
        if equal_signs and dev < tol:
            if info:
                from collections import namedtuple
                Info = namedtuple('Info',
                        'error lam deviation nodes iterations')
                return p, Info(local_max.max(), lam, dev, x, it)
            else:
                return p
    raise RuntimeError(f'no convergence after {maxiter} iterations')
### rational case
def _interpolate_rat_with_jac(x, fx, f_deriv_x, deg):
    """Return the rational interpolant as well as a function for computing the
    Jacobian with respect to the interpolation nodes.

    Args:
        x: the `m + n + 1` interpolation nodes
        fx: the function values at the nodes
        f_deriv_x: the derivatives of the function at the nodes
        deg: the pair `(m, n)` of numerator/denominator degrees

    Returns:
        a pair `(r, compute_jac)`: the BarycentricRational interpolant `r` and
        a function `compute_jac(xi)` evaluating the Jacobian of `r` with
        respect to the nodes at the abscissae `xi`.
    """
    m, n = deg
    nn = m + n + 1
    assert x.shape[0] == nn, 'Wrong number of nodes'
    N = max(m, n)
    # compute primary and secondary nodes
    idx_p = _pseudo_equi_nodes(nn, N + 1)
    idx_s = np.setdiff1d(np.arange(nn), idx_p, assume_unique=True)
    z, z_hat = x[idx_p], x[idx_s]
    n_s, n_p = len(z_hat), len(z)       # n_p = N + 1
    ######## Jacobian of w ########
    ### Loewner matrix L
    zhk, zl = z_hat[:, np.newaxis], z[np.newaxis, :]
    fzhk, fzl = fx[idx_s, np.newaxis], fx[np.newaxis, idx_p]
    f_deriv_zhk, f_deriv_zl = f_deriv_x[idx_s, np.newaxis], f_deriv_x[np.newaxis, idx_p]
    L = (fzhk - fzl) / (zhk - zl)       # L has shape n_s x n_p
    # derivatives of L with respect to z (column-wise)
    L_deriv_col = (L - f_deriv_zl) / (zhk - zl)
    # derivatives of L with respect to z_hat (row-wise)
    L_deriv_row = (f_deriv_zhk - L) / (zhk - zl)
    ### matrix A containing the degree constraints
    LA = np.vstack((
        L,
        _defect_matrix(z, 0, N - n),
        _defect_matrix(z, 0, N - m, fx[idx_p])
    ))
    # column-wise derivatives of upper part of A
    if N == n:
        A_deriv1 = flamp.zeros((0, n_p))
    else:
        A_deriv1 = np.vstack((
            flamp.zeros((1, n_p)),
            _defect_matrix(z, 0, N - n - 1) * np.arange(1, N - n)[:, None]
        ))
    # column-wise derivatives of lower part of A
    if N == m:
        A_deriv2 = flamp.zeros((0, n_p))
    else:
        A_deriv2 = np.vstack((
            flamp.zeros((1, n_p)),
            _defect_matrix(z, 0, N - m - 1) * np.arange(1, N - m)[:, None]
        ))
    # product rule for the f-scaled defect matrix block
    A_deriv2 = _defect_matrix(z, 0, N - m, f_deriv_x[idx_p]) + fx[None, idx_p] * A_deriv2
    A_deriv_col = np.vstack((A_deriv1, A_deriv2))
    # compute QR of LA.T with high precision
    Q, R = flamp.qr(LA.T)
    R = R[:N, :N]       # keep only square triangular part (drop zeros)
    w = Q[:, -1]        # weight vector in nullspace
    B = -Q[:, :N] @ flamp.L_solve(R.T, flamp.eye(N))
    # compute Jacobian of null vector w with respect to each x_j
    w_jac = flamp.zeros((N + 1, nn))
    # compute derivative of the nullspace of LA in direction L_dot
    for j in range(n_p):
        LA_dot = flamp.zeros(LA.shape)
        LA_dot[:n_s, j] = L_deriv_col[:, j]
        LA_dot[n_s:, j] = A_deriv_col[:, j]
        w_dot = B @ (LA_dot @ w)
        w_jac[:, idx_p[j]] = w_dot
    for j in range(n_s):
        LA_dot = flamp.zeros(LA.shape)
        LA_dot[j, :] = L_deriv_row[j, :]
        w_dot = B @ (LA_dot @ w)
        w_jac[:, idx_s[j]] = w_dot
    ###############################
    # trivial Jacobians of the node and value vectors w.r.t. the full node set
    z_jac = flamp.zeros((n_p, nn))
    ftilde_jac = flamp.zeros((n_p, nn))
    for j in range(n_p):
        k = idx_p[j]
        z_jac[j, k] = gmpy2.mpfr(1)
        ftilde_jac[j, k] = f_deriv_x[k]
    r = BarycentricRational(z, fx[idx_p], w)
    def compute_jac(xi):
        dr_z, dr_f, dr_w = r.jacobians(xi)
        # chain rule
        r_jac = dr_z @ z_jac + dr_f @ ftilde_jac + dr_w @ w_jac
        return r_jac
    return r, compute_jac
def brane(f, f_deriv, interval, deg, tol=1e-16, maxiter=1000, initial_nodes=None, verbose=0, info=False):
    """Best rational approximation using Newton's algorithm.

    Compute the best uniform rational approximation of the function `f` with
    derivative `f_deriv` in the given `interval`.

    References:
        https://www.ricam.oeaw.ac.at/files/reports/22/rep22-02.pdf

    Arguments:
        f: the scalar function to be approximated. Must be able to operate
            on arrays of arguments.
        f_deriv: the derivative of `f`. If `None` is passed, a central
            finite difference quotient is used to approximate the derivative.
        interval: the bounds `(a, b)` of the approximation interval
        deg: the degree `(m, n)` of the approximating rational function
        tol: the maximum allowed deviation from equioscillation
        maxiter: the maximum number of iterations
        initial_nodes: an array of length `m + n + 1` with the starting
            interpolation nodes. If not given, Chebyshev nodes of the first
            kind are used.
        verbose: if greater than 0, the progress is printed in each iteration
        info: whether to return an additional object with details

    Returns:
        BarycentricRational: the computed rational approximation. If `info` is
        True, instead returns a pair containing the approximation and an
        object with additional information (see below).

    The `info` object returned along with the approximation if `info=True` has
    the following members:

    * **error** (float): the maximum error of the approximation
    * **lam** (float): the quantity lambda (signed error)
    * **deviation** (float): the relative error between the smallest and the largest
      equioscillation peak. The convergence criterion is **deviation** <= **tol**.
    * **nodes** (array): the abscissae of the interpolation nodes (`m + n + 1`)
    * **iterations** (int): the number of iterations used

    Note:
        This function requires the ``gmpy2`` and ``flamp`` packages for
        extended precision. Remember to set the precision by
        ``flamp.set_dps(...)`` before use.
    """
    m, n = deg
    nn = m + n + 1
    a, b = interval
    if initial_nodes is not None:
        if len(initial_nodes) != nn:
            raise ValueError('initial nodes have wrong length, should be ' + str(nn))
        x = flamp.to_mp(initial_nodes)
    else:
        x = chebyshev_nodes(nn, interval, use_mp=True)
    # alternating sign pattern for the expected equioscillating errors
    w = (-1)**np.arange(nn + 1)
    lam = None
    for num_iter in range(maxiter):
        all_nodes = np.concatenate(([gmpy2.mpfr(a)], x, [gmpy2.mpfr(b)]))
        if f_deriv:
            derivs = f_deriv(x)
        else:
            # no derivative given: fall back to finite differences
            derivs = _finite_diff(all_nodes, f, eps=1e-8)
        r, compute_jac = _interpolate_rat_with_jac(x, f(x), derivs, deg)
        def errfun(X): return abs(f(X) - r(X))
        local_max_x, local_max = local_maxima_golden(errfun, all_nodes, num_iter=30)
        if lam is None:     # in first iteration, make a guess for lambda
            lam = np.mean((f(local_max_x) - r(local_max_x)) / w)
        # NOTE(review): xx appears unused below -- confirm and remove
        xx = np.concatenate((x, [lam]))
        rhs = f(local_max_x) - r(local_max_x) - lam*w
        # deviation from equioscillation; convergence criterion
        dev = local_max.max() / local_max.min() - 1
        if dev < tol:
            if verbose > 0:
                print('%d iterations, dev = %e, error = %e' % (num_iter, dev, lam))
            if info:
                from collections import namedtuple
                Info = namedtuple('Info',
                        'error lam deviation nodes iterations')
                return r, Info(local_max.max(), lam, dev, x, num_iter)
            else:
                return r
        # direction for the Newton step
        Jac = -np.hstack((compute_jac(local_max_x), w[:, np.newaxis]))
        # solve the system for the Newton step using extended precision
        dxl = flamp.lu_solve(Jac, -rhs)
        # find admissible step size tau: the damped step must keep the nodes
        # inside (a, b) and in strictly increasing order
        tau = gmpy2.mpfr(1)
        while True:
            x_new = x + tau * dxl[:-1]
            lam_new = lam + tau * dxl[-1]
            if all(a < x_new) and all(x_new < b) and all(np.diff(x_new) > 0):
                break
            else:
                tau = tau / 2
        x, lam = x_new, lam_new
        if verbose > 0:
            res_norm = flamp.vector_norm(rhs)
            print(f' tau = {tau:8.2g} res = {res_norm:8.2g} dev = {dev:8.2g} lambda = {lam:8.2g}')
    raise RuntimeError(f'no convergence after {maxiter} iterations')
| 52,100
| 37.79449
| 118
|
py
|
baryrat
|
baryrat-master/docs/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))  # make the package in the parent directory importable for autodoc
# -- Project information -----------------------------------------------------
project = 'baryrat'
copyright = '2020-2022, Clemens Hofreither'
author = 'Clemens Hofreither'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx.ext.mathjax',
    'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# the root document of the documentation tree
master_doc = 'index'
| 2,008
| 33.050847
| 79
|
py
|
VAD
|
VAD-master/configure/ACAM/config.py
|
# Hyperparameters for the ACAM VAD model. Semantics are inferred from the
# variable names only -- confirm against the model implementation.
lr = 0.0001              # learning rate
dropout_rate = 0.5       # dropout rate
max_epoch = 100          # maximum number of training epochs
batch_size = 128         # minibatch size
w = 19                   # presumably the context window size -- TODO confirm
u = 9                    # presumably the window step/subsampling parameter -- TODO confirm
glimpse_hidden = 128     # hidden layer size (glimpse module, per variable name)
bp_hidden = 128          # hidden layer size (per variable name)
glimpse_out = 128        # output size of the glimpse module (per variable name)
nGlimpse = 7             # number of glimpses (per variable name)
lstm_cell_size = 128     # LSTM cell size
action_hidden_1 = 256    # first hidden layer size of the action module
action_hidden_2 = 256    # second hidden layer size of the action module
| 210
| 14.071429
| 21
|
py
|
VAD
|
VAD-master/configure/LSTM/config.py
|
# Hyperparameters for the LSTM-based VAD model.
lr=0.0001  # Learning rate
max_epoch=100  # Max epoch
dropout_rate=0.5  # Dropout rate
target_delay=5  # Target delay of LSTM
num_layers=3  # The number of layers of LSTM
cell_size=256  # LSTM cell size
seq_len=20  # Sequence length
num_batches=200  # The number of batches
# Note that batch_size=seq_len*num_batches
| 379
| 37
| 62
|
py
|
VAD
|
VAD-master/configure/DNN/config.py
|
# Hyperparameters for the DNN VAD model. Semantics inferred from names --
# confirm against the model implementation.
lr=0.0001         # learning rate
dropout_rate=0.5  # dropout rate
max_epoch=100     # maximum number of training epochs
batch_size=128    # minibatch size
w=19              # presumably the context window size -- TODO confirm
u=9               # presumably the window step/subsampling parameter -- TODO confirm
num_hidden_1=512  # size of the first hidden layer
num_hidden_2=512  # size of the second hidden layer
| 99
| 10.111111
| 16
|
py
|
VAD
|
VAD-master/configure/bDNN/config.py
|
# Hyperparameters for the bDNN VAD model. Semantics inferred from names --
# confirm against the model implementation.
lr = 0.0001          # learning rate
dropout_rate = 0.5   # dropout rate
max_epoch = 1000     # maximum number of training epochs
batch_size = 128     # minibatch size
w = 19               # presumably the context window size -- TODO confirm
u = 9                # presumably the window step/subsampling parameter -- TODO confirm
num_hidden_1 = 512   # size of the first hidden layer
num_hidden_2 = 512   # size of the second hidden layer
| 116
| 12
| 18
|
py
|
VAD
|
VAD-master/lib/python/parallel_random_search.py
|
import VAD_Proposed as VR
import numpy as np
import tensorflow as tf
import pickle
from multiprocessing import Process, Queue
'''random search script'''
distribution_num = 7  # number of parallel worker processes / hyperparameter draws
test_num = 1          # number of trials evaluated per worker
max_epoch = 101  # 351 -- training epochs passed through to VAD_Proposed
gpu_0_append = 4      # the first `gpu_0_append` workers run on /gpu:0, the rest on /gpu:1
def get_parameter(min_val, max_val, shape):
    """Draw uniform random samples in [min_val, max_val) with the given 2D shape."""
    # single call to the global RNG, then affine rescale from [0, 1)
    u = np.random.rand(shape[0], shape[1])
    return min_val + (max_val - min_val) * u
def vad_fnc(p_initLr, p_clip_threshold, p_device, p_max_epoch, output):
    """Worker process body: for each sampled hyperparameter pair, configure and
    run VAD_Proposed, then put the collected result records on `output`.

    Each record is [init_lr, clip_threshold, device, accuracy_list, variance_list].
    """
    result = []
    for i in range(len(p_initLr)):
        item = []
        print(i)
        tf.reset_default_graph()  # clear the TF graph between independent runs
        VR.config(c_initLr=p_initLr[i], c_clip_threshold=p_clip_threshold[i], c_device=p_device,
                  c_max_epoch=p_max_epoch)
        accuracy_list, variance_list = VR.main()
        item.append(p_initLr[i])
        item.append(p_clip_threshold[i])
        item.append(p_device)
        item.append(accuracy_list)
        item.append(variance_list)
        result.append(item)
    output.put(result)
# def fnc1(x, y, output):
# z = x + y
# output.put(z)
def fnctot():
    """Sample random hyperparameters, run `vad_fnc` in one process per
    distribution (split across two GPUs), and collect the per-process results
    into a list."""
    init_lr = get_parameter(1e-4, 1e-3, (distribution_num, test_num))
    init_lr = init_lr.tolist()
    # NOTE(review): comment below says 0.01 ~ 1 but the sampled range is (11, 12) -- confirm
    clip_threshold = get_parameter(11, 12, (distribution_num, test_num))  # 0.01 ~ 1
    clip_threshold = clip_threshold.tolist()
    queue_list = []
    procs = []
    for i in range(distribution_num):
        queue_list.append(Queue())  # define queues for saving the outputs of functions
        if i < gpu_0_append:
            procs.append(Process(target=vad_fnc, args=(init_lr[i], clip_threshold[i], '/gpu:0', max_epoch, queue_list[i])))  # define process
        else:
            procs.append(Process(target=vad_fnc, args=(init_lr[i], clip_threshold[i], '/gpu:1', max_epoch, queue_list[i])))
    for p in procs:  # process start
        p.start()
    M_list = []
    for i in range(distribution_num):  # save results from queues and close queues
        M_list.append(queue_list[i].get())
        queue_list[i].close()
    for p in procs:  # close process
        p.join()
    return M_list
if __name__ == "__main__":
    # run the full random search and pickle the collected results to disk
    result = fnctot()
    print("********************END********************")
    with open('/home/sbie/storage2/result/result_proposed_soft.p', 'wb') as f:
        pickle.dump(result, f)
| 2,291
| 25.344828
| 140
|
py
|
VAD
|
VAD-master/lib/python/graph_save.py
|
import os, argparse
import time
import tensorflow as tf
# The original freeze_graph function
# from tensorflow.python.tools.freeze_graph import freeze_graph
# NOTE(review): shadows the built-in `dir` and appears unused in the visible code
dir = os.path.dirname(os.path.realpath(__file__))
def freeze_graph(model_dir, output_dir, output_node_names):
    """Extract the sub graph defined by the output nodes and convert
    all its variables into constants.

    Args:
        model_dir: the root folder containing the checkpoint state file.
        output_dir: folder where ``frozen_model_<timestamp>.pb`` is written.
        output_node_names: a string containing all the output node names,
            comma separated.

    Returns:
        The frozen GraphDef on success, or -1 when output_node_names is empty.

    Raises:
        AssertionError: when model_dir does not exist.
    """
    if not tf.gfile.Exists(model_dir):
        raise AssertionError(
            "Export directory doesn't exists. Please specify an export "
            "directory: %s" % model_dir)
    if not output_node_names:
        print("You need to supply the name of a node to --output_node_names.")
        return -1
    # We retrieve our checkpoint fullpath
    checkpoint = tf.train.get_checkpoint_state(model_dir)
    input_checkpoint = checkpoint.model_checkpoint_path
    # Timestamp the output name so repeated freezes do not overwrite each
    # other. NOTE(review): asctime() contains spaces and colons, which makes
    # awkward file names -- confirm downstream tooling copes with that.
    # (The original also computed an unused absolute_model_dir; removed.)
    localtime = time.asctime(time.localtime(time.time()))
    output_graph = output_dir + "/frozen_model_" + localtime + ".pb"
    # We clear devices to allow TensorFlow to control on which device it will load operations
    clear_devices = True
    # We start a session using a temporary fresh Graph
    with tf.Session(graph=tf.Graph()) as sess:
        # We import the meta graph in the current default Graph
        saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)
        # We restore the weights
        saver.restore(sess, input_checkpoint)
        # We use a built-in TF helper to export variables to constants
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,  # The session is used to retrieve the weights
            tf.get_default_graph().as_graph_def(),  # The graph_def is used to retrieve the nodes
            output_node_names.split(",")  # The output node names are used to select the usefull nodes
        )
        # Finally we serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph." % len(output_graph_def.node))
    return output_graph_def
# Script entry point.
# Fix: freeze_graph() takes three positional arguments
# (model_dir, output_dir, output_node_names); the original call passed only
# two, which raised a TypeError before any freezing happened. An
# --output_dir flag is added (defaults to the current directory).
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_dir", type=str, default="", help="Model folder to export")
    parser.add_argument("--output_dir", type=str, default=".",
                        help="Folder where the frozen graph file is written")
    parser.add_argument("--output_node_names", type=str, default="", help="The name of the output nodes, comma separated.")
    args = parser.parse_args()
    freeze_graph(args.model_dir, args.output_dir, args.output_node_names)
| 2,946
| 39.930556
| 123
|
py
|
VAD
|
VAD-master/lib/python/parallel_result_load.py
|
import pickle

# Reload the pickled hyper-parameter sweep results produced by the parallel
# runner, binding them to `result` for interactive inspection.
with open('/home/sbie/storage2/result/result_proposed_soft.p', 'rb') as handle:
    result = pickle.load(handle)
print("result_load")
| 140
| 19.142857
| 74
|
py
|
VAD
|
VAD-master/lib/python/VAD_Proposed.py
|
import tensorflow as tf
import numpy as np
import utils as utils
import re
import data_reader_bDNN_v2 as dr
import os, sys
import time
import subprocess
# import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import scipy.io as sio
from sklearn import metrics
from matplotlib import colors, cm, pyplot as plt
from scipy.stats import binom
from tensorflow.contrib import rnn
from scipy.optimize import brentq
from scipy.interpolate import interp1d
'''program parameter'''
# Module-level configuration for the ACAM VAD model. Several of these
# globals are rebound by train_config()/test_config()/config() below.
visualization = False  # when True, attention maps are collected for plotting
SEED = 1
reset = True  # remove all existed logs and initialize log directories
device = '/gpu:0'
tf.reset_default_graph()
tf.set_random_seed(SEED)
mode = 'test'  # 'train' or 'test'; overrides the reset/log dirs below
# FLAGS = tf.flags.FLAGS
# tf.flags.DEFINE_string('mode', 'test', "mode : train/ test [default : train]")
'''file directory'''
file_dir = "/home/sbie/storage2/VAD_Database/SE_TIMIT_MRCG_0328"
input_dir = file_dir
output_dir = file_dir + "/Labels"
valid_file_dir = "/home/sbie/storage2/VAD_Database/NX_TIMIT_MRCG_small2"
# valid_file_dir = "/home/sbie/storage2/VAD_Database/record_data"
test_file_dir = "/home/sbie/storage2/VAD_Database/NX_TIMIT_MRCG_big"
norm_dir = input_dir
logs_dir = "/home/sbie/github/VAD_bDNN_baseline/logs_proposed_multi"
save_dir = "/home/sbie/storage2/VAD_Database/saved_model/candidate"
initial_logs_dir = "/home/sbie/storage2/VAD_Database/saved_model/my_converted_checkpoint2"
# initial_logs_dir = "/fake_dir"
ckpt_name = '/RF'
if mode == 'test':
    # Test mode never wipes logs and points at the converted checkpoint.
    reset = False
    # logs_dir = "/home/sbie/github/VAD_bDNN_baseline/logs_backup0424/logs_proposed930"
    valid_file_dir = "/home/sbie/storage2/VAD_Database/NX_TIMIT_MRCG_small"
    logs_dir = "/home/sbie/storage2/VAD_Database/saved_model/my_converted_checkpoint"
    # test_file_dir = valid_file_dir
if reset:
    print('log directory was initialized')
    os.popen('rm -rf ' + logs_dir + '/*')
    os.popen('mkdir ' + logs_dir + '/train')
    os.popen('mkdir ' + logs_dir + '/valid')
summary_list = ["cost", "accuracy_SNR_-5", "accuracy_SNR_0", "accuracy_SNR_5", "accuracy_SNR_10",
                "accuracy_across_all_SNRs"]
'''in-output parameter'''
w = 19  # bDNN window half-width; w default = 19
u = 9  # bDNN sub-window stride; u default = 9
assert (w-1) % u == 0, "w-1 must be divisible by u"
num_features = 768  # for MRCG feature
bdnn_winlen = (((w-1) / u) * 2) + 3
bdnn_inputsize = int(bdnn_winlen * num_features)
bdnn_outputsize = int(bdnn_winlen)
model_config = {"w": w, "u": u}
'''training parameter'''
beta = 1  # softmax parameter
SMALL_NUM = 1e-5  # numeric floor inside log() in the REINFORCE term
eval_num_batches = 2e5
max_epoch = int(5000)
dropout_rate = 0.5
decay = 0.9  # batch normalization decay factor
eval_th = th = 0.5  # decision threshold on soft predictions
batch_size = int(4096)  # default = 4096 * 2
valid_batch_size = batch_size
clip_th = 11  # gradient global-norm clip; default : 0.90491669
# initLr = 0.000605  # default : 0.000970598, 0.000605
initLr = 0.000605  # default : 0.000970598, 0.000605
lrDecayRate = .95
lrDecayFreq = 200  # default : 200
val_start_step = 100
val_freq = 1
data_len = None
eval_type = 0
'''Model parameter'''
glimpse_hidden = 128
bp_hidden = 128
glimpse_out = bp_out = 128
nGlimpses = 7  # number of attention glimpses; 7
lstm_cell_size = 128
action_hidden_1 = 256  # default : 256
action_hidden_2 = 256  # default : 256
'''attention visualization'''
attention = []  # filled by evaluation() with per-file mean attention vectors
def train_config(c_train_dir, c_valid_dir, c_logs_dir, c_batch_size_eval, c_max_epoch, c_mode):
    """Point the module-level training configuration at the given locations.

    Rebinds the module globals consumed by main(): training/label/norm
    directories, validation set, log directories, batch sizes, epoch
    budget and run mode.
    """
    global file_dir, input_dir, output_dir, valid_file_dir, norm_dir
    global initial_logs_dir, logs_dir, ckpt_name
    global batch_size, valid_batch_size, mode, max_epoch

    file_dir = c_train_dir
    input_dir = c_train_dir
    output_dir = c_train_dir + "/Labels"
    norm_dir = c_train_dir
    valid_file_dir = c_valid_dir
    logs_dir = c_logs_dir
    initial_logs_dir = c_logs_dir
    batch_size = c_batch_size_eval
    valid_batch_size = c_batch_size_eval
    max_epoch = c_max_epoch
    mode = c_mode
def test_config(c_test_dir, c_norm_dir, c_initial_logs_dir, c_batch_size_eval, c_data_len):
global test_file_dir
global norm_dir
global initial_logs_dir
global ckpt_name
global valid_batch_size
global data_len
global batch_size
test_file_dir = c_test_dir
norm_dir = c_norm_dir
initial_logs_dir = c_initial_logs_dir + '/backup_ckpt'
print(initial_logs_dir)
batch_size = valid_batch_size = c_batch_size_eval
data_len = c_data_len
def config(c_initLr=initLr, c_clip_threshold=clip_th, c_device=device, c_max_epoch=max_epoch):
    """Override the module-level hyper-parameters used when building Model.

    Defaults are captured from the module globals at definition time, so
    calling config() with no arguments leaves everything unchanged.
    """
    global initLr, clip_th, device, max_epoch

    initLr = c_initLr
    clip_th = c_clip_threshold
    device = c_device
    max_epoch = c_max_epoch
def smooth_softmax(x):
    """Sigmoid activations normalized so each row (axis 1) sums to 1."""
    sig = tf.sigmoid(x)
    row_sums = tf.expand_dims(tf.reduce_sum(sig, axis=1), axis=1)
    return sig / row_sums
def softmax(x, b):
    """Softmax over axis 1 with inverse temperature b."""
    scores = tf.exp(b * x)
    return scores / tf.expand_dims(tf.reduce_sum(scores, axis=1), axis=1)
def affine_transform(x, output_dim, seed=0, name=None):
    """Affine transformation Wx + b.

    Assumes x.shape = (batch_size, num_features). Variables are created
    (or reused, per the surrounding variable_scope) as "<name>_w" and
    "<name>_b", so `name` must not be None despite its default -- the
    string concatenation below would raise otherwise.
    """
    initializer = tf.truncated_normal_initializer(stddev=0.02, seed=seed)
    # weights = tf.get_variable(name + "_w", [x.get_shape()[1], output_dim],
    #                           initializer=tf.contrib.layers.xavier_initializer(seed=seed))
    weights = tf.get_variable(name + "_w", [x.get_shape()[1], output_dim],
                              initializer=initializer)
    b = tf.get_variable(name + "_b", [output_dim], initializer=tf.constant_initializer(0.0))
    return tf.matmul(x, weights) + b
def sw_sensor(inputs, bp):
    """Gate the flat bDNN input with per-sub-window attention weights.

    Each attention weight in `bp` is broadcast across that sub-window's
    num_features columns and multiplied into `inputs`. Module-level
    variant of Model.sw_sensor; uses the module global `batch_size`.
    """
    global batch_size
    # bp = tf.ones(bp.get_shape().as_list(), dtype=tf.float32) / 7  # for fix the attention
    bp = tf.expand_dims(bp, axis=2)
    bp = tf.tile(bp, (1, 1, num_features))
    # bp = tf.reshape(bp, (inputs.get_shape()[0].value, -1, 1))
    bp = tf.reshape(bp, (batch_size, -1, 1))
    bp = tf.squeeze(bp)
    # NOTE(review): the two prints below look like leftover shape debugging.
    print(bp.get_shape().as_list()[0])
    print(inputs.get_shape().as_list()[0])
    sw = bp * inputs
    return sw
def get_glimpse(inputs, bp, reuse=None, is_training=True):
    """Glimpse network: fuse the attention-gated input with the attention
    vector itself through batch-normalized affine layers (module-level
    variant of Model.get_glimpse)."""
    with tf.variable_scope("glimpse_net", reuse=reuse):
        glimpse_input = sw_sensor(inputs, bp)
        act_glimpse_hidden = tf.nn.relu(utils.batch_norm_affine_transform(glimpse_input, glimpse_hidden, decay=decay,
                                                                          name='glimpse_hidden', seed=SEED,
                                                                          is_training=is_training))
        act_bp_hidden = tf.nn.relu(utils.batch_norm_affine_transform(bp, bp_hidden, decay=decay, name='bp_hidden',
                                                                     seed=SEED,
                                                                     is_training=is_training))
        # Sum the two branches before the final non-linearity.
        glimpse_feature = tf.nn.relu(utils.batch_norm_affine_transform(act_glimpse_hidden, glimpse_out, decay=decay,
                                                                       name='glimpse_out', seed=SEED,
                                                                       is_training=is_training) +
                                     utils.batch_norm_affine_transform(act_bp_hidden, bp_out, decay=decay,
                                                                       name='bp_out', seed=SEED,
                                                                       is_training=is_training))
    return glimpse_feature
def multinomial_pmf(mean, sample):
    """
    Probability of each sampled attention sequence under its mean distribution.

    Reduces over axis=2, so both arguments are rank-3; given the call in
    calc_reward the shape is (batch_size, nGlimpses, winlen) -- the docstring
    of the original claimed rank-2, which does not match the reduction.
    :param mean: per-step attention probabilities.
    :param sample: one-hot samples drawn from `mean`.
    :return: p_br: product over the last axis; shape (batch_size, nGlimpses).
    """
    p_br = tf.reduce_prod(tf.pow(mean, sample), axis=2)
    return p_br
def bdnn_prediction(batch_size_in, logits, threshold=th):
    """Fuse overlapping bDNN sub-window outputs into per-frame decisions.

    Each frame is covered by several sub-windows; their predicted values
    are averaged (via the index map from utils.bdnn_transform) and then
    thresholded.

    Args:
        batch_size_in: number of center frames (padding of 2*w is added).
        logits: raw sigmoid outputs, one row per padded frame.
        threshold: decision threshold on the averaged soft score.

    Returns:
        (hard, soft): float32 arrays of 0/1 decisions and averaged scores.
    """
    bdnn_batch_size = batch_size_in + 2*w
    result = np.zeros((int(bdnn_batch_size), 1))
    # Build the frame->sub-window index map (1-based before the -1 below).
    indx = np.arange(int(bdnn_batch_size)) + 1
    indx = indx.reshape((int(bdnn_batch_size), 1))
    indx = utils.bdnn_transform(indx, w, u)
    indx = indx[w:(int(bdnn_batch_size)-w), :]
    indx_list = np.arange(w, int(bdnn_batch_size) - w)
    for i in indx_list:
        # Average every sub-window prediction that covers frame i.
        indx_temp = np.where((indx-1) == i)
        pred = logits[indx_temp]
        pred = np.sum(pred)/pred.shape[0]
        result[i] = pred
    # Drop the zero padding left at both ends by the loop range above.
    result = np.trim_zeros(result)
    soft_result = np.float32(result)
    result = np.float32(result) >= threshold
    return result.astype(np.float32), soft_result
def summary_generation(eval_file_dir):
    """Build TensorBoard scalar summary ops for every noise type.

    One scalar op per (noise_type, metric in summary_list) pair, plus
    averaged-result summaries, all fed through a single shared placeholder
    stored under key "summary_ph".

    Returns:
        dict mapping summary names to ops (and "summary_ph" to the feed
        placeholder).
    """
    summary_dic = {}
    noise_list = os.listdir(eval_file_dir)
    noise_list = sorted(noise_list)
    summary_dic["summary_ph"] = summary_ph = tf.placeholder(dtype=tf.float32)
    for name in noise_list:
        with tf.variable_scope(name):
            for summary_name in summary_list:
                summary_dic[name+"_"+summary_name] = tf.summary.scalar(summary_name, summary_ph)
    with tf.variable_scope("Averaged_Results"):
        summary_dic["cost_across_all_noise_types"] = tf.summary.scalar("cost_across_all_noise_types", summary_ph)
        summary_dic["accuracy_across_all_noise_types"]\
            = tf.summary.scalar("accuracy_across_all_noise_types", summary_ph)
        summary_dic["variance_across_all_noise_types"]\
            = tf.summary.scalar("variance_across_all_noise_types", summary_ph)
    return summary_dic
def full_evaluation(m_eval, sess_eval, batch_size_eval, eval_file_dir, summary_writer, summary_dic, itr):
    """Evaluate the model on every noise-type subdirectory of eval_file_dir.

    For each noise type, runs evaluation(), prints per-SNR accuracy/AUC
    (the per-file lists appear to be ordered SNR -5/0/5/10 -- TODO confirm
    against the dataset layout), and writes the per-noise and averaged
    summaries built by summary_generation().

    Returns:
        (mean_auc, var_accuracy) across all noise types.
    """
    mean_cost = []
    mean_accuracy = []
    mean_auc = []
    print("-------- Performance for each of noise types --------")
    noise_list = os.listdir(eval_file_dir)
    noise_list = sorted(noise_list)
    summary_ph = summary_dic["summary_ph"]
    for i in range(len(noise_list)):
        noise_name = '/' + noise_list[i]
        eval_input_dir = eval_file_dir + noise_name
        eval_output_dir = eval_file_dir + noise_name + '/Labels'
        eval_data_set = dr.DataReader(eval_input_dir, eval_output_dir, norm_dir, w=w, u=u, name="eval")
        eval_cost, eval_accuracy, eval_list, eval_auc, eval_auc_list = evaluation(m_eval, eval_data_set, sess_eval, batch_size_eval)
        print("--noise type : " + noise_list[i])
        print("cost: %.4f, accuracy across all SNRs: %.4f" % (eval_cost, eval_accuracy*100))
        print('accuracy wrt SNR:')
        print('SNR_-5 : %.4f, SNR_0 : %.4f, SNR_5 : %.4f, SNR_10 : %.4f' % (eval_list[0]*100, eval_list[1]*100,
                                                                            eval_list[2]*100, eval_list[3]*100))
        print('AUC wrt SNR:')
        print('SNR_-5 : %.4f, SNR_0 : %.4f, SNR_5 : %.4f, SNR_10 : %.4f' % (eval_auc_list[0]*100, eval_auc_list[1]*100,
                                                                            eval_auc_list[2]*100, eval_auc_list[3]*100))
        print('')
        # Write one summary per metric for this noise type.
        eval_summary_list = [eval_cost] + eval_list + [eval_accuracy]
        for j, summary_name in enumerate(summary_list):
            summary_str = sess_eval.run(summary_dic[noise_list[i]+"_"+summary_name], feed_dict={summary_ph: eval_summary_list[j]})
            summary_writer.add_summary(summary_str, itr)
        mean_cost.append(eval_cost)
        mean_accuracy.append(eval_accuracy)
        mean_auc.append(eval_auc)
    # Aggregate across noise types (variance taken before the mean rebind).
    mean_cost = np.mean(np.asarray(mean_cost))
    var_accuracy = np.var(np.asarray(mean_accuracy))
    mean_accuracy = np.mean(np.asarray(mean_accuracy))
    mean_auc = np.mean(np.asarray(mean_auc))
    summary_writer.add_summary(sess_eval.run(summary_dic["cost_across_all_noise_types"],
                                             feed_dict={summary_ph: mean_cost}), itr)
    summary_writer.add_summary(sess_eval.run(summary_dic["accuracy_across_all_noise_types"],
                                             feed_dict={summary_ph: mean_accuracy}), itr)
    summary_writer.add_summary(sess_eval.run(summary_dic["variance_across_all_noise_types"],
                                             feed_dict={summary_ph: var_accuracy}), itr)
    print("-------- Performance across all of noise types --------")
    print("cost : %.4f" % mean_cost)
    print("******* averaged accuracy across all noise_types : %.4f *******" % (mean_accuracy*100))
    print("******* averaged auc across all noise_types : %.7f *******" % (mean_auc*100))
    print("******* variance of accuracies across all noise_types : %6.6f *******" % var_accuracy)
    return mean_auc, var_accuracy
def evaluation(m_valid, valid_data_set, sess, eval_batch_size):
    """Evaluate the model over every file served by valid_data_set.

    Accumulates cost/accuracy/AUC per batch; on each file boundary the
    per-batch averages are folded into per-file lists and the running mean
    attention vector is printed (assumes nGlimpses == 7 -- the print
    hard-codes 7 components) and appended to the module global `attention`.

    Returns:
        (mean cost, mean accuracy, per-file accuracy list,
         mean AUC, per-file AUC list), means taken over files.
    """
    # num_samples = valid_data_set.num_samples
    # num_batches = num_samples / batch_size
    avg_valid_cost = 0.
    avg_valid_accuracy = 0.
    avg_valid_auc = 0.
    avg_sampled_bps = 0.
    itr_sum = 0.
    auc_list = [0 for i in range(valid_data_set._file_len)]
    accuracy_list = [0 for i in range(valid_data_set._file_len)]
    cost_list = [0 for i in range(valid_data_set._file_len)]
    itr_file = 0
    while True:
        valid_inputs, valid_labels = valid_data_set.next_batch(eval_batch_size)
        if valid_data_set.file_change_checker():
            # File boundary: finalize per-file averages, reset accumulators.
            global attention
            auc_list[itr_file] = avg_valid_auc / itr_sum
            accuracy_list[itr_file] = avg_valid_accuracy / itr_sum
            cost_list[itr_file] = avg_valid_cost / itr_sum
            avg_sampled_bps = avg_sampled_bps / itr_sum
            avg_valid_accuracy = 0.
            avg_valid_cost = 0.
            avg_valid_auc = 0.
            itr_sum = 0
            itr_file += 1
            valid_data_set.file_change_initialize()
            print("%.4f %.4f %.4f %.4f %.4f %.4f %.4f" % (avg_sampled_bps[0], avg_sampled_bps[1], avg_sampled_bps[2],
                                                          avg_sampled_bps[3], avg_sampled_bps[4],
                                                          avg_sampled_bps[5], avg_sampled_bps[6]))
            attention.append(avg_sampled_bps)
            avg_sampled_bps = 0
        if valid_data_set.eof_checker():
            valid_data_set.reader_initialize()
            # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
            break
        feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: valid_labels,
                     m_valid.keep_probability: 1}
        valid_cost, valid_accuracy, valid_soft_result, valid_raw_labels\
            = sess.run([m_valid.cost, m_valid.reward, m_valid.soft_result, m_valid.raw_labels],
                       feed_dict=feed_dict)
        # auc calculate
        fpr, tpr, thresholds = metrics.roc_curve(valid_raw_labels, valid_soft_result, pos_label=1)
        valid_auc = metrics.auc(fpr, tpr)
        # valid_auc = brentq(lambda x : 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
        # valid_auc = thresholds[np.argmin(abs(tpr-fpr))]
        # Mean attention of the final glimpse across the batch.
        sampled_bps_tensor = sess.run(m_valid.sampled_bps_tensor, feed_dict=feed_dict)
        avg_sampled_bps += np.mean(sampled_bps_tensor[:, -1, :], axis=0)
        avg_valid_auc += valid_auc
        avg_valid_cost += valid_cost
        avg_valid_accuracy += valid_accuracy
        itr_sum += 1
    total_avg_valid_auc = np.asscalar(np.mean(np.asarray(auc_list)))
    total_avg_valid_cost = np.asscalar(np.mean(np.asarray(cost_list)))
    total_avg_valid_accuracy = np.asscalar(np.mean(np.asarray(accuracy_list)))
    return total_avg_valid_cost, total_avg_valid_accuracy, accuracy_list, total_avg_valid_auc, auc_list
def dense_to_one_hot(labels_dense, num_classes=2):
    """Convert class labels from scalars to one-hot vectors."""
    # Row k of the identity matrix is exactly the one-hot encoding of
    # class k, so fancy-indexing np.eye with the flattened labels yields
    # the full (num_labels, num_classes) one-hot matrix in one shot.
    flat_labels = labels_dense.ravel().astype(int)
    return np.eye(num_classes)[flat_labels].astype(np.float32)
class Model(object):
    """Attention-based recurrent VAD model (ACAM), TF1 graph-mode.

    An LSTM core takes nGlimpses attention-gated "glimpses" of the bDNN
    input window; a selection network emits attention probabilities over
    sub-windows, and an action network maps the final cell output to
    per-sub-window speech probabilities. Training combines a supervised
    squared-error term with a REINFORCE term (see calc_reward).
    """

    def __init__(self, batch_size, reuse=None, is_training=True):
        """Build the whole graph (inference + objective) for a fixed batch size."""
        self.cell_outputs = []
        self.batch_size = batch_size
        self.keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
        self.inputs = tf.placeholder(tf.float32, shape=[None, bdnn_inputsize],
                                     name="inputs")
        self.labels = tf.placeholder(tf.float32, shape=[None, bdnn_outputsize], name="labels")
        self.is_training = is_training
        self.mean_bps = []     # attention probability vectors, one per glimpse
        self.sampled_bps = []  # sampled (one-hot in training) attention per glimpse
        self.baselines = []    # REINFORCE baselines, one per glimpse
        self.global_step = tf.Variable(0, trainable=False)
        self.lr = tf.train.exponential_decay(initLr, self.global_step, lrDecayFreq, lrDecayRate, staircase=True)
        self.raw_reward = 0
        # set inference graph
        cell_outputs = self.inference(reuse)  # (batch_size, bdnn_outputsize)
        # set objective function
        self.cost, self.reward, self.train_op, self.avg_b, self.rminusb, self.sampled_bps_tensor, self.p_bps,\
            self.print_lr, self.soft_result, self.raw_labels = self.calc_reward(cell_outputs)

    def inference(self, reuse=None):
        """Unroll the recurrent attention loop for nGlimpses steps.

        Returns the list of LSTM cell outputs (one per glimpse).
        """
        # initialization
        raw_inputs = self.inputs
        batch_size = self.batch_size
        keep_prob = self.keep_probability
        is_training = self.is_training
        tf.set_random_seed(SEED)  # initialize the random seed at graph level
        lstm_cell = rnn.LayerNormBasicLSTMCell(lstm_cell_size, dropout_keep_prob=keep_prob, reuse=reuse,
                                               dropout_prob_seed=SEED)
        initial_state = lstm_cell.zero_state(batch_size, tf.float32)
        # The first glimpse uses an all-zero attention "start sign".
        init_sw = tf.ones([batch_size, int(bdnn_winlen)]) * 0  # start sign
        self.mean_bps.append(init_sw)
        init_sw = tf.cast(tf.greater(init_sw, 0.4), tf.float32)
        self.sampled_bps.append(init_sw)
        reuse_recurrent = None
        init_glimpse = self.get_glimpse(raw_inputs, init_sw, reuse=reuse_recurrent)  # (batch_size, glimpse_out)
        inputs = [0] * nGlimpses
        outputs = [0] * nGlimpses
        glimpse = init_glimpse
        for time_step in range(nGlimpses):
            if time_step == 0:
                with tf.variable_scope("core_network", reuse=reuse_recurrent):
                    (cell_output, cell_state) = lstm_cell(glimpse, initial_state)
                    self.cell_outputs.append(initial_state)
            else:
                # Reuse the cell variables on every subsequent step.
                reuse_recurrent = True
                with tf.variable_scope("core_network", reuse=reuse_recurrent):
                    (cell_output, cell_state) = lstm_cell(glimpse, cell_state)
            inputs[time_step] = glimpse
            outputs[time_step] = cell_output
            if time_step != nGlimpses - 1:  # not final time_step
                glimpse = self.get_next_input(cell_output, reuse=reuse_recurrent)
            else:  # final time_step: record the last baseline only
                with tf.variable_scope("baseline", reuse=reuse_recurrent):
                    baseline = tf.sigmoid(affine_transform(((cell_output)), 1, name='baseline'))
                    self.baselines.append(baseline)
        return outputs

    def sw_sensor(self, inputs, bp):
        """Gate the flat bDNN input with per-sub-window attention weights bp."""
        # bp = tf.ones(bp.get_shape().as_list(), dtype=tf.float32) / 7  # for fix the attention
        bp = tf.expand_dims(bp, axis=2)
        bp = tf.tile(bp, (1, 1, num_features))
        # bp = tf.reshape(bp, (inputs.get_shape()[0].value, -1, 1))
        bp = tf.reshape(bp, (self.batch_size, -1, 1))
        bp = tf.squeeze(bp)
        sw = bp * inputs
        return sw

    def get_glimpse(self, inputs, bp, reuse=None):
        """Glimpse network: fuse the gated input and the attention vector."""
        is_training = self.is_training
        with tf.variable_scope("glimpse_net", reuse=reuse):
            glimpse_input = self.sw_sensor(inputs, bp)
            act_glimpse_hidden = tf.nn.relu(
                utils.batch_norm_affine_transform(glimpse_input, glimpse_hidden, decay=decay,
                                                  name='glimpse_hidden', seed=SEED, is_training=is_training))
            act_bp_hidden = tf.nn.relu(utils.batch_norm_affine_transform(bp, bp_hidden, decay=decay, name='bp_hidden',
                                                                         seed=SEED, is_training=is_training))
            glimpse_feature = tf.nn.relu(utils.batch_norm_affine_transform(act_glimpse_hidden, glimpse_out, decay=decay,
                                                                           name='glimpse_out',
                                                                           seed=SEED, is_training=is_training) +
                                         utils.batch_norm_affine_transform(act_bp_hidden, bp_out, decay=decay,
                                                                           seed=SEED, name='bp_out', is_training=is_training))
        return glimpse_feature

    def get_next_input(self, cell_output, reuse=None):
        """Record a baseline, sample the next attention vector, and build
        the next glimpse from the mean attention probabilities."""
        raw_inputs = self.inputs
        is_training = self.is_training
        with tf.variable_scope("baseline", reuse=reuse):
            baseline = tf.sigmoid(affine_transform(((cell_output)), 1, name='baseline'))
            self.baselines.append(baseline)
        with tf.variable_scope("selection_network", reuse=reuse):
            mean_bp = smooth_softmax(
                utils.batch_norm_affine_transform(cell_output, int(bdnn_winlen), decay=decay, name='selection',
                                                  is_training=is_training))
            # mean_bp = softmax(
            #     utils.batch_norm_affine_transform(cell_output, int(bdnn_winlen), decay=decay, name='selection',
            #                                       is_training=is_training), beta)
            self.mean_bps.append(mean_bp)
            # Training samples a one-hot sub-window; evaluation uses the
            # soft probabilities directly.
            if is_training:
                sampled_bp = tf.multinomial(mean_bp, num_samples=1, seed=SEED)
                sampled_bp = utils.onehot_tensor(sampled_bp, bdnn_winlen)
            else:
                sampled_bp = mean_bp
            sampled_bp = tf.stop_gradient(sampled_bp)
            self.sampled_bps.append(sampled_bp)
        return self.get_glimpse(raw_inputs, mean_bp, reuse=True)

    def action_network(self, outputs):
        """Two batch-normalized ReLU layers with dropout over the final
        cell output, feeding the prediction head in calc_reward."""
        is_training = self.is_training
        with tf.variable_scope("action_network"):
            h1_out = tf.nn.relu(utils.batch_norm_affine_transform(outputs, action_hidden_1,
                                                                  decay=decay, name='action_hidden_1',
                                                                  seed=SEED, is_training=is_training))
            h1_out = tf.nn.dropout(h1_out, keep_prob=self.keep_probability, seed=SEED)
            h2_out = tf.nn.relu(utils.batch_norm_affine_transform(h1_out, action_hidden_2,
                                                                  decay=decay, name='action_hidden_2', seed=SEED,
                                                                  is_training=is_training))
            h2_out = tf.nn.dropout(h2_out, keep_prob=self.keep_probability, seed=SEED)
        return h2_out

    def bdnn_prediction(self, logits, threshold):
        """Wrap the numpy bDNN fusion (module-level bdnn_prediction) as a TF op."""
        batch_size_tensor = tf.constant(self.batch_size, dtype=tf.float32)
        th_tenor = tf.constant(threshold, dtype=tf.float32)
        result, soft_result = tf.py_func(bdnn_prediction, [batch_size_tensor, logits, th_tenor], Tout=[tf.float32, tf.float32])
        return result, soft_result

    @staticmethod
    def np_trim_zeros(x):
        # Thin numpy wrapper kept for py_func compatibility.
        return np.trim_zeros(x)

    def calc_reward(self, outputs):
        """Build the hybrid objective and training op.

        Combines a supervised squared-error term over all sub-window
        outputs with a REINFORCE term log p(attention) * (R - baseline),
        where R is 1 when the fused center-frame decision matches the
        center label.

        Returns the tuple unpacked in __init__:
        (cost, reward, train_op, mean baseline, mean R-b, sampled
        attention tensor, mean attention pmf, lr, soft predictions,
        center-frame labels).
        """
        batch_size = self.batch_size
        # consider the action at the last time step
        outputs = outputs[-1]
        outputs = tf.reshape(outputs, (batch_size, lstm_cell_size))
        # get the baseline
        b = tf.stack(self.baselines)
        b = tf.tile(b, [1, 1, 1])
        b = tf.reshape(tf.transpose(b, [1, 0, 2]), [batch_size, nGlimpses])
        no_grad_b = tf.stop_gradient(b)
        # get the action
        action_out = self.action_network(outputs)
        logits = tf.sigmoid(affine_transform(action_out, int(bdnn_outputsize), seed=SEED, name="softmax"))
        logits = tf.identity(logits, "logits")
        result, soft_result = self.bdnn_prediction(logits, threshold=th)
        # soft_result = tf.identity(soft_result, 'soft_pred')
        # convert list of tensors to one big tensor
        mean_bps = tf.concat(axis=0, values=self.mean_bps)
        mean_bps = tf.reshape(mean_bps, (nGlimpses, self.batch_size, int(bdnn_winlen)))
        mean_bps = tf.transpose(mean_bps, [1, 0, 2])
        sampled_bps = tf.concat(axis=0, values=self.sampled_bps)
        sampled_bps = tf.reshape(sampled_bps, (nGlimpses, self.batch_size, int(bdnn_winlen)))
        sampled_bps = tf.transpose(sampled_bps, [1, 0, 2])
        # reward for all examples in the batch: compare the fused decision
        # against the center frame of each label window.
        raw_indx = int(np.floor(bdnn_outputsize / 2))
        raw_labels = self.labels[:, raw_indx]
        raw_labels = tf.reshape(raw_labels, shape=(-1, 1))
        raw_labels = tf.identity(raw_labels, 'raw_labels')
        R = tf.cast(tf.equal(result, raw_labels), tf.float32)
        # soft_R is computed but unused below -- kept from the original.
        soft_R = tf.stop_gradient(tf.cast(tf.abs(tf.subtract(1 - soft_result, raw_labels)), tf.float32))
        soft_R = tf.reshape(soft_R, (batch_size, 1))
        soft_R = tf.tile(soft_R, [1, nGlimpses])
        # R = tf.cast(tf.abs(tf.subtract(1 - soft_result, raw_labels)), tf.float32)
        R = tf.stop_gradient(R)
        R = tf.reshape(R, (batch_size, 1))
        self.raw_reward = R
        R = tf.tile(R, [1, nGlimpses])
        reward = tf.reduce_mean(R)
        # select the window
        p_bps = multinomial_pmf(mean_bps, sampled_bps)
        p_bps = tf.reshape(p_bps, (self.batch_size, nGlimpses))
        # define the cost function
        sv_part = -tf.square(self.labels - logits)
        rf_part = tf.log(p_bps + SMALL_NUM) * (R - no_grad_b)
        # J = sv_part
        J = tf.concat(axis=1, values=[sv_part, rf_part])  # comment for sv only
        J = tf.reduce_sum(J, 1)
        J = J - tf.reduce_mean(tf.square(R - b), 1)  # comment for sv only
        J = tf.reduce_mean(J, 0)
        # cost = -J
        cost = -tf.reduce_mean(J)
        # Global-norm gradient clipping with Adam.
        var_list = tf.trainable_variables()
        grads = tf.gradients(cost, var_list)
        grads, _ = tf.clip_by_global_norm(grads, clip_th)
        optimizer = tf.train.AdamOptimizer(self.lr)
        train_op = optimizer.apply_gradients(zip(grads, var_list), global_step=self.global_step)
        return cost, reward, train_op, tf.reduce_mean(b), tf.reduce_mean(R - b), \
            sampled_bps, tf.reduce_mean(p_bps), self.lr, soft_result, raw_labels
def main(prj_dir=None, model=None, mode=None):
    """Build the ACAM graph and either train it or run VAD inference.

    Fix: the original compared mode with ``is 'train'`` (identity), which
    relies on CPython string interning and emits SyntaxWarning on 3.8+;
    all string comparisons now use ``==``.

    Args:
        prj_dir: project root containing configure/ACAM (train mode only).
        model: model name forwarded to path_setting (train mode only).
        mode: 'train' or 'test'.

    Returns:
        In 'test' mode, (final_softout, final_label), trimmed to the
        module-level data_len when it is set. 'train' mode returns None.
    """
    # Configuration Part #
    if mode == 'train':
        import path_setting as ps
        set_path = ps.PathSetting(prj_dir, model)
        logs_dir = initial_logs_dir = set_path.logs_dir
        input_dir = set_path.input_dir
        output_dir = set_path.output_dir
        norm_dir = set_path.norm_dir
        valid_file_dir = set_path.valid_file_dir
        sys.path.insert(0, prj_dir+'/configure/ACAM')
        import config as cg
        # Training overrides the module-level hyper-parameters from the
        # project's ACAM config module.
        global initLr, dropout_rate, max_epoch, batch_size, valid_batch_size
        initLr = cg.lr
        dropout_rate = cg.dropout_rate
        max_epoch = cg.max_epoch
        batch_size = valid_batch_size = cg.batch_size
        global w, u
        w = cg.w
        u = cg.u
        global bdnn_winlen, bdnn_inputsize, bdnn_outputsize
        bdnn_winlen = (((w-1) / u) * 2) + 3
        bdnn_inputsize = int(bdnn_winlen * num_features)
        bdnn_outputsize = int(bdnn_winlen)
        global glimpse_hidden, bp_hidden, glimpse_out, bp_out, nGlimpses,\
            lstm_cell_size, action_hidden_1, action_hidden_2
        glimpse_hidden = cg.glimpse_hidden
        bp_hidden = cg.bp_hidden
        glimpse_out = bp_out = cg.glimpse_out
        nGlimpses = cg.nGlimpse  # 7
        lstm_cell_size = cg.lstm_cell_size
        action_hidden_1 = cg.action_hidden_1  # default : 256
        action_hidden_2 = cg.action_hidden_2  # default : 256

    # Graph Part #
    mean_acc_list = []
    var_acc_list = []
    print('Mode : ' + mode)
    print("Graph initialization...")
    # Two graphs sharing variables: a training model and an eval model
    # (is_training toggles dropout / BN behavior and attention sampling).
    with tf.device(device):
        with tf.variable_scope("model", reuse=None):
            m_train = Model(batch_size=batch_size, reuse=None, is_training=True)
    with tf.device(device):
        with tf.variable_scope("model", reuse=True):
            m_valid = Model(batch_size=valid_batch_size, reuse=True, is_training=False)
    print("Done")

    # Summary Part #
    print("Setting up summary op...")
    summary_ph = tf.placeholder(dtype=tf.float32)
    with tf.variable_scope("Training_procedure"):
        cost_summary_op = tf.summary.scalar("cost", summary_ph)
        accuracy_summary_op = tf.summary.scalar("accuracy", summary_ph)
    print("Done")

    # Model Save Part #
    print("Setting up Saver...")
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(initial_logs_dir)
    print("Done")

    # Session Part #
    sess_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)
    if mode == 'train':
        train_summary_writer = tf.summary.FileWriter(logs_dir + '/train/', sess.graph, max_queue=2)
        valid_summary_writer = tf.summary.FileWriter(logs_dir + '/valid/', max_queue=2)
    if ckpt and ckpt.model_checkpoint_path:  # model restore
        print("Model restored...")
        print(initial_logs_dir+ckpt_name)
        if mode == 'train':
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            saver.restore(sess, initial_logs_dir+ckpt_name)
            saver.save(sess, initial_logs_dir + "/model_ACAM.ckpt", 0)  # model save
        print("Done")
    else:
        sess.run(tf.global_variables_initializer())  # if the checkpoint doesn't exist, do initialization
    if mode == 'train':
        train_data_set = dr.DataReader(input_dir, output_dir, norm_dir, w=w, u=u,
                                       name="train")  # training data reader initialization

    if mode == 'train':
        for itr in range(max_epoch):
            start_time = time.time()
            train_inputs, train_labels = train_data_set.next_batch(batch_size)
            feed_dict = {m_train.inputs: train_inputs, m_train.labels: train_labels,
                         m_train.keep_probability: dropout_rate}
            sess.run(m_train.train_op, feed_dict=feed_dict)
            if itr % 10 == 0 and itr >= 0:
                # Periodic training diagnostics.
                train_cost, train_reward, train_avg_b, train_rminusb, train_p_bps, train_lr \
                    = sess.run([m_train.cost, m_train.reward, m_train.avg_b, m_train.rminusb, m_train.p_bps,
                                m_train.print_lr]
                               , feed_dict=feed_dict)
                duration = time.time() - start_time
                print("Step: %d, cost: %.4f, accuracy: %4.4f, b: %4.4f, R-b: %4.4f, p_bps: %4.4f, lr: %7.6f (%.3f sec)"
                      % (itr, train_cost, train_reward, train_avg_b, train_rminusb, train_p_bps, train_lr, duration))
                train_cost_summary_str = sess.run(cost_summary_op, feed_dict={summary_ph: train_cost})
                train_accuracy_summary_str = sess.run(accuracy_summary_op, feed_dict={summary_ph: train_reward})
                train_summary_writer.add_summary(train_cost_summary_str, itr)  # write the train phase summary to event files
                train_summary_writer.add_summary(train_accuracy_summary_str, itr)
            if itr % 50 == 0 and itr > 0:
                # Checkpoint + validation pass.
                saver.save(sess, logs_dir + "/model.ckpt", itr)  # model save
                print('validation start!')
                valid_accuracy, valid_cost = \
                    utils.do_validation(m_valid, sess, valid_file_dir, norm_dir,
                                        type='ACAM')
                print("valid_cost: %.4f, valid_accuracy=%4.4f" % (valid_cost, valid_accuracy * 100))
                valid_cost_summary_str = sess.run(cost_summary_op, feed_dict={summary_ph: valid_cost})
                valid_accuracy_summary_str = sess.run(accuracy_summary_op, feed_dict={summary_ph: valid_accuracy})
                valid_summary_writer.add_summary(valid_cost_summary_str, itr)  # write the train phase summary to event files
                valid_summary_writer.add_summary(valid_accuracy_summary_str, itr)
    elif mode == 'test':
        final_softout, final_label = utils.vad_test(m_valid, sess, valid_batch_size, test_file_dir, norm_dir, data_len,
                                                    eval_type)
        if data_len is None:
            return final_softout, final_label
        else:
            return final_softout[0:data_len, :], final_label[0:data_len, :]
if __name__ == "__main__":
    # NOTE(review): tf.app.run() invokes main() with default (None) args, so
    # neither the 'train' nor the 'test' branch runs when the file is
    # executed directly -- confirm this entry point is still intended; the
    # module seems to be driven mainly via train_config()/test_config().
    tf.app.run()
| 35,070
| 38.229306
| 152
|
py
|
VAD
|
VAD-master/lib/python/freeze_graph.py
| 0
| 0
| 0
|
py
|
|
VAD
|
VAD-master/lib/python/feat_ex.py
|
import sys
sys.path.insert(0, './lib/python')
import VAD_Proposed as Vp
import VAD_DNN as Vd
import VAD_bDNN as Vb
import VAD_LSTM_2 as Vl
import scipy.io as sio
import os, getopt
# norm_dir = "./norm_data"
# data_dir = "./sample_data"
# ckpt_name = '/model9918and41.ckpt-2'
# model_dir = "./saved_model"
# valid_batch_size = 4134
# Script entry point: parse --data_dir/--save_dir (both relative to
# <repo>/data), rebuild the save directory tree, then launch MATLAB to
# extract acoustic features for the train and valid splits.
if __name__ == '__main__':
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'h', ["data_dir=", "save_dir="])
    except getopt.GetoptError as err:
        print(str(err))
        sys.exit(1)
    if len(opts) != 2:
        print("arguments are not enough.")
        sys.exit(1)
    for opt, arg in opts:
        if opt == '-h':
            sys.exit(0)
        elif opt == '--data_dir':
            data_dir = str(arg)
        elif opt == '--save_dir':
            save_dir = str(arg)
    data_dir = os.path.abspath('../..') + '/data' + data_dir
    train_data_dir = data_dir + '/train'
    valid_data_dir = data_dir + '/valid'
    save_dir = os.path.abspath('../..') + '/data' + save_dir
    train_save_dir = save_dir + '/train'
    valid_save_dir = save_dir + '/valid'
    # NOTE(review): command-line input is interpolated straight into shell
    # commands here -- a path containing spaces or shell metacharacters
    # would break (or be executed). Consider subprocess.run([...]) with a
    # list argument if these paths can ever come from untrusted input.
    os.system("rm -rf " + save_dir)
    os.system("mkdir " + save_dir)
    os.system("mkdir " + save_dir + '/train')
    os.system("mkdir " + save_dir + '/valid')
    os.system("matlab -r \"try acoustic_feat_ex(\'%s\',\'%s\'); catch; end; quit\"" % (train_data_dir, train_save_dir))
    os.system("matlab -r \"try acoustic_feat_ex(\'%s\',\'%s\'); catch; end; quit\"" % (valid_data_dir, valid_save_dir))
    # os.system("rm -rf")
    print("done")
| 1,566
| 28.018519
| 119
|
py
|
VAD
|
VAD-master/lib/python/eer_test.py
|
import numpy as np
import scipy.io as sio
import sys
import os, sys, getopt
from sklearn import metrics
from scipy.optimize import brentq
from scipy.interpolate import interp1d
def eer(pred, label):
    """Return the equal error rate (EER) of *pred* against *label*, in percent.

    The EER is the point on the ROC curve where the false-positive rate
    equals the false-negative rate; it is located by root-finding on the
    interpolated ROC.
    """
    false_pos, true_pos, _ = metrics.roc_curve(label, pred, pos_label=1)
    roc_interp = interp1d(false_pos, true_pos)
    rate = brentq(lambda t: 1. - t - roc_interp(t), 0., 1.)
    return 100 * rate
def main():
    # Load pred.mat / label.mat from --data_dir and print the EER in percent.
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'h', ["data_dir="])
    except getopt.GetoptError as err:
        print(str(err))
        sys.exit(1)
    # if len(opts) != 6:
    #     print("arguments are not enough.")
    #     sys.exit(1)
    for opt, arg in opts:
        if opt == '-h':
            sys.exit(0)
        elif opt == '--data_dir':
            data_dir = str(arg)
    # NOTE(review): if --data_dir is not supplied, data_dir is unbound here
    # and the next line raises NameError — confirm callers always pass it.
    label = data_dir + '/label.mat'
    pred = data_dir + '/pred.mat'
    label = sio.loadmat(label)
    label = label['label']
    pred = sio.loadmat(pred)
    pred = pred['pred']
    eer_result = eer(pred, label)
    print(eer_result)
if __name__ == '__main__':
main()
| 1,104
| 21.1
| 73
|
py
|
VAD
|
VAD-master/lib/python/medium-tffreeze-2.py
|
import tensorflow as tf
def load_graph(frozen_graph_filename):
    """Load a frozen TensorFlow GraphDef from disk and import it into a new
    Graph under the name scope 'prefix'. Returns the new Graph."""
    # We load the protobuf file from the disk and parse it to retrieve the
    # unserialized graph_def
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    # Then, we import the graph_def into a new Graph and returns it
    with tf.Graph().as_default() as graph:
        # The name var will prefix every op/nodes in your graph
        # Since we load everything in a new graph, this is not needed
        tf.import_graph_def(graph_def, name="prefix")
    return graph
| 623
| 40.6
| 75
|
py
|
VAD
|
VAD-master/lib/python/utils.py
|
# Utils used with tensorflow implementation
import tensorflow as tf
import numpy as np
import scipy.misc as misc
import os, sys
from six.moves import urllib
import tarfile
import zipfile
import scipy.io
import re
import data_reader_bDNN_v2 as dr
import data_reader_DNN_v2 as dnn_dr
import data_reader_RNN as rnn_dr
from sklearn import metrics
__author__ = 'Juntae'
def vad_test(m_eval, sess_eval, batch_size_eval, eval_file_dir, norm_dir, data_len, eval_type):
    """Build the appropriate DataReader for *eval_type* and run `evaluation`.

    eval_type 2 selects the DNN reader; every other value uses the
    bDNN-style reader. Returns (final_softout, final_label).
    """
    eval_input_dir = eval_file_dir
    eval_output_dir = eval_file_dir + '/Labels'
    # NOTE(review): pad_size is computed but never used — dead code, or a
    # padding step was removed; confirm intent.
    pad_size = batch_size_eval - data_len % batch_size_eval
    if eval_type != 2:
        eval_data_set = dr.DataReader(eval_input_dir, eval_output_dir, norm_dir, w=19, u=9, name="eval")
    else:
        eval_data_set = dnn_dr.DataReader(eval_input_dir, eval_output_dir, norm_dir, w=19, u=9, name="eval")
    final_softout, final_label = evaluation(m_eval, eval_data_set, sess_eval, batch_size_eval, eval_type)
    return final_softout, final_label
def affine_transform(x, output_dim, seed=0, name=None):
    """
    affine transformation Wx+b
    assumes x.shape = (batch_size, num_features)
    """
    w_init = tf.truncated_normal_initializer(stddev=0.02, seed=seed)
    w_var = tf.get_variable(name + "_w", [x.get_shape()[1], output_dim],
                            initializer=w_init)
    b_var = tf.get_variable(name + "_b", [output_dim],
                            initializer=tf.constant_initializer(0.0))
    return tf.matmul(x, w_var) + b_var
def evaluation(m_valid, valid_data_set, sess, eval_batch_size, eval_type):
    """Stream the whole data set through the model and collect results.

    eval_type: 0 = proposed (ACAM), 1 = bDNN, 2 = DNN.
    Returns (final_softout, final_label), each reshaped to (-1, 1).
    NOTE(review): implicitly returns None for any other eval_type value.
    """
    # num_samples = valid_data_set.num_samples
    # num_batches = num_samples / batch_size
    if eval_type == 0: # proposed
        final_softout = []
        final_label = []
        while True:
            valid_inputs, valid_labels = valid_data_set.next_batch(eval_batch_size)
            feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: valid_labels,
                         m_valid.keep_probability: 1}
            # End of data: flatten what we accumulated and reset the reader.
            if valid_data_set.eof_checker():
                final_softout = np.reshape(np.asarray(final_softout), [-1, 1])
                final_label = np.reshape(np.asarray(final_label), [-1, 1])
                valid_data_set.reader_initialize()
                # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break
            valid_soft_result, valid_raw_labels = sess.run([m_valid.soft_result, m_valid.raw_labels],
                                                           feed_dict=feed_dict)
            final_softout.append(valid_soft_result)
            final_label.append(valid_raw_labels)
        # if valid_data_set.eof_checker():
        #     final_softout = np.reshape(np.asarray(final_softout), [-1, 1])
        #     final_label = np.reshape(np.asarray(final_label), [-1, 1])
        #     valid_data_set.reader_initialize()
        #     # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
        #     break
        return final_softout, final_label
    elif eval_type == 1: # bdnn
        final_softout = []
        final_label = []
        while True:
            valid_inputs, valid_labels = valid_data_set.next_batch(eval_batch_size)
            feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: valid_labels,
                         m_valid.keep_probability: 1}
            if valid_data_set.eof_checker():
                final_softout = np.reshape(np.asarray(final_softout), [-1, 1])
                final_label = np.reshape(np.asarray(final_label), [-1, 1])
                valid_data_set.reader_initialize()
                # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break
            valid_cost, valid_logits = sess.run([m_valid.cost, m_valid.logits], feed_dict=feed_dict)
            # bDNN emits one prediction per context window; fold them back
            # into per-frame soft scores (batch is padded by 2*w frames).
            valid_pred, soft_pred = bdnn_prediction(eval_batch_size + 2*valid_data_set._w, valid_logits, threshold=0.6)
            # print(np.sum(valid_pred))
            # The center column of the label window is the raw frame label.
            raw_indx = int(np.floor(valid_labels.shape[1] / 2))
            raw_labels = valid_labels[:, raw_indx]
            raw_labels = raw_labels.reshape((-1, 1))
            final_softout.append(soft_pred)
            final_label.append(raw_labels)
        # if valid_data_set.eof_checker():
        #     final_softout = np.reshape(np.asarray(final_softout), [-1, 1])
        #     final_label = np.reshape(np.asarray(final_label), [-1, 1])
        #     valid_data_set.reader_initialize()
        #     # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
        #     break
        return final_softout, final_label
    elif eval_type == 2: # dnn
        final_softout = []
        final_label = []
        while True:
            valid_inputs, valid_labels = valid_data_set.next_batch(eval_batch_size)
            one_hot_labels = valid_labels.reshape((-1, 1))
            one_hot_labels = dense_to_one_hot(one_hot_labels, num_classes=2)
            feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: one_hot_labels,
                         m_valid.keep_probability: 1}
            if valid_data_set.eof_checker():
                final_softout = np.reshape(np.asarray(final_softout), [-1, 1])
                final_label = np.reshape(np.asarray(final_label), [-1, 1])
                valid_data_set.reader_initialize()
                # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break
            soft_pred, raw_labels = sess.run([m_valid.softpred, m_valid.raw_labels], feed_dict=feed_dict)
            raw_labels = raw_labels.reshape((-1, 1))
            final_softout.append(soft_pred)
            final_label.append(raw_labels)
        # if valid_data_set.eof_checker():
        #     final_softout = np.reshape(np.asarray(final_softout), [-1, 1])
        #     final_label = np.reshape(np.asarray(final_label), [-1, 1])
        #     valid_data_set.reader_initialize()
        #     # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
        #     break
        return final_softout, final_label
def onehot_tensor(label_batch, num_labels):
    """One-hot encode *label_batch* as a dense (batch_size, num_labels) tensor.

    Builds (row, label) index pairs and scatters 1.0 into a dense tensor.
    NOTE(review): tf.sparse_to_dense is a deprecated TF1 API — presumably
    tf.one_hot would be the modern equivalent; confirm before upgrading.
    """
    batch_size = label_batch.get_shape().as_list()[0]
    num_labels = tf.cast(num_labels, tf.int32)
    sparse_labels = tf.cast(tf.reshape(label_batch, [-1, 1]), dtype=tf.int32)
    indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
    concated = tf.concat(axis=1, values=[indices, sparse_labels])
    outshape = tf.stack([batch_size, num_labels])
    labels = tf.sparse_to_dense(concated, outshape, 1.0, 0.0)
    return labels
def get_model_data(dir_path, model_url):
    """Download *model_url* into *dir_path* if needed and load it as a .mat dict.

    Raises IOError if the file is still missing after the download attempt.
    """
    maybe_download_and_extract(dir_path, model_url)
    filename = model_url.split("/")[-1]
    filepath = os.path.join(dir_path, filename)
    if not os.path.exists(filepath):
        raise IOError("VGG Model not found!")
    data = scipy.io.loadmat(filepath)
    return data
def maybe_download_and_extract(dir_path, url_name, is_tarfile=False, is_zipfile=False):
    """Download *url_name* into *dir_path* (if absent) and optionally extract it.

    Shows a simple progress indicator on stdout while downloading; tar.gz and
    zip archives are extracted in place when the corresponding flag is set.
    """
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    filename = url_name.split('/')[-1]
    filepath = os.path.join(dir_path, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write(
                '\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(url_name, filepath, reporthook=_progress)
        print()
        statinfo = os.stat(filepath)
        print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
        if is_tarfile:
            tarfile.open(filepath, 'r:gz').extractall(dir_path)
        elif is_zipfile:
            with zipfile.ZipFile(filepath) as zf:
                zip_dir = zf.namelist()[0]
                zf.extractall(dir_path)
def save_image(image, save_dir, name, mean=None):
    """
    Save image as <save_dir>/<name>.png, unprocessing (adding the mean back)
    first when a mean is given.
    :param mean: mean pixel to add back, or None to save as-is
    :param image: image array to save
    :param save_dir: destination directory
    :param name: file name without extension
    :return:
    """
    # Bug fix: `if mean:` raises "truth value is ambiguous" for numpy-array
    # means and silently skips unprocessing for a zero mean; compare to None.
    if mean is not None:
        image = unprocess_image(image, mean)
    misc.imsave(os.path.join(save_dir, name + ".png"), image)
def get_variable(weights, name):
    """Wrap a pre-trained numpy weight array as a TF variable named *name*."""
    const_init = tf.constant_initializer(weights, dtype=tf.float32)
    return tf.get_variable(name=name, initializer=const_init, shape=weights.shape)
def weight_variable(shape, stddev=0.02, name=None):
    """Create a weight tensor of *shape* from a truncated normal.

    Named variables go through tf.get_variable (so they participate in
    variable scoping/reuse); anonymous ones become plain tf.Variable.
    """
    # print(shape)
    init_val = tf.truncated_normal(shape, stddev=stddev)
    #initial = tf.contrib.layers.xavier_initializer_conv2d()
    if name is not None:
        return tf.get_variable(name, initializer=init_val)
    return tf.Variable(init_val)
def bias_variable(shape, name=None):
    """Create a zero-initialized bias tensor of *shape* (scoped when named)."""
    init_val = tf.constant(0.0, shape=shape)
    if name is not None:
        return tf.get_variable(name, initializer=init_val)
    return tf.Variable(init_val)
def get_tensor_size(tensor):
    """Return the total number of elements implied by *tensor*'s static shape.

    Bug fix: `reduce` is a builtin only on Python 2; on Python 3 it must be
    imported from functools, otherwise this raises NameError when called.
    """
    from functools import reduce
    from operator import mul
    return reduce(mul, (d.value for d in tensor.get_shape()), 1)
def conv2d_basic(x, W, bias, stride=1):
    # 2-D convolution with SAME padding, equal stride in both spatial dims.
    conv = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding="SAME")
    return tf.nn.bias_add(conv, bias)
def conv2d_basic_VALID(x, W, bias, stride=1):
    # Same as conv2d_basic but with VALID padding (no zero padding).
    conv = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding="VALID")
    return tf.nn.bias_add(conv, bias)
def conv2d_strided(x, W, b):
    # Fixed stride-2 convolution with SAME padding (spatial downsampling x2).
    conv = tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding="SAME")
    return tf.nn.bias_add(conv, b)
def conv2d_transpose_strided(x, W, b, output_shape=None, stride = 2):
    """Transposed convolution (upsampling). When output_shape is None it is
    derived from x: spatial dims doubled, channels taken from W's 3rd dim."""
    # print x.get_shape()
    # print W.get_shape()
    if output_shape is None:
        output_shape = x.get_shape().as_list()
        output_shape[1] *= 2
        output_shape[2] *= 2
        output_shape[3] = W.get_shape().as_list()[2]
    # print output_shape
    conv = tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1], padding="SAME")
    return tf.nn.bias_add(conv, b)
def leaky_relu(x, alpha=0.0, name=""):
    # max(alpha*x, x); with the default alpha=0.0 this is a plain ReLU.
    return tf.maximum(alpha * x, x, name)
def max_pool_2x2(x):
    # 2x2 max pooling, stride 2, SAME padding (halves both spatial dims).
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
def max_pool_2x1(x):
    # 2x1 max pooling, stride 2 along height only (halves the first spatial dim).
    return tf.nn.max_pool(x, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding="SAME")
def avg_pool_2x2(x):
    # 2x2 average pooling, stride 2, SAME padding (halves both spatial dims).
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
def local_response_norm(x):
    # AlexNet-style local response normalization with fixed hyper-parameters.
    return tf.nn.lrn(x, depth_radius=5, bias=2, alpha=1e-4, beta=0.75)
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
    """
    Batch normalization over a 4-D activation tensor (n_out channels).
    During training (phase_train True) it uses batch moments and updates an
    exponential moving average; at inference it uses the averaged moments.
    Code taken from http://stackoverflow.com/a/34634291/2267819
    """
    with tf.variable_scope(scope):
        # Learnable shift (beta) and scale (gamma), per channel.
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, 0.02),
                                trainable=True)
        # Moments over batch, height, width — one mean/var per channel.
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)
        def mean_var_with_update():
            # Tie the EMA update to reading the batch moments.
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)
        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
def process_image(image, mean_pixel):
    """Zero-center *image* by subtracting the mean pixel."""
    centered = image - mean_pixel
    return centered
def unprocess_image(image, mean_pixel):
    """Undo process_image by adding the mean pixel back."""
    restored = image + mean_pixel
    return restored
def bottleneck_unit(x, out_chan1, out_chan2, down_stride=False, up_stride=False, name=None):
    """
    ResNet-style bottleneck block: a shortcut branch (b1) plus a
    1x1 -> 3x3 -> 1x1 conv branch (b2), summed and ReLU'd.
    Modified implementation from github ry?!
    NOTE(review): several inner paths look broken and would raise if
    executed — see the inline notes below. They may be dead code here;
    confirm which branches the project actually exercises.
    """
    def conv_transpose(tensor, out_channel, shape, strides, name=None):
        out_shape = tensor.get_shape().as_list()
        in_channel = out_shape[-1]
        kernel = weight_variable([shape, shape, out_channel, in_channel], name=name)
        # NOTE(review): `shape` is an int here, so `shape[-1]` raises
        # TypeError; also the conv below reads the outer `x`, not `tensor`.
        shape[-1] = out_channel
        return tf.nn.conv2d_transpose(x, kernel, output_shape=out_shape, strides=[1, strides, strides, 1],
                                      padding='SAME', name='conv_transpose')
    def conv(tensor, out_chans, shape, strides, name=None):
        in_channel = tensor.get_shape().as_list()[-1]
        kernel = weight_variable([shape, shape, in_channel, out_chans], name=name)
        # NOTE(review): convolves the outer `x`, not `tensor` — so chained
        # calls (branch2b/2c) do not actually consume the previous output.
        return tf.nn.conv2d(x, kernel, strides=[1, strides, strides, 1], padding='SAME', name='conv')
    def bn(tensor, name=None):
        """
        :param tensor: 4D tensor input
        :param name: name of the operation
        :return: local response normalized tensor - not using batch normalization :(
        """
        return tf.nn.lrn(tensor, depth_radius=5, bias=2, alpha=1e-4, beta=0.75, name=name)
    in_chans = x.get_shape().as_list()[3]
    if down_stride or up_stride:
        first_stride = 2
    else:
        first_stride = 1
    with tf.variable_scope('res%s' % name):
        if in_chans == out_chan2:
            b1 = x
        else:
            with tf.variable_scope('branch1'):
                if up_stride:
                    # NOTE(review): conv_transpose has no `out_chans` keyword
                    # (its parameter is `out_channel`) — this call would raise.
                    b1 = conv_transpose(x, out_chans=out_chan2, shape=1, strides=first_stride,
                                        name='res%s_branch1' % name)
                else:
                    b1 = conv(x, out_chans=out_chan2, shape=1, strides=first_stride, name='res%s_branch1' % name)
                # NOTE(review): bn() takes (tensor, name) — three positional
                # arguments here would raise TypeError.
                b1 = bn(b1, 'bn%s_branch1' % name, 'scale%s_branch1' % name)
        with tf.variable_scope('branch2a'):
            if up_stride:
                b2 = conv_transpose(x, out_chans=out_chan1, shape=1, strides=first_stride, name='res%s_branch2a' % name)
            else:
                b2 = conv(x, out_chans=out_chan1, shape=1, strides=first_stride, name='res%s_branch2a' % name)
            b2 = bn(b2, 'bn%s_branch2a' % name, 'scale%s_branch2a' % name)
            b2 = tf.nn.relu(b2, name='relu')
        with tf.variable_scope('branch2b'):
            b2 = conv(b2, out_chans=out_chan1, shape=3, strides=1, name='res%s_branch2b' % name)
            b2 = bn(b2, 'bn%s_branch2b' % name, 'scale%s_branch2b' % name)
            b2 = tf.nn.relu(b2, name='relu')
        with tf.variable_scope('branch2c'):
            b2 = conv(b2, out_chans=out_chan2, shape=1, strides=1, name='res%s_branch2c' % name)
            b2 = bn(b2, 'bn%s_branch2c' % name, 'scale%s_branch2c' % name)
        x = b1 + b2
        return tf.nn.relu(x, name='relu')
def add_to_regularization_and_summary(var):
    # Record a histogram summary for var and add its L2 norm to the
    # "reg_loss" collection (picked up later as weight decay).
    if var is not None:
        tf.summary.histogram(var.op.name, var)
        tf.add_to_collection("reg_loss", tf.nn.l2_loss(var))
def add_activation_summary(var):
    # Summaries for an activation tensor: value histogram plus sparsity
    # (fraction of zeros).
    if var is not None:
        tf.summary.histogram(var.op.name + "/activation", var)
        tf.summary.scalar(var.op.name + "/sparsity", tf.nn.zero_fraction(var))
def add_gradient_summary(grad, var):
    # Histogram summary of the gradient flowing into var (skips None grads).
    if grad is not None:
        tf.summary.histogram(var.op.name + "/gradient", grad)
def get_conv_shape(name):
    """Parse a square-conv layer-spec string.

    Splits *name* on ':', ', ' and '->' and reads fixed token positions:
    kernel size (5), stride (7), input feature maps (9), output feature
    maps (10). Returns ([k, k, in_fm, out_fm], stride).
    """
    tokens = re.split(':|, |->', name)
    kernel = int(tokens[5])
    stride_val = int(tokens[7])
    in_fm = int(tokens[9])
    out_fm = int(tokens[10])
    return [kernel, kernel, in_fm, out_fm], stride_val
def get_1d_conv_shape(name):
    """Parse a 1-D conv layer-spec string (same format as get_conv_shape).

    Returns ([k, 1, in_fm, out_fm], stride) — kernel is k x 1.
    """
    tokens = re.split(':|, |->', name)
    kernel = int(tokens[5])
    stride_val = int(tokens[7])
    in_fm = int(tokens[9])
    out_fm = int(tokens[10])
    return [kernel, 1, in_fm, out_fm], stride_val
def write_val_summary(graph, loss):
    """Attach a validation-loss summary op to *graph* and return the merged op.

    NOTE(review): the `loss` argument is unused, and tf.summary.scalar is fed a
    shape-[1] placeholder where a rank-0 tensor is presumably expected —
    confirm this function is actually exercised anywhere.
    """
    with graph.as_default():
        val_loss = tf.placeholder(tf.float32, shape=[1], name="loss")
        tf.summary.scalar("entropy", val_loss)
        summary_op = tf.summary.merge_all()
    return summary_op
def conv2lstm_layer(inputs, num_fm):
    """
    make the conv_out flat for rnn input
    :param inputs: 4-D conv activation tensor
    :param num_fm: # final output feature maps.
    :return: outputs: flattened output. shape = (batch_size, num_fm)
    """
    shape = inputs.get_shape().as_list()
    # Kernel covers the entire spatial extent, so VALID padding collapses
    # each example to a 1x1xnum_fm activation.
    W = weight_variable([shape[1], shape[2], shape[3], num_fm], name="last_conv_w")
    b = bias_variable([num_fm], name="last_conv_b")
    conv_last = conv2d_basic_VALID(inputs, W, b)
    outputs = tf.nn.relu(conv_last, name="last_relu")
    return outputs
def batch_norm_affine_transform(x, output_dim, decay=0, name=None, seed=0, is_training=True):
    """
    affine transformation Wx+b followed by batch normalization
    assumes x.shape = (batch_size, num_features)
    """
    # initializer = tf.contrib.layers.xavier_initializer(seed=seed)
    # Xavier-initialized weights, zero-initialized bias.
    w = tf.get_variable(name+"_w", [x.get_shape()[1], output_dim], initializer = tf.contrib.layers.xavier_initializer(seed=seed))
    b = tf.get_variable(name+"_b", [output_dim], initializer=tf.constant_initializer(0.0))
    affine_result = tf.matmul(x, w) + b
    # updates_collections=None forces moving-average updates in place.
    batch_norm_result = tf.contrib.layers.batch_norm(affine_result, decay=decay, is_training=is_training,
                                                     updates_collections=None)
    return batch_norm_result
def bdnn_transform(inputs, w, u):
    """Stack each frame with its bDNN context frames.

    :param inputs: array of shape (batch_size, feature_size)
    :param w: half-width of the context window
    :param u: subsampling step for the outer context frames
    :return: array of shape (batch_size, feature_size * n_offsets), where the
             offsets are {-w..-u-1 step u} + {-1, 0, 1} + {u+1..w step u};
             frames rolled past the end read zero padding.
    """
    offsets = np.concatenate((np.arange(-w, -u, u),
                              np.array([-1, 0, 1]),
                              np.arange(1 + u, w + 1, u)), axis=0)
    n_frames = inputs.shape[0]
    padded = np.zeros((n_frames + 2 * w, inputs.shape[1]))
    padded[0:n_frames, :] = inputs
    shifted = [np.roll(padded, -1 * offsets[k], axis=0)[0:n_frames, :]
               for k in range(offsets.shape[0])]
    stacked = np.transpose(np.asarray(shifted), [1, 0, 2])
    return np.reshape(stacked, (n_frames, -1))
def bdnn_prediction(batch_size, logits, threshold=0.6, w=19, u=9):
    """Fold bDNN window outputs back into per-frame predictions.

    Each frame appears in several context windows; its soft score is the mean
    of all window outputs that cover it. Returns (binary prediction, soft
    score), both shaped (batch_size, 1) after padding frames are trimmed.
    """
    bdnn_batch_size = batch_size + 2*w
    result = np.zeros((bdnn_batch_size, 1))
    # Build an index map: which windowed positions correspond to each frame.
    indx = np.arange(bdnn_batch_size) + 1
    indx = indx.reshape((bdnn_batch_size, 1))
    indx = bdnn_transform(indx, w, u)
    indx = indx[w:(bdnn_batch_size-w), :]
    indx_list = np.arange(w, bdnn_batch_size - w)
    for i in indx_list:
        indx_temp = np.where((indx-1) == i)
        pred = logits[indx_temp]
        pred = np.sum(pred)/pred.shape[0]
        # NOTE(review): the tiny random jitter presumably keeps genuine zero
        # scores from being stripped by trim_zeros below, but it makes
        # results near the threshold nondeterministic — confirm intent.
        result[i] = pred + np.random.rand(1)*1e-4
    result = np.trim_zeros(result)
    soft_result = np.float32(result)
    result = result >= threshold
    return result.astype(int), soft_result
def clipped_relu(x, name=None):
    """Clipped ReLU: shift x by +0.5 and clip the result to [0, 1].

    :param x: input tensor
    :param name: kept for backward compatibility; no longer used
    """
    # Bug fix: the original created a trainable variable (name + 'proposed')
    # that was never referenced — it bloated checkpoints and made a second
    # call in the same variable scope fail. It is removed here.
    x = tf.maximum((x+0.5), 0)
    x = tf.minimum(x, 1)
    return x
def dense_to_one_hot(labels_dense, num_classes=2):
    """Convert class labels from scalars to one-hot vectors (float32)."""
    # copied from TensorFlow tutorial
    count = labels_dense.shape[0]
    one_hot = np.zeros((count, num_classes))
    rows = np.arange(count)
    cols = labels_dense.ravel().astype(int)
    one_hot[rows, cols] = 1
    return one_hot.astype(np.float32)
def do_validation(m_valid, sess, valid_file_dir, norm_dir, type='DNN'):
    """Run one full validation pass and return (avg accuracy, avg cost).

    Bug fix: the original compared strings with `is` (identity), which is
    implementation-dependent (and a SyntaxWarning on modern CPython); the
    comparisons below use `==` instead.
    NOTE(review): an unrecognized `type` value raises NameError at the final
    return, since the accumulators are only bound inside the branches.
    """
    # dataset reader setting #
    # sys.path.insert(0, prj_dir + '/configure/DNN')
    if type == 'DNN':
        sys.path.insert(0, os.path.abspath('../../configure/DNN'))
        import config as cg
        valid_batch_size = cg.batch_size
        valid_data_set = dnn_dr.DataReader(valid_file_dir, valid_file_dir+'/Labels', norm_dir, w=cg.w,
                                           u=cg.u, name="eval")
        avg_valid_accuracy = 0.
        avg_valid_cost = 0.
        itr_sum = 0.
        # Per-file running averages; flushed on every file boundary.
        accuracy_list = [0 for i in range(valid_data_set._file_len)]
        cost_list = [0 for i in range(valid_data_set._file_len)]
        itr_file = 0
        while True:
            valid_inputs, valid_labels = valid_data_set.next_batch(valid_batch_size)
            if valid_data_set.file_change_checker():
                # print(itr_file)
                accuracy_list[itr_file] = avg_valid_accuracy / itr_sum
                cost_list[itr_file] = avg_valid_cost / itr_sum
                avg_valid_cost = 0.
                avg_valid_accuracy = 0.
                itr_sum = 0
                itr_file += 1
                valid_data_set.file_change_initialize()
            if valid_data_set.eof_checker():
                valid_data_set.reader_initialize()
                print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break
            one_hot_labels = valid_labels.reshape((-1, 1))
            one_hot_labels = dense_to_one_hot(one_hot_labels, num_classes=2)
            feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: one_hot_labels,
                         m_valid.keep_probability: 1}
            # valid_cost, valid_softpred, valid_raw_labels\
            #     = sess.run([m_valid.cost, m_valid.softpred, m_valid.raw_labels], feed_dict=feed_dict)
            #
            # fpr, tpr, thresholds = metrics.roc_curve(valid_raw_labels, valid_softpred, pos_label=1)
            # valid_auc = metrics.auc(fpr, tpr)
            valid_cost, valid_accuracy = sess.run([m_valid.cost, m_valid.accuracy], feed_dict=feed_dict)
            avg_valid_accuracy += valid_accuracy
            avg_valid_cost += valid_cost
            itr_sum += 1
        total_avg_valid_accuracy = np.asscalar(np.mean(np.asarray(accuracy_list)))
        total_avg_valid_cost = np.asscalar(np.mean(np.asarray(cost_list)))
    elif type == 'bDNN':
        sys.path.insert(0, os.path.abspath('../../configure/bDNN'))
        import config as cg
        valid_batch_size = cg.batch_size
        valid_data_set = dr.DataReader(valid_file_dir, valid_file_dir + '/Labels', norm_dir, w=cg.w,
                                       u=cg.u, name="eval")
        avg_valid_accuracy = 0.
        avg_valid_cost = 0.
        itr_sum = 0.
        accuracy_list = [0 for i in range(valid_data_set._file_len)]
        cost_list = [0 for i in range(valid_data_set._file_len)]
        itr_file = 0
        while True:
            valid_inputs, valid_labels = valid_data_set.next_batch(valid_batch_size)
            if valid_data_set.file_change_checker():
                # print(itr_file)
                accuracy_list[itr_file] = avg_valid_accuracy / itr_sum
                cost_list[itr_file] = avg_valid_cost / itr_sum
                avg_valid_cost = 0.
                avg_valid_accuracy = 0.
                itr_sum = 0
                itr_file += 1
                valid_data_set.file_change_initialize()
            if valid_data_set.eof_checker():
                valid_data_set.reader_initialize()
                print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break
            feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: valid_labels,
                         m_valid.keep_probability: 1}
            valid_cost, valid_logits = sess.run([m_valid.cost, m_valid.logits], feed_dict=feed_dict)
            # Fold bDNN window outputs back into per-frame decisions.
            valid_pred, soft_pred = bdnn_prediction(valid_batch_size, valid_logits, threshold=0.6)
            # print(np.sum(valid_pred))
            raw_indx = int(np.floor(valid_labels.shape[1] / 2))
            raw_labels = valid_labels[:, raw_indx]
            raw_labels = raw_labels.reshape((-1, 1))
            valid_accuracy = np.equal(valid_pred, raw_labels)
            valid_accuracy = valid_accuracy.astype(int)
            valid_accuracy = np.sum(valid_accuracy) / valid_batch_size
            avg_valid_cost += valid_cost
            avg_valid_accuracy += valid_accuracy
            itr_sum += 1
        total_avg_valid_accuracy = np.asscalar(np.mean(np.asarray(accuracy_list)))
        total_avg_valid_cost = np.asscalar(np.mean(np.asarray(cost_list)))
    elif type == 'ACAM':
        sys.path.insert(0, os.path.abspath('../../configure/ACAM'))
        import config as cg
        valid_batch_size = cg.batch_size
        valid_data_set = dr.DataReader(valid_file_dir, valid_file_dir+'/Labels', norm_dir, w=cg.w,
                                       u=cg.u, name="eval")
        avg_valid_accuracy = 0.
        avg_valid_cost = 0.
        itr_sum = 0.
        accuracy_list = [0 for i in range(valid_data_set._file_len)]
        cost_list = [0 for i in range(valid_data_set._file_len)]
        itr_file = 0
        while True:
            valid_inputs, valid_labels = valid_data_set.next_batch(valid_batch_size)
            if valid_data_set.file_change_checker():
                # print(itr_file)
                accuracy_list[itr_file] = avg_valid_accuracy / itr_sum
                cost_list[itr_file] = avg_valid_cost / itr_sum
                avg_valid_cost = 0.
                avg_valid_accuracy = 0.
                itr_sum = 0
                itr_file += 1
                valid_data_set.file_change_initialize()
            if valid_data_set.eof_checker():
                valid_data_set.reader_initialize()
                print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break
            feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: valid_labels,
                         m_valid.keep_probability: 1}
            # valid_cost, valid_softpred, valid_raw_labels\
            #     = sess.run([m_valid.cost, m_valid.softpred, m_valid.raw_labels], feed_dict=feed_dict)
            #
            # fpr, tpr, thresholds = metrics.roc_curve(valid_raw_labels, valid_softpred, pos_label=1)
            # valid_auc = metrics.auc(fpr, tpr)
            valid_cost, valid_accuracy = sess.run([m_valid.cost, m_valid.reward], feed_dict=feed_dict)
            avg_valid_accuracy += valid_accuracy
            avg_valid_cost += valid_cost
            itr_sum += 1
        total_avg_valid_accuracy = np.asscalar(np.mean(np.asarray(accuracy_list)))
        total_avg_valid_cost = np.asscalar(np.mean(np.asarray(cost_list)))
    elif type == 'LSTM':
        sys.path.insert(0, os.path.abspath('../../configure/LSTM'))
        import config as cg
        valid_batch_size = cg.seq_len * cg.num_batches
        valid_data_set = rnn_dr.DataReader(valid_file_dir, valid_file_dir+'/Labels', norm_dir, target_delay=cg.target_delay,
                                           name="eval")
        avg_valid_accuracy = 0.
        avg_valid_cost = 0.
        itr_sum = 0.
        accuracy_list = [0 for i in range(valid_data_set._file_len)]
        cost_list = [0 for i in range(valid_data_set._file_len)]
        itr_file = 0
        while True:
            valid_inputs, valid_labels = valid_data_set.next_batch(valid_batch_size)
            if valid_data_set.file_change_checker():
                # print(itr_file)
                accuracy_list[itr_file] = avg_valid_accuracy / itr_sum
                cost_list[itr_file] = avg_valid_cost / itr_sum
                avg_valid_cost = 0.
                avg_valid_accuracy = 0.
                itr_sum = 0
                itr_file += 1
                valid_data_set.file_change_initialize()
            if valid_data_set.eof_checker():
                valid_data_set.reader_initialize()
                print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break
            one_hot_labels = valid_labels.reshape((-1, 1))
            one_hot_labels = dense_to_one_hot(one_hot_labels, num_classes=2)
            feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: one_hot_labels,
                         m_valid.keep_probability: 1}
            # valid_cost, valid_softpred, valid_raw_labels\
            #     = sess.run([m_valid.cost, m_valid.softpred, m_valid.raw_labels], feed_dict=feed_dict)
            #
            # fpr, tpr, thresholds = metrics.roc_curve(valid_raw_labels, valid_softpred, pos_label=1)
            # valid_auc = metrics.auc(fpr, tpr)
            valid_cost, valid_accuracy = sess.run([m_valid.cost, m_valid.accuracy], feed_dict=feed_dict)
            avg_valid_accuracy += valid_accuracy
            avg_valid_cost += valid_cost
            itr_sum += 1
        total_avg_valid_accuracy = np.asscalar(np.mean(np.asarray(accuracy_list)))
        total_avg_valid_cost = np.asscalar(np.mean(np.asarray(cost_list)))
    return total_avg_valid_accuracy, total_avg_valid_cost
| 29,265
| 37.106771
| 129
|
py
|
VAD
|
VAD-master/lib/python/model.py
|
import tensorflow as tf
import numpy as np
import utils as utils
import re
import data_reader_bDNN as dr
from tensorflow.contrib import rnn
# Hyper-parameters for the attention (ACAM)/bDNN VAD model.
SEED = 1
w = 19 # w default = 19
u = 9 # u default = 9
assert (w-1) % u == 0, "w-1 must be divisible by u"
num_features = 768 # for MRCG feature
# Number of frames in a bDNN context window; float here, cast to int below.
bdnn_winlen = (((w-1) / u) * 2) + 3
bdnn_inputsize = int(bdnn_winlen * num_features)
bdnn_outputsize = int(bdnn_winlen)
initLr = 0.000605
# initLr = 0.000605 # default : 0.000970598, 0.000605
lrDecayRate = .95
lrDecayFreq = 200 # default : 200
decay = 0.9 # batch normalization decay factor
rf_threshold = 0.5 # 0.5
SMALL_NUM = 1e-5
clip_th = 11 # default : 0.90491669
# Glimpse / baseline-predictor network sizes.
glimpse_hidden = 128
bp_hidden = 128
glimpse_out = bp_out = 128
nGlimpses = 7 # 7
lstm_cell_size = 128
action_hidden_1 = 256 # default : 256
action_hidden_2 = 256 # default : 256
class Model(object):
def __init__(self, batch_size, reuse=None, is_training=True):
self.cell_outputs = []
self.batch_size = batch_size
self.keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
self.inputs = tf.placeholder(tf.float32, shape=[batch_size, bdnn_inputsize],
name="inputs")
self.labels = tf.placeholder(tf.float32, shape=[batch_size, bdnn_outputsize], name="labels")
self.is_training = is_training
self.mean_bps = []
self.sampled_bps = []
self.baselines = []
self.global_step = tf.Variable(0, trainable=False)
self.lr = tf.train.exponential_decay(initLr, self.global_step, lrDecayFreq, lrDecayRate, staircase=True)
self.raw_reward = 0
# set inference graph
cell_outputs = self.inference(reuse) # (batch_size, bdnn_outputsize)
# set objective function
self.cost, self.reward, self.train_op, self.avg_b, self.rminusb, self.sampled_bps_tensor, self.p_bps,\
self.print_lr, self.soft_result, self.raw_labels = self.calc_reward(cell_outputs)
def inference(self, reuse=None):
# initialization
raw_inputs = self.inputs
batch_size = self.batch_size
keep_prob = self.keep_probability
is_training = self.is_training
tf.set_random_seed(SEED) # initialize the random seed at graph level
lstm_cell = rnn.LayerNormBasicLSTMCell(lstm_cell_size, dropout_keep_prob=keep_prob, reuse=reuse,
dropout_prob_seed=SEED)
# lstm_cell = rnn.BasicRNNCell(lstm_cell_size, reuse=reuse)
initial_state = lstm_cell.zero_state(batch_size, tf.float32)
init_sw = tf.ones([batch_size, int(bdnn_winlen)]) * 0 # start sign
self.mean_bps.append(init_sw)
init_sw = tf.cast(tf.greater(init_sw, 0.4), tf.float32)
self.sampled_bps.append(init_sw)
reuse_recurrent = False
init_glimpse = self.get_glimpse(raw_inputs, init_sw, reuse=reuse_recurrent) # (batch_size, glimpse_out)
inputs = [0] * nGlimpses
outputs = [0] * nGlimpses
glimpse = init_glimpse
for time_step in range(nGlimpses):
if time_step == 0:
with tf.variable_scope("core_network", reuse=reuse_recurrent):
(cell_output, cell_state) = lstm_cell(glimpse, initial_state)
self.cell_outputs.append(initial_state)
else:
reuse_recurrent = True
with tf.variable_scope("core_network", reuse=reuse_recurrent):
(cell_output, cell_state) = lstm_cell(glimpse, cell_state)
inputs[time_step] = glimpse
outputs[time_step] = cell_output
if time_step != nGlimpses - 1: # not final time_step
glimpse = self.get_next_input(cell_output, reuse=reuse_recurrent)
else: # final time_step
with tf.variable_scope("baseline", reuse=reuse_recurrent):
baseline = tf.sigmoid(utils.affine_transform(((cell_output)), 1, name='baseline'))
self.baselines.append(baseline)
return outputs
def get_glimpse(self, inputs, bp, reuse=None):
is_training = self.is_training
with tf.variable_scope("glimpse_net", reuse=reuse):
glimpse_input = sw_sensor(inputs, bp)
act_glimpse_hidden = tf.nn.relu(
utils.batch_norm_affine_transform(glimpse_input, glimpse_hidden, decay=decay,
name='glimpse_hidden', seed=SEED, is_training=is_training))
act_bp_hidden = tf.nn.relu(utils.batch_norm_affine_transform(bp, bp_hidden, decay=decay, name='bp_hidden',
seed=SEED, is_training=is_training))
glimpse_feature = tf.nn.relu(utils.batch_norm_affine_transform(act_glimpse_hidden, glimpse_out, decay=decay,
name='glimpse_out',
seed=SEED, is_training=is_training) +
utils.batch_norm_affine_transform(act_bp_hidden, bp_out, decay=decay,
seed=SEED, name='bp_out', is_training=is_training))
return glimpse_feature
def get_next_input(self, cell_output, reuse=None):
raw_inputs = self.inputs
is_training = self.is_training
with tf.variable_scope("baseline", reuse=reuse):
baseline = tf.sigmoid(utils.affine_transform((((cell_output))), 1, name='baseline'))
self.baselines.append(baseline)
with tf.variable_scope("selection_network", reuse=reuse):
mean_bp = smooth_softmax(
utils.batch_norm_affine_transform((cell_output), int(bdnn_winlen), decay=decay, name='selection',
is_training=is_training))
# mean_bp = softmax(
# utils.batch_norm_affine_transform(cell_output, int(bdnn_winlen), decay=decay, name='selection',
# is_training=is_training), beta)
self.mean_bps.append(mean_bp)
# rand_seq = tf.random_uniform(mean_bp.get_shape().as_list(), minval=0, maxval=1, seed=SEED)
if is_training:
sampled_bp = tf.multinomial(mean_bp, num_samples=1, seed=SEED)
sampled_bp = utils.onehot_tensor(sampled_bp, bdnn_winlen)
else:
sampled_bp = mean_bp
sampled_bp = tf.stop_gradient(sampled_bp)
self.sampled_bps.append(sampled_bp)
return self.get_glimpse(raw_inputs, mean_bp, reuse=True)
def action_network(self, outputs):
is_training = self.is_training
with tf.variable_scope("action_network"):
h1_out = tf.nn.relu(utils.batch_norm_affine_transform(outputs, action_hidden_1,
decay=decay, name='action_hidden_1',
seed=SEED, is_training=is_training))
h1_out = tf.nn.dropout(h1_out, keep_prob=self.keep_probability, seed=SEED)
h2_out = tf.nn.relu(utils.batch_norm_affine_transform(h1_out, action_hidden_2,
decay=decay, name='action_hidden_2', seed=SEED,
is_training=is_training))
h2_out = tf.nn.dropout(h2_out, keep_prob=self.keep_probability, seed=SEED)
return h2_out
def bdnn_prediction(self, logits, threshold):
batch_size_tensor = tf.constant(self.batch_size+2*w, dtype=tf.float32)
th_tenor = tf.constant(threshold, dtype=tf.float32)
result, soft_result = tf.py_func(bdnn_prediction, [batch_size_tensor, logits, th_tenor], Tout=[tf.float32, tf.float32])
return result, soft_result
@staticmethod
def np_trim_zeros(x):
return np.trim_zeros(x)
    def calc_reward(self, outputs):
        """Build the hybrid supervised + REINFORCE objective and train op.

        Uses the last-step cell output to predict VAD logits, computes a
        per-example reward (hard-prediction correctness vs. the center label),
        and combines a supervised squared-error term with a REINFORCE term
        over the sampled attention windows, with a learned baseline ``b``
        for variance reduction. Returns the tuple of tensors the training
        loop monitors.
        """
        batch_size = self.batch_size
        # consider the action at the last time step
        outputs = outputs[-1]
        outputs = tf.reshape(outputs, (batch_size, lstm_cell_size))
        # get the baseline
        b = tf.stack(self.baselines)
        b = tf.tile(b, [1, 1, 1])
        b = tf.reshape(tf.transpose(b, [1, 0, 2]), [batch_size, nGlimpses])
        no_grad_b = tf.stop_gradient(b)
        # get the action
        action_out = self.action_network(outputs)
        logits = tf.sigmoid(utils.affine_transform(action_out, int(bdnn_outputsize), seed=SEED, name="softmax"))
        result, soft_result = self.bdnn_prediction(logits, threshold=rf_threshold)
        # convert list of tensors to one big tensor
        mean_bps = tf.concat(axis=0, values=self.mean_bps)
        mean_bps = tf.reshape(mean_bps, (nGlimpses, self.batch_size, int(bdnn_winlen)))
        mean_bps = tf.transpose(mean_bps, [1, 0, 2])
        sampled_bps = tf.concat(axis=0, values=self.sampled_bps)
        sampled_bps = tf.reshape(sampled_bps, (nGlimpses, self.batch_size, int(bdnn_winlen)))
        sampled_bps = tf.transpose(sampled_bps, [1, 0, 2])
        # reward for all examples in the batch (binary: prediction == center label)
        raw_indx = int(np.floor(bdnn_outputsize / 2))
        raw_labels = self.labels[:, raw_indx]
        raw_labels = tf.reshape(raw_labels, shape=(-1, 1))
        R = tf.cast(tf.equal(result, raw_labels), tf.float32)
        soft_R = tf.stop_gradient(tf.cast(tf.abs(tf.subtract(1 - soft_result, raw_labels)), tf.float32))
        soft_R = tf.reshape(soft_R, (batch_size, 1))
        soft_R = tf.tile(soft_R, [1, nGlimpses])
        # R = tf.cast(tf.abs(tf.subtract(1 - soft_result, raw_labels)), tf.float32)
        R = tf.stop_gradient(R)
        R = tf.reshape(R, (batch_size, 1))
        self.raw_reward = R
        R = tf.tile(R, [1, nGlimpses])
        reward = tf.reduce_mean(R)
        # probability of the sampled windows under the selection distribution
        p_bps = multinomial_pmf(mean_bps, sampled_bps)
        p_bps = tf.reshape(p_bps, (self.batch_size, nGlimpses))
        # define the cost function: supervised part + policy-gradient part
        sv_part = -tf.square(self.labels - logits)
        # rf_part = tf.log(p_bps + SMALL_NUM) * (soft_R - no_grad_b)
        rf_part = tf.log(p_bps + SMALL_NUM) * (R - no_grad_b)
        # J = sv_part
        J = tf.concat(axis=1, values=[sv_part, rf_part])  # comment for sv only
        J = tf.reduce_sum(J, 1)
        J = J - tf.reduce_mean(tf.square(R - b), 1)  # comment for sv only
        J = tf.reduce_mean(J, 0)
        # cost = -J
        cost = -tf.reduce_mean(J)
        var_list = tf.trainable_variables()
        grads = tf.gradients(cost, var_list)
        grads, _ = tf.clip_by_global_norm(grads, clip_th)
        optimizer = tf.train.AdamOptimizer(self.lr)
        train_op = optimizer.apply_gradients(zip(grads, var_list), global_step=self.global_step)
        return cost, reward, train_op, tf.reduce_mean(b), tf.reduce_mean(R - b), \
               sampled_bps, tf.reduce_mean(p_bps), self.lr, soft_result, raw_labels
def smooth_softmax(x):
    """Row-normalized elementwise sigmoid: each row of the result sums to 1."""
    sig = tf.sigmoid(x)
    row_totals = tf.reduce_sum(sig, axis=1)
    return sig / tf.expand_dims(row_totals, axis=1)
def softmax(x, b):
    """Softmax over axis 1 with inverse temperature *b* (larger b = sharper)."""
    scaled = tf.exp(b * x)
    return scaled / tf.expand_dims(tf.reduce_sum(scaled, axis=1), axis=1)
def sw_sensor(inputs, bp):
    """Gate *inputs* by the per-window selection weights *bp*.

    Each window weight in ``bp`` is repeated ``num_features`` times so that
    it multiplies the full feature vector of its window; the result is the
    element-wise product with ``inputs``.
    """
    bp = tf.expand_dims(bp, axis=2)
    bp = tf.tile(bp, (1, 1, num_features))
    # flatten windows x features back to the input layout before multiplying
    bp = tf.reshape(bp, (inputs.get_shape()[0].value, -1, 1))
    bp = tf.squeeze(bp)
    sw = bp * inputs
    return sw
def multinomial_pmf(mean, sample):
    """
    Probability of each sampled window under the selection distribution.

    :param mean: window probabilities; reduced over axis 2, so a 3-D tensor
                 is expected (caller passes (batch_size, nGlimpses, winlen))
    :param sample: one-hot (or soft) samples, same shape as ``mean``
    :return: p_br: per-glimpse probability, shape = (batch_size, nGlimpses)
    """
    p_br = tf.reduce_prod(tf.pow(mean, sample), axis=2)
    return p_br
def bdnn_prediction(bdnn_batch_size, logits, threshold=0.5):
    """bDNN-style frame decision: average each frame's votes, then threshold.

    For every frame index, collect the logits of all boundary windows
    (via ``utils.bdnn_transform`` with module globals ``w``/``u``) that
    cover it, average them, and threshold at *threshold*. Returns
    (hard float32 decisions, soft float32 averages) with zero-padding
    trimmed. NOTE(review): the trailing trim uses ``np.trim_zeros`` on the
    averaged scores, so legitimate 0.0 averages at the edges would also be
    trimmed — presumably never happens with sigmoid outputs; verify.
    """
    result = np.zeros((int(bdnn_batch_size), 1))
    indx = np.arange(int(bdnn_batch_size)) + 1
    indx = indx.reshape((int(bdnn_batch_size), 1))
    indx = utils.bdnn_transform(indx, w, u)
    indx = indx[w:(int(bdnn_batch_size)-w), :]
    indx_list = np.arange(w, int(bdnn_batch_size) - w)
    for i in indx_list:
        indx_temp = np.where((indx-1) == i)
        pred = logits[indx_temp]
        pred = np.sum(pred)/pred.shape[0]
        result[i] = pred
    result = np.trim_zeros(result)
    soft_result = np.float32(result)
    result = np.float32(result) >= threshold
    return result.astype(np.float32), soft_result
def summary_generation(eval_file_dir):
    """Create per-noise-type and averaged TensorBoard scalar summaries.

    One scalar summary per (noise directory, metric in ``summary_list``),
    plus three averaged-result summaries, all fed through the single shared
    placeholder stored under key ``"summary_ph"``.
    """
    summary_dic = {}
    noise_list = os.listdir(eval_file_dir)
    noise_list = sorted(noise_list)
    summary_dic["summary_ph"] = summary_ph = tf.placeholder(dtype=tf.float32)
    for name in noise_list:
        with tf.variable_scope(name):
            for summary_name in summary_list:
                summary_dic[name+"_"+summary_name] = tf.summary.scalar(summary_name, summary_ph)
    with tf.variable_scope("Averaged_Results"):
        summary_dic["cost_across_all_noise_types"] = tf.summary.scalar("cost_across_all_noise_types", summary_ph)
        summary_dic["accuracy_across_all_noise_types"]\
            = tf.summary.scalar("accuracy_across_all_noise_types", summary_ph)
        summary_dic["variance_across_all_noise_types"]\
            = tf.summary.scalar("variance_across_all_noise_types", summary_ph)
    return summary_dic
def dense_to_one_hot(labels_dense, num_classes=2):
    """Convert class labels from scalars to one-hot vectors.

    Adapted from the TensorFlow tutorial; returns float32.
    """
    count = labels_dense.shape[0]
    one_hot = np.zeros((count, num_classes))
    flat_positions = (np.arange(count) * num_classes + labels_dense.ravel()).astype(int)
    one_hot.flat[flat_positions] = 1
    return one_hot.astype(np.float32)
| 14,036
| 37.563187
| 127
|
py
|
VAD
|
VAD-master/lib/python/VAD_LSTM.py
|
import tensorflow as tf
import numpy as np
import utils_jskim as utils
import re
import data_reader_RNN as dr
import os
import matplotlib.pyplot as plt
from tensorflow.contrib import rnn
from sklearn import metrics
import time
# --- Module configuration: seeds, dataset paths, checkpoint/log locations ---
# NOTE(review): paths are machine-specific absolute paths; adjust per host.
FLAGS = tf.flags.FLAGS
SEED = 1
tf.set_random_seed(SEED)
tf.flags.DEFINE_string('mode', "test", "mode : train/ test [default : train]")
file_dir = "/home/sbie/storage/VAD_Database/SE_TIMIT_MRCG_0328"
input_dir = file_dir
output_dir = file_dir + "/Labels"
valid_file_dir = "/home/sbie/storage/VAD_Database/NX_TIMIT_MRCG_small"
test_file_dir = "/home/sbie/storage2/VAD_Database/NX_TIMIT_MRCG_small"
norm_dir = input_dir
logs_dir = "/home/sbie/github/VAD_Project_test/VAD_LSTM/logs_LSTM"
initial_logs_dir = "/home/sbie/github/VAD_Project_test/VAD_LSTM/logs_LSTM"
ckpt_name = "/model.ckpt-12000"
reset = False  # remove all existed logs and initialize log directories
device = "/gpu:1"
mode = 'test'
# Never wipe existing logs when running in test mode.
# FIX(review): the original compared strings with `is` (`mode is 'test'`),
# which relies on CPython literal interning and emits a SyntaxWarning on
# Python 3.8+; `==` expresses the intended value comparison.
if mode == 'test':
    reset = False

if reset:
    os.popen('rm -rf ' + logs_dir + '/*')
    os.popen('mkdir ' + logs_dir + '/train')
    os.popen('mkdir ' + logs_dir + '/valid')
# --- Hyperparameters and feature-geometry constants ---
summary_list = ["cost", "accuracy_SNR_-5", "accuracy_SNR_0", "accuracy_SNR_5", "accuracy_SNR_10",
                "accuracy_across_all_SNRs"]
learning_rate = 0.0001
eval_num_batches = 2e4
SMALL_NUM = 1e-4
max_epoch = int(1e5)
dropout_rate = 0.5
decay = 0.9  # batch normalization decay factor
w = 19  # w default = 19
u = 9  # u default = 9
eval_th = 0.6
th = 0.5
lstm_cell_size = 512
num_hidden_1 = 128
num_hidden_2 = 256
num_hidden_3 = 128
seq_size = 40
batch_size = 1*seq_size  # batch_size = 32
valid_batch_size = batch_size
assert (w-1) % u == 0, "w-1 must be divisible by u"
width = 768
num_features = 768  # MRCG feature
bdnn_winlen = (((w-1) / u) * 2) + 3
# bdnn_inputsize = int(bdnn_winlen * num_features)
bdnn_inputsize = num_features
bdnn_outputsize = 2  # int(bdnn_winlen)
initLr = 1e-5
scope_name = 'RNN_scope'
eval_type = 2
def test_config(c_test_dir, c_norm_dir, c_initial_logs_dir, c_batch_size_eval, c_data_len):
global test_file_dir
global norm_dir
global initial_logs_dir
global ckpt_name
global valid_batch_size
global data_len
test_file_dir = c_test_dir
norm_dir = c_norm_dir
initial_logs_dir = c_initial_logs_dir
valid_batch_size = c_batch_size_eval
data_len = c_data_len
def affine_transform(x, output_dim, name=None):
    """
    affine transformation Wx+b
    assumes x.shape = (batch_size, num_features)

    Variables are created via ``tf.get_variable`` (name + "_w"/"_b"), so
    reuse follows the enclosing variable scope.
    """
    w = tf.get_variable(name + "_w", [x.get_shape()[1], output_dim],
                        initializer=tf.truncated_normal_initializer(stddev=0.02))
    b = tf.get_variable(name + "_b", [output_dim], initializer=tf.constant_initializer(0.0))
    return tf.matmul(x, w) + b
def inference(inputs, keep_prob, is_training=True, reuse=None):
    """Three-layer LSTM VAD graph: inputs -> LSTM stack -> dropout -> logits.

    The input is reshaped to (batch, seq_size + w, num_features) sequences;
    per-step outputs are projected to ``bdnn_outputsize`` logits and the
    first ``w`` context frames are dropped from the returned slice.
    NOTE(review): the slice uses the module-level ``batch_size``, so this
    graph is tied to that fixed batch size.
    """
    # initialization
    # c1_out = affine_transform(inputs, num_hidden_1, name="hidden_1")
    # inputs_shape = inputs.get_shape().as_list()
    with tf.variable_scope(scope_name):
        print('inference time')
        print(inputs.get_shape().as_list())
        in_rnn = tf.reshape(inputs,[-1, seq_size+w, num_features])
        stacked_rnn = []
        for iiLyr in range(3):
            stacked_rnn.append(tf.nn.rnn_cell.LSTMCell(num_units=lstm_cell_size, state_is_tuple=True))
        MultiLyr_cell = tf.nn.rnn_cell.MultiRNNCell(cells=stacked_rnn, state_is_tuple=True)
        outputs, _state = tf.nn.dynamic_rnn(MultiLyr_cell, in_rnn, time_major=False, dtype=tf.float32)
        outputs = tf.reshape(outputs,[-1,lstm_cell_size])
        outputs = tf.nn.dropout(outputs, keep_prob=keep_prob)
        # # h1_out = affine_transform(inputs, num_hidden_1, name="hidden_1")
        # lh1_out = utils.batch_norm_affine_transform(outputs, num_hidden_1, name="lhidden_1", decay=decay,
        #                                             is_training=is_training)
        # lh1_out = tf.nn.relu(lh1_out)
        # lh1_out = tf.nn.dropout(lh1_out, keep_prob=keep_prob)
        logits = affine_transform(outputs, bdnn_outputsize, name="output1")
        # logits = tf.sigmoid(logits)
        logits = tf.reshape(logits, [-1, int(bdnn_outputsize)])
        print(logits.get_shape().as_list())
        print('asdf')
        return logits[w:batch_size+w,:]
def train(loss_val, var_list):
    """Return an Adam train op with exponentially decaying learning rate.

    Decays ``initLr`` by 0.95 every 200 global steps (staircase); the
    global-step variable is created here and incremented by the op.
    """
    lrDecayRate = .95
    lrDecayFreq = 200
    global_step = tf.Variable(0, trainable=False)
    lr = tf.train.exponential_decay(initLr, global_step, lrDecayFreq, lrDecayRate, staircase=True)
    optimizer = tf.train.AdamOptimizer(lr)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    return optimizer.apply_gradients(grads, global_step=global_step)
def bdnn_prediction(bdnn_batch_size, logits, threshold=th):
    """Average each frame's boundary-window votes and threshold to 0/1.

    Same vote-averaging scheme as the other bDNN helpers in this project
    (``utils.bdnn_transform`` with module globals ``w``/``u``); returns an
    int array of hard decisions with zero-padding trimmed.
    """
    result = np.zeros((bdnn_batch_size, 1))
    indx = np.arange(bdnn_batch_size) + 1
    indx = indx.reshape((bdnn_batch_size, 1))
    indx = utils.bdnn_transform(indx, w, u)
    indx = indx[w:(bdnn_batch_size-w), :]
    indx_list = np.arange(w, bdnn_batch_size - w)
    for i in indx_list:
        indx_temp = np.where((indx-1) == i)
        pred = logits[indx_temp]
        pred = np.sum(pred)/pred.shape[0]
        result[i] = pred
    result = np.trim_zeros(result)
    result = result >= threshold
    return result.astype(int)
def summary_generation(eval_file_dir):
    """Create per-noise-type and averaged TensorBoard scalar summaries.

    One scalar summary per (noise directory, metric in ``summary_list``),
    plus four averaged-result summaries (cost, accuracy, variance, AUC),
    all fed through the shared placeholder stored under ``"summary_ph"``.
    """
    summary_dic = {}
    noise_list = os.listdir(eval_file_dir)
    noise_list = sorted(noise_list)
    summary_dic["summary_ph"] = summary_ph = tf.placeholder(dtype=tf.float32)
    for name in noise_list:
        with tf.variable_scope(name):
            for summary_name in summary_list:
                summary_dic[name+"_"+summary_name] = tf.summary.scalar(summary_name, summary_ph)
    with tf.variable_scope("Averaged_Results"):
        summary_dic["cost_across_all_noise_types"] = tf.summary.scalar("cost_across_all_noise_types", summary_ph)
        summary_dic["accuracy_across_all_noise_types"]\
            = tf.summary.scalar("accuracy_across_all_noise_types", summary_ph)
        summary_dic["variance_across_all_noise_types"]\
            = tf.summary.scalar("variance_across_all_noise_types", summary_ph)
        summary_dic["AUC_across_all_noise_types"]\
            = tf.summary.scalar("AUC_across_all_noise_types", summary_ph)
    return summary_dic
def full_evaluation(m_eval, sess_eval, batch_size_eval, eval_file_dir, summary_writer, summary_dic, itr):
    """Evaluate the model over every noise-type subdirectory of *eval_file_dir*.

    For each noise type: build a data reader, run ``evaluation``, print the
    per-SNR accuracy/AUC breakdown, and push the metrics to TensorBoard via
    the summaries created by ``summary_generation``. Finally logs and writes
    the metrics averaged across noise types and returns the mean accuracy.
    """
    mean_cost = []
    mean_accuracy = []
    mean_auc = []
    mean_time = []
    print("-------- Performance for each of noise types --------")
    noise_list = os.listdir(eval_file_dir)
    noise_list = sorted(noise_list)
    summary_ph = summary_dic["summary_ph"]
    for i in range(len(noise_list)):
        print("full time evaluation, now loading : %d",i)
        noise_name = '/' + noise_list[i]
        eval_input_dir = eval_file_dir + noise_name
        eval_output_dir = eval_file_dir + noise_name + '/Labels'
        ##########################################
        eval_calc_dir = eval_file_dir + noise_name + '/test_result'  # for Final layer information saving
        ##########################################
        eval_data_set = dr.DataReader(eval_input_dir, eval_output_dir, norm_dir, w=w, u=u, name="eval")
        eval_cost, eval_accuracy, eval_list, eval_auc, auc_list, eval_time = evaluation(m_eval, eval_data_set, sess_eval, batch_size_eval, noise_list[i], save_dir = eval_calc_dir)
        print("--noise type : " + noise_list[i])
        print("cost: %.3f, accuracy across all SNRs: %.3f, auc across all SNRs: %.3f " % (eval_cost, eval_accuracy, eval_auc))
        print('accuracy wrt SNR:')
        print('SNR_-5 : %.3f, SNR_0 : %.3f, SNR_5 : %.3f, SNR_10 : %.3f' % (eval_list[0], eval_list[1],
                                                                            eval_list[2], eval_list[3]))
        print('AUC wrt SNR:')
        print('SNR_-5 : %.3f, SNR_0 : %.3f, SNR_5 : %.3f, SNR_10 : %.3f' % (auc_list[0], auc_list[1],
                                                                            auc_list[2], auc_list[3]))
        eval_summary_list = [eval_cost] + eval_list + [eval_accuracy] + [eval_auc]
        for j, summary_name in enumerate(summary_list):
            summary_str = sess_eval.run(summary_dic[noise_list[i]+"_"+summary_name],
                                        feed_dict={summary_ph: eval_summary_list[j]})
            summary_writer.add_summary(summary_str, itr)
        mean_cost.append(eval_cost)
        mean_accuracy.append(eval_accuracy)
        mean_auc.append(eval_auc)
        mean_time.append(eval_time)
    mean_cost = np.mean(np.asarray(mean_cost))
    var_accuracy = np.var(np.asarray(mean_accuracy))
    mean_accuracy = np.mean(np.asarray(mean_accuracy))
    mean_auc = np.mean(np.asarray(mean_auc))
    mean_time = np.mean(np.asarray(mean_time))
    summary_writer.add_summary(sess_eval.run(summary_dic["cost_across_all_noise_types"],
                                             feed_dict={summary_ph: mean_cost}), itr)
    summary_writer.add_summary(sess_eval.run(summary_dic["accuracy_across_all_noise_types"],
                                             feed_dict={summary_ph: mean_accuracy}), itr)
    summary_writer.add_summary(sess_eval.run(summary_dic["variance_across_all_noise_types"],
                                             feed_dict={summary_ph: var_accuracy}), itr)
    summary_writer.add_summary(sess_eval.run(summary_dic["AUC_across_all_noise_types"],
                                             feed_dict={summary_ph: mean_auc}), itr)
    print("-------- Performance across all of noise types --------")
    print("cost : %.3f" % mean_cost)
    print("******* averaged accuracy across all noise_types : %.3f *******" % mean_accuracy)
    print("******* variance of accuracies across all noise_types : %6.6f *******" % var_accuracy)
    print("******* variance of AUC across all noise_types : %6.6f *******" % mean_auc)
    print("******* mean time : %6.6f *******" % mean_time)
    return mean_accuracy
def evaluation(m_valid, valid_data_set, sess, eval_batch_size, noise_name, num_batches=eval_num_batches,
               save_dir = None):
    """Run the model over one noise type's data set, file by file.

    Accumulates cost/accuracy/inference-time per batch; on each file
    boundary (``file_change_checker``) it finalizes the per-file averages
    and computes the file's AUC from the collected (logits, labels) pairs
    via ``utils.plot_ROC2``. Stops at end-of-data and returns the averages
    over files plus the per-file lists.
    NOTE(review): the time normalization assumes 16 kHz audio and the
    module-level ``batch_size`` — confirm against the feature frame rate.
    """
    # num_samples = valid_data_set.num_samples
    # num_batches = num_samples / batch_size
    avg_valid_cost = 0.
    avg_valid_accuracy = 0.
    avg_valid_time = 0.
    # AUC = 0.
    itr_sum = 0.
    file_num_before = -1
    accuracy_list = [0 for i in range(valid_data_set._file_len)]
    cost_list = [0 for i in range(valid_data_set._file_len)]
    auc_list = [0 for i in range(valid_data_set._file_len)]
    time_list = [0 for i in range(valid_data_set._file_len)]
    itr_file = 0
    channel_values = []
    save_calc_dir = ''
    valid_name_before = ''
    # plt.figure()
    while True:
        # valid_name_before = save_dir + 'test'+ [file_num_before] + '.txt'
        valid_inputs, valid_labels = valid_data_set.next_batch(eval_batch_size)
        if valid_data_set.file_change_checker():
            accuracy_list[itr_file] = avg_valid_accuracy / itr_sum
            cost_list[itr_file] = avg_valid_cost / itr_sum
            auc_list[itr_file] = utils.plot_ROC2(channel_values,file_num_before, noise_name)
            time_list[itr_file] = avg_valid_time / ((itr_sum*batch_size)/16000)
            avg_valid_accuracy = 0.
            avg_valid_cost = 0.
            avg_valid_time = 0.
            channel_values = []
            # avg_valid_auc = 0.
            itr_sum = 0
            itr_file += 1
            valid_data_set.file_change_initialize()
        if valid_data_set.eof_checker():
            #######
            # AUC = utils.plot_ROC2(valid_name_before, save_calc_dir,file_num_before, noise_name)
            # f_eval.close()
            #######
            valid_data_set.reader_initialize()
            print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
            break
        # if eval_batch_size * itr_file > 5000:
        #     f_eval.close()
        #     break
        one_hot_vlabels = valid_labels.reshape((-1, 1))
        one_hot_vlabels = dense_to_one_hot(one_hot_vlabels, num_classes=2)
        feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: one_hot_vlabels,
                     m_valid.keep_probability: 1}
        start_time = time.time()
        logits_val = sess.run(m_valid.logits, feed_dict=feed_dict)
        check_time = time.time()-start_time
        valid_cost, valid_accuracy = sess.run([m_valid.cost, m_valid.accuracy], feed_dict=feed_dict)
        # print(valid_labels.shape)
        # print(logits_val)
        save_val = np.concatenate([logits_val,valid_labels], axis = 1)
        ###############################################
        file_num = valid_data_set._num_file
        channel_values.append(save_val)
        # if file_num == file_num_before:
        #     with open(valid_calc_name,'a') as f_eval :
        #         np.savetxt(f_eval,save_val)
        #     # f_eval.write(logits_val)
        # else:
        #     # if file_num > 0:
        #     #     AUC = utils.plot_ROC2(valid_name_before,save_calc_dir,file_num_before, noise_name)
        #     with open(valid_calc_name,'w') as f_eval :
        #         np.savetxt(f_eval,save_val)
        avg_valid_cost += valid_cost
        avg_valid_accuracy += valid_accuracy
        avg_valid_time += check_time
        # avg_valid_auc += valid_auc
        itr_sum += 1
        ############################################
        file_num_before = valid_data_set._num_file
        ############################################
    total_avg_valid_cost = np.asscalar(np.mean(np.asarray(cost_list)))
    total_avg_valid_accuracy = np.asscalar(np.mean(np.asarray(accuracy_list)))
    total_avg_valid_auc = np.asscalar(np.mean(np.asarray(auc_list)))
    total_avg_valid_time = np.asscalar(np.mean(np.asarray(time_list)))
    return total_avg_valid_cost, total_avg_valid_accuracy, accuracy_list, total_avg_valid_auc, auc_list, total_avg_valid_time
def dense_to_one_hot(labels_dense, num_classes=2):
    """Convert class labels from scalars to one-hot vectors.

    Adapted from the TensorFlow tutorial; returns float32.
    """
    n = labels_dense.shape[0]
    encoded = np.zeros((n, num_classes))
    flat_idx = (np.arange(n) * num_classes + labels_dense.ravel()).astype(int)
    encoded.flat[flat_idx] = 1
    return encoded.astype(np.float32)
class Model(object):
    """LSTM VAD model graph: placeholders, inference, loss/accuracy, train op.

    Inputs carry ``w`` extra context frames beyond the batch; ``inference``
    trims them so logits align with the ``batch_size`` labels.
    """

    def __init__(self, is_training=True):
        self.keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
        self.inputs = inputs = tf.placeholder(tf.float32, shape=[batch_size + w, bdnn_inputsize],
                                              name="inputs")
        self.labels = labels = tf.placeholder(tf.float32, shape=[batch_size, bdnn_outputsize], name="labels")
        # set inference graph
        self.logits = logits = inference(inputs, self.keep_probability, is_training=is_training)  # (batch_size, bdnn_outputsize)
        # set objective function
        # self.cost = cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits))
        pred = tf.argmax(logits, axis=1, name="prediction")
        pred = tf.cast(pred, tf.int32)
        truth = tf.cast(labels[:, 1], tf.int32)
        log_one = logits[:, 1]
        self.accuracy = tf.reduce_mean(tf.cast(tf.equal(pred, truth), tf.float32))
        self.cost = cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits = logits))
        # self.cost = cost = tf.reduce_mean(tf.square(labels - logits))
        # fpr, tpr, thresholds = metrics.roc_curve(np.array(truth), np.array(pred), pos_label=2)
        # self.auc = metrics.auc(fpr, tpr)
        # cost = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
        # cost = tf.reduce_sum(tf.square(labels - logits), axis=1)
        # self.cost = cost = tf.reduce_mean(cost)
        # self.sigm = tf.sigmoid(logits)
        # set training strategy
        trainable_var = tf.trainable_variables()
        self.train_op = train(cost, trainable_var)
def main(argv=None):
    """Build the train/valid graphs, then train or run test-mode inference.

    FIX(review): ``mode is 'train'`` / ``mode is 'test'`` compared strings by
    identity, which only works because CPython interns short literals and
    raises a SyntaxWarning on Python 3.8+; replaced with ``==``. Everything
    else is unchanged. NOTE(review): the test branch reads the module global
    ``data_len``, which is only defined after ``test_config`` has been
    called — confirm callers always do so.
    """
    mean_acc = 0
    # Graph Part #
    print("Graph initialization...")
    with tf.device(device):
        with tf.variable_scope("model", reuse=None):
            m_train = Model(is_training=True)
        with tf.variable_scope("model", reuse=True):
            m_valid = Model(is_training=False)
    print("Done")
    # Summary Part #
    print("Setting up summary op...")
    summary_ph = tf.placeholder(dtype=tf.float32)
    with tf.variable_scope("Aggregated_Training_Parts"):
        cost_summary_op = tf.summary.scalar("cost", summary_ph)
        accuracy_summary_op = tf.summary.scalar("accuracy", summary_ph)
    train_summary_writer = tf.summary.FileWriter(logs_dir + '/train/', max_queue=4)
    valid_summary_writer = tf.summary.FileWriter(logs_dir + '/valid/', max_queue=4)
    summary_dic = summary_generation(valid_file_dir)
    print("Done")
    # Model Save Part #
    print("Setting up Saver...")
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(initial_logs_dir)
    print("Done")
    # Session Part #
    sess_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)
    if ckpt and ckpt.model_checkpoint_path:  # model restore
        print("Model restored...")
        saver.restore(sess, initial_logs_dir+ckpt_name)
        print("Done")
    else:
        sess.run(tf.global_variables_initializer())  # if the checkpoint doesn't exist, do initialization
    train_data_set = dr.DataReader(input_dir, output_dir, norm_dir, w=w, u=u, name="train")  # training data reader initialization
    if mode == 'train':
        for itr in range(max_epoch):
            train_inputs, train_labels = train_data_set.next_batch(batch_size)
            one_hot_labels = train_labels.reshape((-1, 1))
            one_hot_labels = dense_to_one_hot(one_hot_labels, num_classes=2)
            feed_dict = {m_train.inputs: train_inputs, m_train.labels: one_hot_labels,
                         m_train.keep_probability: dropout_rate}
            sess.run(m_train.train_op, feed_dict=feed_dict)
            if itr % 50 == 0 and itr >= 0:
                train_cost, train_accuracy = sess.run([m_train.cost, m_train.accuracy], feed_dict=feed_dict)
                # train_cost, logits = sess.run([m_train.cost, m_train.logits], feed_dict=feed_dict)
                #
                # result = bdnn_prediction(batch_size, logits, threshold=th)
                # raw_indx = int(np.floor(train_labels.shape[1] / 2))
                # raw_labels = train_labels[:, raw_indx]
                # raw_labels = raw_labels.reshape((-1, 1))
                # train_accuracy = np.equal(result, raw_labels)
                # train_accuracy = train_accuracy.astype(int)
                # train_accuracy = np.sum(train_accuracy) / batch_size  # change to mean...
                print("Step: %d, train_cost: %.3f, train_accuracy=%3.3f" % (itr, train_cost, train_accuracy))
                train_cost_summary_str = sess.run(cost_summary_op, feed_dict={summary_ph: train_cost})
                train_accuracy_summary_str = sess.run(accuracy_summary_op, feed_dict={summary_ph: train_accuracy})
                train_summary_writer.add_summary(train_cost_summary_str, itr)  # write the train phase summary to event files
                train_summary_writer.add_summary(train_accuracy_summary_str, itr)
            # if train_data_set.eof_checker():
            if itr % 1000 == 0 and itr > 0:
                saver.save(sess, logs_dir + "/model.ckpt", itr)  # model save
                print('validation start!')
                mean_acc = full_evaluation(m_valid, sess, valid_batch_size, valid_file_dir,
                                           valid_summary_writer, summary_dic, itr)
                if mean_acc > 0.88:
                    print('finish!!')
                    break
                # train_data_set.reader_initialize()
                # print('Train data reader was initialized!')  # initialize eof flag & num_file & start index
    elif mode == 'test':
        final_softout, final_label = utils.vad_test3(m_valid, sess, valid_batch_size, test_file_dir, norm_dir, data_len,
                                                     eval_type)
        if data_len is None:
            return final_softout, final_label
        else:
            return final_softout[0:data_len, :], final_label[0:data_len, :]
# Standard TF 1.x entry point: parses the declared flags, then calls main().
if __name__ == "__main__":
    tf.app.run()
| 20,915
| 38.389831
| 180
|
py
|
VAD
|
VAD-master/lib/python/temp_save.py
|
# One-off export helper: freeze the saved temp LSTM checkpoint into a .pb
# graph whose output nodes are the soft prediction and raw labels.
import graph_save as gs

prj_dir = '/home/sbie/storage3/github/VAD_Toolkit/VAD'  # machine-specific project root
gs.freeze_graph(prj_dir + '/saved_model/temp/temp_LSTM', prj_dir + '/saved_model/graph/LSTM', 'model_1/soft_pred,model_1/raw_labels')
# gs.freeze_graph(prj_dir + '/saved_model/temp_ACAM', prj_dir + '/saved_model/ACAM', 'model_1/logits,model_1/raw_labels')
| 338
| 41.375
| 133
|
py
|
VAD
|
VAD-master/lib/python/data_reader_bDNN.py
|
import numpy as np
import os
import glob
import utils
import scipy.io as sio
class DataReader(object):
    """Sequential reader over paired feature (.bin/.txt spec) and label files.

    Walks one (input, output) file pair at a time, serving normalized,
    bDNN-transformed batches via :meth:`next_batch`; ``file_change`` and
    ``eof`` flags let callers detect file boundaries and end of data.
    """

    def __init__(self, input_dir, output_dir, norm_dir, w=19, u=9, name=None, pad=None):
        # print(name + " data reader initialization...")
        self._input_dir = input_dir
        self._output_dir = output_dir
        self._norm_dir = norm_dir
        self._input_file_list = sorted(glob.glob(input_dir+'/*.bin'))
        self._input_spec_list = sorted(glob.glob(input_dir+'/*.txt'))
        self._output_file_list = sorted(glob.glob(output_dir+'/*.bin'))
        self._file_len = len(self._input_file_list)
        self._name = name
        assert self._file_len == len(self._output_file_list), "# input files and output file is not matched"
        self._epoch = 1
        self._num_file = 0
        self._start_idx = 0
        if pad is not None:
            # optionally zero-pad both features and labels by `pad` frames
            self._inputs = self._read_input(self._input_file_list[self._num_file], self._input_spec_list[self._num_file])
            self._inputs = np.concatenate((self._inputs, np.zeros((pad, self._inputs.shape[1]), dtype=np.float32)))
            self._outputs = self._read_output(self._output_file_list[self._num_file])
            self._outputs = np.concatenate((self._outputs, np.zeros((pad, self._outputs.shape[1]), dtype=np.float32)))
        else:
            self._inputs = self._read_input(self._input_file_list[self._num_file], self._input_spec_list[self._num_file])
            self._outputs = self._read_output(self._output_file_list[self._num_file])
        self._w = w
        self._u = u
        self.eof = False
        self.file_change = False
        self._outputs = self._outputs[0:self._inputs.shape[0]]
        assert np.shape(self._inputs)[0] == np.shape(self._outputs)[0], \
            ("# samples is not matched between input: %d and output: %d files"
             % (np.shape(self._inputs)[0], np.shape(self._outputs)[0]))
        self.num_samples = np.shape(self._outputs)[0]
        # global mean/std computed offline and stored as a .mat file
        norm_param = sio.loadmat(self._norm_dir+'/global_normalize_factor.mat')
        self.train_mean = norm_param['global_mean']
        self.train_std = norm_param['global_std']
        self.raw_inputs = 0  # adding part
        # print("Done.")
        # print("BOF : " + self._name + " file_" + str(self._num_file).zfill(2))

    def _binary_read_with_shape(self):
        pass

    @staticmethod
    def _read_input(input_file_dir, input_spec_dir):
        """Read raw float32 features; shape comes from the paired .txt spec."""
        data = np.fromfile(input_file_dir, dtype=np.float32)  # (# total frame, feature_size)
        with open(input_spec_dir,'r') as f:
            spec = f.readline()
            size = spec.split(',')
        data = data.reshape((int(size[0]), int(size[1])), order='F')
        return data

    @staticmethod
    def _read_output(output_file_dir):
        """Read raw float32 labels into a column vector."""
        data = np.fromfile(output_file_dir, dtype=np.float32)  # data shape : (# total frame,)
        data = data.reshape(-1, 1)  # data shape : (# total frame, 1)
        return data

    def next_batch(self, batch_size):
        """Return the next (inputs, outputs) batch, both bdnn-transformed.

        On exhausting the current file pair, advances to the next file
        (setting ``file_change``; setting ``eof`` and wrapping after the
        last file) before serving the batch.
        """
        if self._start_idx + batch_size > self.num_samples:
            self._start_idx = 0
            self.file_change = True
            self._num_file += 1
            # print("EOF : " + self._name + " file_" + str(self._num_file-1).zfill(2) +
            #       " -> BOF : " + self._name + " file_" + str(self._num_file).zfill(2))
            if self._num_file > self._file_len - 1:
                self.eof = True
                self._num_file = 0
                # print("EOF : last " + self._name + " file. " + "-> BOF : " + self._name + " file_" +
                #       str(self._num_file).zfill(2))
            self._inputs = self._read_input(self._input_file_list[self._num_file], self._input_spec_list[self._num_file])
            self._outputs = self._read_output(self._output_file_list[self._num_file])
            data_len = np.shape(self._inputs)[0]
            self._outputs = self._outputs[0:data_len, :]
            assert np.shape(self._inputs)[0] == np.shape(self._outputs)[0], \
                ("# samples is not matched between input: %d and output: %d files"
                 % (np.shape(self._inputs)[0], np.shape(self._outputs)[0]))
            self.num_samples = np.shape(self._outputs)[0]
        else:
            self.file_change = False
            self.eof = False
        inputs = self._inputs[self._start_idx:self._start_idx + batch_size, :]
        self.raw_inputs = inputs  # adding part
        inputs = self.normalize(inputs)
        inputs = utils.bdnn_transform(inputs, self._w, self._u)
        # inputs = inputs[self._w: (batch_size-self._w), :]
        outputs = self._outputs[self._start_idx:self._start_idx + batch_size, :]
        outputs = utils.bdnn_transform(outputs, self._w, self._u)
        # outputs = outputs[self._w: (batch_size - self._w), :]
        self._start_idx += batch_size
        return inputs, outputs

    def normalize(self, x):
        # standardize with the precomputed global mean/std
        x = (x - self.train_mean)/self.train_std
        return x

    def reader_initialize(self):
        """Rewind to the first file and clear the EOF flag."""
        self._num_file = 0
        self._start_idx = 0
        self.eof = False

    def eof_checker(self):
        return self.eof

    def file_change_checker(self):
        return self.file_change

    def file_change_initialize(self):
        self.file_change = False

    def set_random_batch(self, batch_size):
        """Jump the read cursor to a random in-range batch start."""
        self._start_idx = np.maximum(0, np.random.random_integers(self.num_samples - batch_size))
def dense_to_one_hot(labels_dense, num_classes=2):
    """Convert class labels from scalars to one-hot vectors.

    Adapted from the TensorFlow tutorial. FIX(review): the flat index is now
    cast to int before fancy indexing — the labels come from float32 files,
    and modern NumPy raises IndexError on float indices; the other copies of
    this helper in the project already cast. The original float64 return
    dtype is preserved for existing callers (sibling copies return float32).
    """
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[(index_offset + labels_dense.ravel()).astype(int)] = 1
    return labels_one_hot
| 5,838
| 37.668874
| 121
|
py
|
VAD
|
VAD-master/lib/python/update_ckpt.py
|
import sys
sys.path.insert(0, './lib/python')
import VAD_Proposed as Vp
import VAD_DNN as Vd
import VAD_bDNN as Vb
import VAD_LSTM_2 as Vl
import scipy.io as sio
import os, getopt
import glob
# norm_dir = "./norm_data"
# data_dir = "./sample_data"
# ckpt_name = '/model9918and41.ckpt-2'
# model_dir = "./saved_model"
# valid_batch_size = 4134
# CLI: -u 1 copies the three newest log checkpoints into saved_model under the
# model's name; any other -u value restores checkpoints (and the normalization
# factors) from the backup directory.
if __name__ == '__main__':
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hu:', ["model=", "prj_dir="])
    except getopt.GetoptError as err:
        print(str(err))
        sys.exit(1)
    if len(opts) != 3:
        print("arguments are not enough.")
        sys.exit(1)
    for opt, arg in opts:
        if opt == '-h':
            sys.exit(0)
        if opt == '-u':
            update = int(arg)
        elif opt == '--model':
            model = str(arg)
        elif opt == '--prj_dir':
            prj_dir = str(arg)
    logs_dir = prj_dir + '/logs'
    if update == 1:
        ckpt_list = sorted(glob.glob(logs_dir + '/model*'))
        # keep the last three checkpoints; rename 'model*' -> '<model>*'
        new_ckpt = old_ckpt= ckpt_list[-3:]
        new_ckpt = [(ckpt_name.replace('model', model)) for ckpt_name in new_ckpt]
        save_dir = [prj_dir + '/saved_model/' + os.path.basename(ckpt_name) for ckpt_name in new_ckpt]
        # drop the step suffix between the first and last dot of the filename
        save_dir = [ckpt_name.split('.')[0] + '.' + ckpt_name.split('.')[-1] for ckpt_name in save_dir]
        for x, y, z in zip(old_ckpt, new_ckpt, save_dir):
            os.system('cp -f ' + x + ' ' + y)
            os.system('mv -f ' + y + ' ' + z)
        print("checkpoint update done!")
    else:
        backup_dir = prj_dir + '/saved_model/backup_ckpt'
        backup_ckpt = sorted(glob.glob(backup_dir + '/' + model + '*'))
        restore_ckpt = [prj_dir + '/saved_model/' + os.path.basename(ckpt_name) for ckpt_name in backup_ckpt]
        for x, y in zip(backup_ckpt, restore_ckpt):
            os.system('cp -f ' + x + ' ' + y)
        backup_norm_dir = prj_dir + '/norm_data/backup_norm/global_normalize_factor.mat'
        norm_dir = prj_dir + '/norm_data/global_normalize_factor.mat'
        os.system('cp -f ' + backup_norm_dir + ' ' + norm_dir)
        print("checkpoint restore done!")
| 2,142
| 30.985075
| 109
|
py
|
VAD
|
VAD-master/lib/python/path_setting.py
|
class PathSetting(object):
    """Directory layout for one model's training/evaluation under *prj_dir*."""

    def __init__(self, prj_dir, model):
        feat_dir = prj_dir + '/data/feat'
        train_dir = feat_dir + '/train'

        self.logs_dir = prj_dir + '/logs/' + model
        self.initial_logs_dir = self.logs_dir
        self.input_dir = train_dir
        self.output_dir = train_dir + '/Labels'
        self.norm_dir = train_dir
        self.valid_file_dir = feat_dir + '/valid'
| 468
| 26.588235
| 45
|
py
|
VAD
|
VAD-master/lib/python/VAD_test.py
|
import sys
sys.path.insert(0, './lib/python')
import VAD_Proposed as Vp
import VAD_DNN as Vd
import VAD_bDNN as Vb
import VAD_LSTM_2 as Vl
import scipy.io as sio
import graph_test as graph_test
import os, getopt
import glob
from time import time
# norm_dir = "./norm_data"
# data_dir = "./sample_data"
# ckpt_name = '/model9918and41.ckpt-2'
# model_dir = "./saved_model"
# valid_batch_size = 4134
if __name__ == '__main__':
    # Command-line driver: runs a frozen VAD model over a feature directory and
    # saves the soft predictions / labels as .mat files under ./result.
    #
    # Flags:
    #   -m <int>        model type: 0=ACAM, 1=bDNN, 2=DNN, 3=LSTM
    #   -l <int>        number of leading frames to keep (data_len)
    #   -d <int>        1 -> use the bundled default graph/normalization,
    #                   0 -> use a user-trained graph under <model_dir>/graph
    #   --data_dir      input feature directory (labels under <data_dir>/Labels)
    #   --norm_dir      directory with normalization statistics
    #   --model_dir     project directory containing the frozen .pb graphs
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hm:l:d:', ["data_dir=", "norm_dir=", "model_dir="])
    except getopt.GetoptError as err:
        print(str(err))
        sys.exit(1)
    # All six options are mandatory; -h exits before reaching the model code.
    if len(opts) != 6:
        print("arguments are not enough.")
        sys.exit(1)
    for opt, arg in opts:
        if opt == '-h':
            sys.exit(0)
        elif opt == '-m':
            mode = int(arg)
        elif opt == '-l':
            data_len = int(arg)
        elif opt == '-d':
            is_default = int(arg)
        elif opt == '--data_dir':
            data_dir = str(arg)
        elif opt == '--norm_dir':
            norm_dir = str(arg)
        elif opt == '--model_dir':
            model_dir = str(arg)
    if mode == 0:
        # ACAM: when is_default, both graph and normalization come from the
        # shipped backup; otherwise glob the newest user-exported graph.
        # Vp.test_config(c_test_dir=data_dir,
        #                c_norm_dir=norm_dir,
        #                c_initial_logs_dir=model_dir, c_batch_size_eval=batch_size,
        #                c_data_len=data_len)
        #
        # pred, label = Vp.main()
        if is_default:
            graph_list = sorted(glob.glob(model_dir + '/backup/backup_pb/frozen_model_ACAM.pb'))
            norm_dir = model_dir + '/backup/backup_norm'
            pred, label = graph_test.do_test(graph_list[-1], data_dir, norm_dir, data_len, is_default, mode)
        else:
            graph_list = sorted(glob.glob(model_dir + '/graph/ACAM/*.pb'))
            print(graph_list)
            pred, label = graph_test.do_test(graph_list[-1], data_dir, norm_dir, data_len, is_default, mode)
    elif mode == 1:
        # bDNN: the per-model config module is made importable before the test.
        print(os.path.abspath('./configure/bDNN'))
        sys.path.insert(0, os.path.abspath('./configure/bDNN'))
        import config as cg
        if is_default:
            graph_list = sorted(glob.glob(model_dir + '/backup/backup_pb/frozen_model_bDNN.pb'))
            norm_dir = model_dir + '/backup/backup_norm'
            pred, label = graph_test.do_test(graph_list[-1], data_dir, norm_dir, data_len, is_default, mode)
        else:
            graph_list = sorted(glob.glob(model_dir + '/graph/bDNN/*.pb'))
            print(graph_list)
            pred, label = graph_test.do_test(graph_list[-1], data_dir, norm_dir, data_len, is_default, mode)
        # Vb.test_config(c_test_dir=data_dir,
        #                c_norm_dir=norm_dir,
        #                c_initial_logs_dir=model_dir, c_batch_size_eval=batch_size,
        #                c_data_len=data_len)
        # Vb.test_config(c_test_dir=data_dir,
        #                c_norm_dir='/home/sbie/storage3/github/VAD_Toolkit/VAD/saved_model/backup_norm',
        #                c_initial_logs_dir='/home/sbie/storage3/github/VAD_Toolkit/VAD/saved_model/backup_ckpt', c_batch_size_eval=batch_size,
        #                c_data_len=data_len)
        #
        # pred, label = Vb.main()
    elif mode == 2:
        # DNN: also reports wall-clock time for the whole test pass.
        start_time = time()
        print(os.path.abspath('./configure/DNN'))
        sys.path.insert(0, os.path.abspath('./configure/DNN'))
        import config as cg
        if is_default:
            graph_list = sorted(glob.glob(model_dir + '/backup/backup_pb/frozen_model_DNN.pb'))
            norm_dir = model_dir + '/backup/backup_norm'
            pred, label = graph_test.do_test(graph_list[-1], data_dir, norm_dir, data_len, is_default, mode)
        else:
            graph_list = sorted(glob.glob(model_dir + '/graph/DNN/*.pb'))
            pred, label = graph_test.do_test(graph_list[-1], data_dir, norm_dir, data_len, is_default, mode)
        # Vd.test_config(c_test_dir=data_dir,
        #                c_norm_dir=norm_dir,
        #                c_initial_logs_dir=model_dir, c_batch_size_eval=batch_size,
        #                c_data_len=data_len)
        #
        # pred, label = Vd.main()
        end_time = time()
        time_taken = end_time - start_time
        print(time_taken)
    elif mode == 3:
        sys.path.insert(0, os.path.abspath('./configure/LSTM'))
        import config as cg
        if is_default:
            graph_list = sorted(glob.glob(model_dir + '/backup/backup_pb/frozen_model_LSTM.pb'))
            norm_dir = model_dir + '/backup/backup_norm'
            pred, label = graph_test.do_test(graph_list[-1], data_dir, norm_dir, data_len, is_default, mode)
        else:
            graph_list = sorted(glob.glob(model_dir + '/graph/LSTM/*.pb'))
            print(graph_list)
            pred, label = graph_test.do_test(graph_list[-1], data_dir, norm_dir, data_len, is_default, mode)
        # Vl.test_config(c_test_dir=data_dir,
        #                c_norm_dir=norm_dir,
        #                c_initial_logs_dir=model_dir, c_batch_num=200, c_seq_size=20,
        #                c_data_len=data_len)
        #
        # pred, label = Vl.main()
    # Persist results for the MATLAB-side evaluation scripts.
    sio.savemat('./result/pred.mat', {'pred': pred})
    sio.savemat('./result/label.mat', {'label': label})
    print("done")
| 5,318
| 35.9375
| 143
|
py
|
VAD
|
VAD-master/lib/python/graph_test.py
|
import tensorflow as tf
import utils as utils
import numpy as np
import os, sys
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
def bdnn_prediction(batch_size, logits, threshold=0.6, w=19, u=9):
    """Collapse bDNN multi-window logits into per-frame hard/soft decisions.

    Each frame is scored by averaging every logit whose bDNN window covers it
    (window layout comes from utils.bdnn_transform with parameters w, u); a
    tiny random jitter keeps the scores strictly positive so the trailing
    trim_zeros does not drop them.  Returns (hard_int_decisions, soft_scores).
    """
    padded_size = batch_size + 2 * w
    averaged = np.zeros((padded_size, 1))
    frame_ids = (np.arange(padded_size) + 1).reshape((padded_size, 1))
    window_map = utils.bdnn_transform(frame_ids, w, u)
    window_map = window_map[w:(padded_size - w), :]
    for frame in np.arange(w, padded_size - w):
        hits = np.where((window_map - 1) == frame)
        votes = logits[hits]
        mean_vote = np.sum(votes) / votes.shape[0]
        # jitter avoids exact zeros, which trim_zeros would strip
        averaged[frame] = mean_vote + np.random.rand(1) * 1e-4
    averaged = np.trim_zeros(averaged)
    soft_scores = np.float32(averaged)
    hard = averaged >= threshold
    return hard.astype(int), soft_scores
def load_graph(frozen_graph_filename):
    """Deserialize a frozen GraphDef file and import it into a fresh Graph.

    Every node of the imported graph is prefixed with "prefix" (so tensors are
    addressed as 'prefix/<name>:0').  Returns the new tf.Graph.
    """
    # Read and parse the serialized protobuf from disk.
    with tf.gfile.GFile(frozen_graph_filename, "rb") as graph_file:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(graph_file.read())

    # Import into a brand-new graph so nothing collides with the default one.
    imported = tf.Graph()
    with imported.as_default():
        tf.import_graph_def(graph_def, name="prefix")
    return imported
def do_test(fname_model, test_file_dir, norm_dir, data_len, is_default, model_type):
    """Run a frozen VAD graph over a test feature directory.

    Args:
        fname_model: path of the frozen .pb graph to load.
        test_file_dir: feature directory; labels are read from <dir>/Labels.
        norm_dir: directory holding normalization statistics for the reader.
        data_len: number of leading frames to keep in the returned arrays
            (used as a slice bound, so None keeps everything).
        is_default: truthy -> shipped default hyperparameters; falsy -> read
            them from the per-model config module on sys.path.
        model_type: 0=ACAM, 1=bDNN, 2=DNN, 3=LSTM.

    Returns:
        (final_softout, final_label): per-frame soft scores and ground-truth
        labels, each reshaped to a column and sliced to data_len rows.
    """
    eval_input_dir = test_file_dir
    eval_output_dir = test_file_dir + '/Labels'
    graph = load_graph(fname_model)
    # Defaults; overwritten inside each model branch below.
    w = 19
    u = 9
    # [print(n.name) for n in graph.as_graph_def().node]
    # for op in graph.get_operations():
    #     print(op.name)
    final_softout = []
    final_label = []
    if model_type == 0:  # acam
        import data_reader_bDNN_v2 as dr
        print(os.path.abspath('./configure/ACAM'))
        sys.path.insert(0, os.path.abspath('./configure/ACAM'))
        import config as cg
        if is_default:
            w = 19
            u = 9
            valid_batch_size = 4096
        else:
            w = cg.w
            u = cg.u
            valid_batch_size = cg.batch_size
        valid_data_set = dr.DataReader(eval_input_dir, eval_output_dir, norm_dir, w=w, u=u, name="eval")
        # Tensor handles in the imported graph (all nodes live under "prefix/").
        # Note: "keep_probabilty" is a misspelled but frozen tensor name.
        node_inputs = graph.get_tensor_by_name('prefix/model_1/inputs:0')
        node_labels = graph.get_tensor_by_name('prefix/model_1/labels:0')
        node_keep_probability = graph.get_tensor_by_name('prefix/model_1/keep_probabilty:0')
        node_logits = graph.get_tensor_by_name('prefix/model_1/logits:0')
        node_raw_labels = graph.get_tensor_by_name('prefix/model_1/raw_labels:0')
        while True:
            valid_inputs, valid_labels = valid_data_set.next_batch(valid_batch_size)
            feed_dict = {node_inputs: valid_inputs, node_labels: valid_labels,
                         node_keep_probability: 1}
            # End of data: flatten the accumulated batches and stop.
            if valid_data_set.eof_checker():
                final_softout = np.reshape(np.asarray(final_softout), [-1, 1])
                final_label = np.reshape(np.asarray(final_label), [-1, 1])
                valid_data_set.reader_initialize()
                # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break
            with tf.Session(graph=graph) as sess:
                logits, raw_labels = sess.run([node_logits, node_raw_labels], feed_dict=feed_dict)
            # bdnn_prediction()[1] is the soft (pre-threshold) score column.
            soft_pred = bdnn_prediction(valid_batch_size, logits, threshold=0.6, w=w, u=u)[1]
            raw_labels = raw_labels.reshape((-1, 1))
            final_softout.append(soft_pred)
            final_label.append(raw_labels)
            # if valid_data_set.eof_checker():
            #     final_softout = np.reshape(np.asarray(final_softout), [-1, 1])
            #     final_label = np.reshape(np.asarray(final_label), [-1, 1])
            #     valid_data_set.reader_initialize()
            #     # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
            #     break
        return final_softout[0:data_len, :], final_label[0:data_len, :]
    if model_type == 1:  # bdnn
        import data_reader_bDNN_v2 as dr
        print(os.path.abspath('./configure/bDNN'))
        sys.path.insert(0, os.path.abspath('./configure/bDNN'))
        import config as cg
        if is_default:
            w = 19
            u = 9
            valid_batch_size = 4096
        else:
            w = cg.w
            u = cg.u
            valid_batch_size = cg.batch_size
        valid_data_set = dr.DataReader(eval_input_dir, eval_output_dir, norm_dir, w=w, u=u, name="eval")  # training data reader initialization
        node_inputs = graph.get_tensor_by_name('prefix/model_1/inputs:0')
        node_labels = graph.get_tensor_by_name('prefix/model_1/labels:0')
        node_keep_probability = graph.get_tensor_by_name('prefix/model_1/keep_probabilty:0')
        node_logits = graph.get_tensor_by_name('prefix/model_1/logits:0')
        while True:
            valid_inputs, valid_labels = valid_data_set.next_batch(valid_batch_size)
            feed_dict = {node_inputs: valid_inputs, node_labels: valid_labels,
                         node_keep_probability: 1}
            if valid_data_set.eof_checker():
                final_softout = np.reshape(np.asarray(final_softout), [-1, 1])
                final_label = np.reshape(np.asarray(final_label), [-1, 1])
                valid_data_set.reader_initialize()
                # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break
            with tf.Session(graph=graph) as sess:
                logits, labels = sess.run([node_logits, node_labels], feed_dict=feed_dict)
            soft_pred = bdnn_prediction(valid_batch_size, logits, threshold=0.6, w=w, u=u)[1]
            # The center column of the windowed label matrix is the raw label.
            raw_indx = int(np.floor(labels.shape[1] / 2))
            raw_labels = labels[:, raw_indx]
            raw_labels = raw_labels.reshape((-1, 1))
            final_softout.append(soft_pred)
            final_label.append(raw_labels)
        return final_softout[0:data_len, :], final_label[0:data_len, :]
    if model_type == 2:  # dnn
        import data_reader_DNN_v2 as dnn_dr
        print(os.path.abspath('./configure/DNN'))
        sys.path.insert(0, os.path.abspath('./configure/DNN'))
        import config as cg
        if is_default:
            w = 19
            u = 9
            valid_batch_size = 4096
        else:
            w = cg.w
            u = cg.u
            valid_batch_size = cg.batch_size
        valid_data_set = dnn_dr.DataReader(eval_input_dir, eval_output_dir, norm_dir, w=w, u=u, name="eval")
        node_inputs = graph.get_tensor_by_name('prefix/model_1/inputs:0')
        node_labels = graph.get_tensor_by_name('prefix/model_1/labels:0')
        node_keep_probability = graph.get_tensor_by_name('prefix/model_1/keep_probabilty:0')
        node_softpred = graph.get_tensor_by_name('prefix/model_1/soft_pred:0')
        node_raw_labels = graph.get_tensor_by_name('prefix/model_1/raw_labels:0')
        while True:
            valid_inputs, valid_labels = valid_data_set.next_batch(valid_batch_size)
            # DNN graph expects one-hot (2-class) labels.
            one_hot_labels = valid_labels.reshape((-1, 1))
            one_hot_labels = utils.dense_to_one_hot(one_hot_labels, num_classes=2)
            feed_dict = {node_inputs: valid_inputs, node_labels: one_hot_labels,
                         node_keep_probability: 1}
            if valid_data_set.eof_checker():
                final_softout = np.reshape(np.asarray(final_softout), [-1, 1])
                final_label = np.reshape(np.asarray(final_label), [-1, 1])
                valid_data_set.reader_initialize()
                # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break
            with tf.Session(graph=graph) as sess:
                soft_pred, raw_labels = sess.run([node_softpred, node_raw_labels], feed_dict=feed_dict)
            raw_labels = raw_labels.reshape((-1, 1))
            final_softout.append(soft_pred)
            final_label.append(raw_labels)
        return final_softout[0:data_len, :], final_label[0:data_len, :]
    if model_type == 3:  # lstm
        import data_reader_RNN as rnn_dr
        print(os.path.abspath('./configure/LSTM'))
        sys.path.insert(0, os.path.abspath('./configure/LSTM'))
        import config as cg
        if is_default:
            target_delay = 5
            seq_size = 20
            batch_num = 200
            valid_batch_size = seq_size * batch_num
        else:
            target_delay = cg.target_delay
            seq_size = cg.seq_len
            batch_num = cg.num_batches
            valid_batch_size = seq_size * batch_num
        valid_data_set = rnn_dr.DataReader(eval_input_dir, eval_output_dir, norm_dir, target_delay=target_delay,
                                           name="eval")
        node_inputs = graph.get_tensor_by_name('prefix/model_1/inputs:0')
        node_labels = graph.get_tensor_by_name('prefix/model_1/labels:0')
        node_keep_probability = graph.get_tensor_by_name('prefix/model_1/keep_probabilty:0')
        node_softpred = graph.get_tensor_by_name('prefix/model_1/soft_pred:0')
        node_raw_labels = graph.get_tensor_by_name('prefix/model_1/raw_labels:0')
        while True:
            valid_inputs, valid_labels = valid_data_set.next_batch(valid_batch_size)
            one_hot_labels = valid_labels.reshape((-1, 1))
            one_hot_labels = utils.dense_to_one_hot(one_hot_labels, num_classes=2)
            feed_dict = {node_inputs: valid_inputs, node_labels: one_hot_labels,
                         node_keep_probability: 1}
            if valid_data_set.eof_checker():
                final_softout = np.reshape(np.asarray(final_softout), [-1, 1])
                final_label = np.reshape(np.asarray(final_label), [-1, 1])
                valid_data_set.reader_initialize()
                # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break
            with tf.Session(graph=graph) as sess:
                soft_pred, raw_labels = sess.run([node_softpred, node_raw_labels], feed_dict=feed_dict)
            raw_labels = raw_labels.reshape((-1, 1))
            final_softout.append(soft_pred)
            final_label.append(raw_labels)
            # if valid_data_set.eof_checker():
            #     final_softout = np.reshape(np.asarray(final_softout), [-1, 1])
            #     final_label = np.reshape(np.asarray(final_label), [-1, 1])
            #     valid_data_set.reader_initialize()
            #     # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
            #     break
        return final_softout[0:data_len, :], final_label[0:data_len, :]
if __name__ == '__main__':
    # Standalone smoke test: load a developer-local frozen graph and print a
    # marker.  NOTE(review): hard-coded absolute path — only valid on the
    # original author's machine; the real entry point is VAD_test.py.
    graph = load_graph('/home/sbie/storage3/github/VAD_Toolkit/VAD/logs/frozen_model.pb')
    print('aa')
    # # Let's allow the user to pass the filename as an argument
    # parser = argparse.ArgumentParser()
    # parser.add_argument("--frozen_model_filename", default="results/frozen_model.pb", type=str, help="Frozen model file to import")
    # parser.add_argument("--test_file_dir", default=0, type=str, help="test_file_dir")
    # parser.add_argument("--prj_dir", default=0, type=str, help="prj_dir")
    # parser.add_argument("--data_len", default=0, type=int, help="data_len")
    # parser.add_argument("--valid_batch_size", default=0, type=int, help="valid_batch_size")
    # parser.add_argument("-m", default=0, type=int, help="model type")
    #
    # args = parser.parse_args()
    #
    # # We use our "load_graph" function
    # graph = load_graph(args.frozen_model_filename)
    #
    # # We can verify that we can access the list of operations in the graph
    # for op in graph.get_operations():
    #     print(op.name)
    #     # prefix/Placeholder/inputs_placeholder
    #     # ...
    #     # prefix/Accuracy/predictions
    #
    # # We access the input and output nodes
    # x = graph.get_tensor_by_name('prefix/Placeholder/inputs_placeholder:0')
    # y = graph.get_tensor_by_name('prefix/Accuracy/predictions:0')
    #
    # # We launch a Session
    # with tf.Session(graph=graph) as sess:
    #     # Note: we don't nee to initialize/restore anything
    #     # There is no Variables in this graph, only hardcoded constants
    #     y_out = sess.run(y, feed_dict={
    #         x: [[3, 5, 7, 4, 5, 1, 1, 1, 1, 1]]  # < 45
    #     })
    #     # I taught a neural net to recognise when a sum of numbers is bigger than 45
    #     # it should return False in this case
    #     print(y_out)  # [[ False ]] Yay, it works!
| 12,936
| 40.598071
| 143
|
py
|
VAD
|
VAD-master/lib/python/VAD_LSTM_2.py
|
import tensorflow as tf
import numpy as np
import utils as utils
import re
import data_reader_RNN as dr
import sys, os
import matplotlib.pyplot as plt
from tensorflow.contrib import rnn
from sklearn import metrics
import time
# --- module-level configuration for the LSTM VAD model -----------------------
# Defaults below are overridden by train_config()/test_config() or by main().
FLAGS = tf.flags.FLAGS
SEED = 1
tf.set_random_seed(SEED)
tf.flags.DEFINE_string('mode', "train", "mode : train/ test [default : train]")
# Default data/model locations (developer-machine paths).
file_dir = "/home/sbie/storage2/VAD_Database/SE_TIMIT_MRCG_0328"
input_dir = file_dir
output_dir = file_dir + "/Labels"
valid_file_dir = "/home/sbie/storage2/VAD_Database/NX_TIMIT_MRCG_big"
# valid_file_dir = "/media/jskim/F440795840792312/database/jskim/NX_TIMIT_MRCG_big"
norm_dir = input_dir
logs_dir = "./saved_model"
initial_logs_dir = "/home/sbie/github/VAD_Project_test/VAD_LSTM/logs_LSTM"
ckpt_name = "/LSTM"
reset = True  # remove all existed logs and initialize log directories
device = "/gpu:0"
mode = 'test'
# BUG FIX: compare strings with '==', not 'is'.  Identity comparison only
# worked by CPython small-string interning and raises a SyntaxWarning on
# modern interpreters.
if mode == 'test':
    reset = False
if reset:
    # NOTE(review): os.popen is asynchronous — the mkdir calls can race the
    # rm -rf.  Left as-is to preserve behavior; consider shutil.rmtree +
    # os.makedirs instead.
    os.popen('rm -rf ' + logs_dir + '/*')
    os.popen('mkdir ' + logs_dir + '/train')
    os.popen('mkdir ' + logs_dir + '/valid')
# Per-noise-type summary tags written during full evaluation.
summary_list = ["cost", "accuracy_SNR_-5", "accuracy_SNR_0", "accuracy_SNR_5", "accuracy_SNR_10",
                "accuracy_across_all_SNRs"]
# Training hyperparameters.
learning_rate = 0.0001
eval_num_batches = 2e4
SMALL_NUM = 1e-4
max_epoch = int(1e5)
dropout_rate = 0.5
decay = 0.9  # batch normalization decay factor
target_delay = 5  # target_delay default = 19
u = 9  # u default = 9
eval_th = 0.6
th = 0.5
lstm_cell_size = 256
num_layers = 3
model_config = {'target_delay': target_delay, "u": u}
seq_size = 20
batch_num = 200
batch_size = batch_num*seq_size  # batch_size = 32
valid_batch_size = batch_size
# assert (target_delay-1) % u == 0, "target_delay-1 must be divisible by u"
width = 768
num_features = 768  # MRCG feature
bdnn_winlen = (((target_delay-1) / u) * 2) + 3
# bdnn_inputsize = int(bdnn_winlen * num_features)
bdnn_inputsize = num_features
bdnn_outputsize = 2  # int(bdnn_winlen)
initLr = 1e-5
scope_name = 'RNN_scope'
eval_type = 2
def train_config(c_train_dir, c_valid_dir, c_logs_dir, c_seq_size, c_batch_num, c_max_epoch, c_mode):
    """Override the module-level training configuration in place.

    Points the data/label/normalization directories at c_train_dir, the
    validation set at c_valid_dir and the log directory at c_logs_dir, and
    derives batch_size (= valid_batch_size) from seq_size * batch_num.
    """
    global file_dir, input_dir, output_dir, valid_file_dir, norm_dir
    global initial_logs_dir, logs_dir, ckpt_name
    global batch_size, valid_batch_size, mode, max_epoch, seq_size, batch_num

    file_dir = c_train_dir
    input_dir = file_dir
    output_dir = file_dir + "/Labels"
    norm_dir = file_dir
    valid_file_dir = c_valid_dir
    initial_logs_dir = logs_dir = c_logs_dir
    # batch_size = valid_batch_size = c_batch_size_eval + 2 * target_delay
    seq_size = c_seq_size
    batch_num = c_batch_num
    batch_size = valid_batch_size = c_seq_size * c_batch_num
    max_epoch = c_max_epoch
    mode = c_mode
def test_config(c_test_dir, c_norm_dir, c_initial_logs_dir, c_seq_size, c_batch_num, c_data_len):
global test_file_dir
global norm_dir
global initial_logs_dir
global ckpt_name
global valid_batch_size
global batch_size
global data_len
global batch_num
global seq_size
global batch_num
test_file_dir = c_test_dir
norm_dir = c_norm_dir
initial_logs_dir = c_initial_logs_dir
seq_size = c_seq_size
batch_num = c_batch_num
valid_batch_size = batch_size = c_seq_size * c_batch_num
data_len = c_data_len
batch_num = batch_size/seq_size
def affine_transform(x, output_dim, name=None):
    """Learned affine map W·x + b.

    Assumes x has shape (batch_size, num_features); creates (or reuses, per
    the current variable scope) variables "<name>_w" and "<name>_b".
    """
    input_dim = x.get_shape()[1]
    weight = tf.get_variable(name + "_w", [input_dim, output_dim],
                             initializer=tf.truncated_normal_initializer(stddev=0.02))
    bias = tf.get_variable(name + "_b", [output_dim],
                           initializer=tf.constant_initializer(0.0))
    return tf.matmul(x, weight) + bias
def rnn_in(inputs, batch_num, seq_size,delay):
    """Reshape a flat frame stream into per-sequence LSTM input with look-ahead.

    Splits the first batch_size frames into (batch_num, seq_size, num_features)
    sequences, then appends `delay` look-ahead frames to each sequence: for all
    but the last sequence the look-ahead is the start of the next sequence; the
    last sequence gets the `delay` extra frames carried past batch_size.
    Output shape: (batch_num, seq_size + delay, num_features).
    Relies on the module-level globals num_features and batch_size.
    """
    batch_num = int(batch_num)
    seq_size = int(seq_size)
    delay = int(delay)
    # Flatten to (frames, num_features).
    temp1 = tf.reshape(inputs,[-1,num_features])
    # Main window: first batch_size frames as batch_num sequences.
    temp2 = tf.reshape(temp1[0:batch_size,:],[batch_num,seq_size,num_features])
    # temp3 = tf.zeros([batch_num,delay,num_features], tf.float32)
    # Look-ahead for sequences 0..batch_num-2: first `delay` frames of the
    # following sequence.
    temp3 = temp2[1:batch_num,0:delay,:]
    # Look-ahead for the final sequence: the extra frames beyond batch_size.
    temp4 = tf.reshape(temp1[batch_size:batch_size+delay,:],[-1,delay,num_features])
    temp5 = tf.concat([temp3,temp4],0)
    return tf.concat([temp2,temp5],1)
def inference(inputs, keep_prob, is_training=True, reuse=None):
    """Stacked-LSTM forward pass producing per-frame class logits.

    Builds (under the module-level scope_name) a num_layers-deep LSTM over
    sequences assembled by rnn_in() with target_delay look-ahead, applies
    dropout to the per-frame outputs, and projects them to bdnn_outputsize
    logits with an affine layer.  Returns a (frames, bdnn_outputsize) tensor.
    """
    with tf.variable_scope(scope_name):
        # Assemble (batch_num, seq_size + target_delay, num_features) input.
        sequenced = rnn_in(inputs, batch_num, seq_size, target_delay)
        # Build the stacked LSTM.
        layer_cells = [
            tf.nn.rnn_cell.LSTMCell(num_units=lstm_cell_size, state_is_tuple=True)
            for _ in range(num_layers)
        ]
        multi_cell = tf.nn.rnn_cell.MultiRNNCell(cells=layer_cells, state_is_tuple=True)
        outputs, _state = tf.nn.dynamic_rnn(multi_cell, sequenced, time_major=False, dtype=tf.float32)
        # Discard the look-ahead tail; keep one vector per real frame.
        outputs = tf.reshape(outputs[:, 0:seq_size, :], [-1, lstm_cell_size])
        outputs = tf.nn.dropout(outputs, keep_prob=keep_prob)
        # Project to class logits.
        logits = affine_transform(outputs, bdnn_outputsize, name="output1")
        logits = tf.reshape(logits, [-1, int(bdnn_outputsize)])
    return logits
def train(loss_val, var_list):
    """Build the Adam training op with a staircase exponentially-decayed LR.

    Starts from the module-level initLr and decays by 5% every 200 steps;
    returns the apply-gradients op (which also increments the step counter).
    """
    decay_rate = .95
    decay_every = 200
    step_counter = tf.Variable(0, trainable=False)
    lr = tf.train.exponential_decay(initLr, step_counter, decay_every, decay_rate, staircase=True)
    optimizer = tf.train.AdamOptimizer(lr)
    gradient_pairs = optimizer.compute_gradients(loss_val, var_list=var_list)
    return optimizer.apply_gradients(gradient_pairs, global_step=step_counter)
def summary_generation(eval_file_dir):
    """Create scalar summary ops for every noise type plus averaged results.

    One placeholder ("summary_ph") feeds every summary.  For each noise
    directory under eval_file_dir a summary is created per tag in the
    module-level summary_list; four extra averaged summaries are created
    under "Averaged_Results".  Returns a dict keyed by summary name.
    """
    summaries = {}
    noise_names = sorted(os.listdir(eval_file_dir))
    summaries["summary_ph"] = placeholder = tf.placeholder(dtype=tf.float32)

    # Per-noise-type summaries, scoped by noise directory name.
    for noise in noise_names:
        with tf.variable_scope(noise):
            for tag in summary_list:
                summaries[noise + "_" + tag] = tf.summary.scalar(tag, placeholder)

    # Aggregated summaries across all noise types.
    with tf.variable_scope("Averaged_Results"):
        for tag in ("cost_across_all_noise_types",
                    "accuracy_across_all_noise_types",
                    "variance_across_all_noise_types",
                    "AUC_across_all_noise_types"):
            summaries[tag] = tf.summary.scalar(tag, placeholder)
    return summaries
def full_evaluation(m_eval, sess_eval, batch_size_eval, eval_file_dir, summary_writer, summary_dic, itr):
    """Evaluate the model over every noise-type subdirectory of eval_file_dir.

    For each noise type this runs evaluation(), prints per-SNR accuracy/AUC,
    and writes the per-noise summaries from summary_dic; afterwards it writes
    the cost/accuracy/variance/AUC averaged across all noise types at global
    step `itr`.  Returns the mean AUC across noise types.
    """
    mean_cost = []
    mean_accuracy = []
    mean_auc = []
    mean_time = []
    print("-------- Performance for each of noise types --------")
    noise_list = os.listdir(eval_file_dir)
    noise_list = sorted(noise_list)
    summary_ph = summary_dic["summary_ph"]
    for i in range(len(noise_list)):
        print("full time evaluation, now loading : %d",i)
        noise_name = '/' + noise_list[i]
        eval_input_dir = eval_file_dir + noise_name
        eval_output_dir = eval_file_dir + noise_name + '/Labels'
        ##########################################
        eval_calc_dir = eval_file_dir + noise_name + '/test_result'  # for Final layer information saving
        ##########################################
        eval_data_set = dr.DataReader(eval_input_dir, eval_output_dir, norm_dir, target_delay=target_delay, u=u, name="eval")
        eval_cost, eval_accuracy, eval_list, eval_auc, auc_list, eval_time = evaluation(m_eval, eval_data_set, sess_eval, batch_size_eval, noise_list[i], save_dir = eval_calc_dir)
        print("--noise type : " + noise_list[i])
        print("cost: %.3f, accuracy across all SNRs: %.3f, auc across all SNRs: %.3f " % (eval_cost, eval_accuracy, eval_auc))
        print('accuracy wrt SNR:')
        print('SNR_-5 : %.3f, SNR_0 : %.3f, SNR_5 : %.3f, SNR_10 : %.3f' % (eval_list[0], eval_list[1],
                                                                            eval_list[2], eval_list[3]))
        print('AUC wrt SNR:')
        print('SNR_-5 : %.3f, SNR_0 : %.3f, SNR_5 : %.3f, SNR_10 : %.3f' % (auc_list[0], auc_list[1],
                                                                            auc_list[2], auc_list[3]))
        # Order matches summary_list: cost, per-SNR accuracies, avg acc, AUC.
        eval_summary_list = [eval_cost] + eval_list + [eval_accuracy] + [eval_auc]
        for j, summary_name in enumerate(summary_list):
            summary_str = sess_eval.run(summary_dic[noise_list[i]+"_"+summary_name],
                                        feed_dict={summary_ph: eval_summary_list[j]})
            summary_writer.add_summary(summary_str, itr)
        mean_cost.append(eval_cost)
        mean_accuracy.append(eval_accuracy)
        mean_auc.append(eval_auc)
        mean_time.append(eval_time)
    # Aggregate across noise types (variance computed before the mean
    # overwrites mean_accuracy).
    mean_cost = np.mean(np.asarray(mean_cost))
    var_accuracy = np.var(np.asarray(mean_accuracy))
    mean_accuracy = np.mean(np.asarray(mean_accuracy))
    mean_auc = np.mean(np.asarray(mean_auc))
    mean_time = np.mean(np.asarray(mean_time))
    summary_writer.add_summary(sess_eval.run(summary_dic["cost_across_all_noise_types"],
                                             feed_dict={summary_ph: mean_cost}), itr)
    summary_writer.add_summary(sess_eval.run(summary_dic["accuracy_across_all_noise_types"],
                                             feed_dict={summary_ph: mean_accuracy}), itr)
    summary_writer.add_summary(sess_eval.run(summary_dic["variance_across_all_noise_types"],
                                             feed_dict={summary_ph: var_accuracy}), itr)
    summary_writer.add_summary(sess_eval.run(summary_dic["AUC_across_all_noise_types"],
                                             feed_dict={summary_ph: mean_auc}), itr)
    print("-------- Performance across all of noise types --------")
    print("cost : %.3f" % mean_cost)
    print("******* averaged accuracy across all noise_types : %.3f *******" % mean_accuracy)
    print("******* variance of accuracies across all noise_types : %6.6f *******" % var_accuracy)
    print("******* variance of AUC across all noise_types : %6.6f *******" % mean_auc)
    print("******* mean time : %6.6f *******" % mean_time)
    return mean_auc
def evaluation(m_valid, valid_data_set, sess, eval_batch_size, noise_name, num_batches=eval_num_batches,
               save_dir = None):
    """Run the model over one noise-type data set, per input file.

    Batches are pulled from valid_data_set until EOF; whenever the reader
    switches to a new file the running cost/accuracy/time averages and an AUC
    (via utils.plot_ROC2) are recorded for the finished file.  Returns
    (avg_cost, avg_accuracy, accuracy_list, avg_auc, auc_list, avg_time),
    where the lists hold one entry per input file.
    NOTE(review): num_batches and save_dir are currently unused.
    """
    # num_samples = valid_data_set.num_samples
    # num_batches = num_samples / batch_size
    # Running per-file accumulators.
    avg_valid_cost = 0.
    avg_valid_accuracy = 0.
    avg_valid_time = 0.
    # AUC = 0.
    itr_sum = 0.
    file_num_before = -1
    # One slot per input file (relies on the reader's private _file_len).
    accuracy_list = [0 for i in range(valid_data_set._file_len)]
    cost_list = [0 for i in range(valid_data_set._file_len)]
    auc_list = [0 for i in range(valid_data_set._file_len)]
    time_list = [0 for i in range(valid_data_set._file_len)]
    itr_file = 0
    channel_values = []
    save_calc_dir = ''
    valid_name_before = ''
    # plt.figure()
    while True:
        # valid_name_before = save_dir + 'test'+ [file_num_before] + '.txt'
        valid_inputs, valid_labels = valid_data_set.next_batch(eval_batch_size)
        # Reader moved on to a new file: close out the previous file's stats.
        if valid_data_set.file_change_checker():
            accuracy_list[itr_file] = avg_valid_accuracy / itr_sum
            cost_list[itr_file] = avg_valid_cost / itr_sum
            auc_list[itr_file] = utils.plot_ROC2(channel_values,file_num_before, noise_name)
            # Time normalized per second of 16 kHz audio.
            time_list[itr_file] = avg_valid_time / ((itr_sum*batch_size)/16000)
            avg_valid_accuracy = 0.
            avg_valid_cost = 0.
            avg_valid_time = 0.
            channel_values = []
            # avg_valid_auc = 0.
            itr_sum = 0
            itr_file += 1
            valid_data_set.file_change_initialize()
        if valid_data_set.eof_checker():
            #######
            # AUC = utils.plot_ROC2(valid_name_before, save_calc_dir,file_num_before, noise_name)
            # f_eval.close()
            #######
            valid_data_set.reader_initialize()
            print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
            break
        # if eval_batch_size * itr_file > 5000:
        #     f_eval.close()
        #     break
        one_hot_vlabels = valid_labels.reshape((-1, 1))
        one_hot_vlabels = dense_to_one_hot(one_hot_vlabels, num_classes=2)
        feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: one_hot_vlabels,
                     m_valid.keep_probability: 1}
        # Time only the forward pass; cost/accuracy come from a second run.
        start_time = time.time()
        logits_val = sess.run(m_valid.logits, feed_dict=feed_dict)
        check_time = time.time()-start_time
        valid_cost, valid_accuracy = sess.run([m_valid.cost, m_valid.accuracy], feed_dict=feed_dict)
        # print(valid_labels.shape)
        # print(logits_val)
        # Keep (logits, labels) pairs for the per-file ROC computation.
        save_val = np.concatenate([logits_val,valid_labels], axis = 1)
        ###############################################
        file_num = valid_data_set._num_file
        channel_values.append(save_val)
        avg_valid_cost += valid_cost
        avg_valid_accuracy += valid_accuracy
        avg_valid_time += check_time
        # avg_valid_auc += valid_auc
        itr_sum += 1
        ############################################
        file_num_before = valid_data_set._num_file
        ############################################
    # Averages across files for this noise type.
    total_avg_valid_cost = np.asscalar(np.mean(np.asarray(cost_list)))
    total_avg_valid_accuracy = np.asscalar(np.mean(np.asarray(accuracy_list)))
    total_avg_valid_auc = np.asscalar(np.mean(np.asarray(auc_list)))
    total_avg_valid_time = np.asscalar(np.mean(np.asarray(time_list)))
    return total_avg_valid_cost, total_avg_valid_accuracy, accuracy_list, total_avg_valid_auc, auc_list, total_avg_valid_time
def dense_to_one_hot(labels_dense, num_classes=2):
    """Convert integer class labels to one-hot float32 rows.

    labels_dense is an array whose leading dimension is the sample count
    (e.g. shape (n,) or (n, 1)); returns an (n, num_classes) float32 matrix
    with a single 1.0 per row.
    """
    count = labels_dense.shape[0]
    # Flat position of the hot entry for each row of the output matrix.
    hot_positions = np.arange(count) * num_classes + labels_dense.ravel()
    one_hot = np.zeros((count, num_classes))
    one_hot.flat[hot_positions.astype(int)] = 1
    return one_hot.astype(np.float32)
class Model(object):
    """LSTM VAD model graph: placeholders, inference, loss, metrics, train op.

    Shapes come from the module-level globals batch_size, target_delay,
    bdnn_inputsize and bdnn_outputsize; `inputs` carries target_delay extra
    look-ahead frames beyond batch_size.
    """

    def __init__(self, is_training=True):
        # NOTE: "keep_probabilty" is misspelled but must stay — frozen graphs
        # are addressed by this exact tensor name (see graph_test.do_test).
        self.keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
        self.inputs = inputs = tf.placeholder(tf.float32, shape=[batch_size + target_delay, bdnn_inputsize],
                                              name="inputs")
        self.labels = labels = tf.placeholder(tf.float32, shape=[batch_size, bdnn_outputsize], name="labels")
        # self.inputs = inputs = tf.placeholder(tf.float32, shape=[None, bdnn_inputsize],
        #                                       name="inputs")
        # self.labels = labels = tf.placeholder(tf.float32, shape=[None, bdnn_outputsize], name="labels")
        # set inference graph
        self.logits = logits = inference(inputs, self.keep_probability, is_training=is_training)  # (batch_size, bdnn_outputsize)
        # set objective function
        # self.cost = cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits))
        # Hard prediction = argmax over the two classes; soft prediction is
        # the raw speech-class logit (exported by name for frozen-graph use).
        pred = tf.argmax(logits, axis=1, name="prediction")
        softpred = tf.identity(logits[:, 1], name="soft_pred")
        pred = tf.cast(pred, tf.int32)
        truth = tf.cast(labels[:, 1], tf.int32)
        self.raw_labels = tf.identity(truth, name="raw_labels")
        log_one = logits[:, 1]
        self.accuracy = tf.reduce_mean(tf.cast(tf.equal(pred, truth), tf.float32))
        self.cost = cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits = logits))
        # self.cost = cost = tf.reduce_mean(tf.square(labels - logits))
        # fpr, tpr, thresholds = metrics.roc_curve(np.array(truth), np.array(pred), pos_label=2)
        # self.auc = metrics.auc(fpr, tpr)
        # cost = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
        # cost = tf.reduce_sum(tf.square(labels - logits), axis=1)
        # self.cost = cost = tf.reduce_mean(cost)
        # self.sigm = tf.sigmoid(logits)
        # set training strategy
        trainable_var = tf.trainable_variables()
        self.train_op = train(cost, trainable_var)
def main(prj_dir=None, model=None, mode=None):
    """Entry point: train the LSTM VAD model or run it over a test set.

    mode == 'train': read paths from path_setting/config under prj_dir,
    build the graph, restore any checkpoint under logs_dir/LSTM and run the
    training loop (validating and checkpointing every 50 steps).
    mode == 'test': restore initial_logs_dir+ckpt_name (set via test_config)
    and return (soft_predictions, labels) truncated to data_len frames.
    """
    # BUG FIX: all mode comparisons below use '==' instead of 'is' — string
    # identity only worked via CPython interning and warns on modern Python.
    # BUG FIX: declare the path variables global; previously the train-branch
    # assignments made them function-locals, so test mode hit an
    # UnboundLocalError on logs_dir/norm_dir instead of reading the values
    # installed by test_config().
    global logs_dir, initial_logs_dir, input_dir, output_dir, norm_dir, valid_file_dir
    # Configuration Part #
    if mode == 'train':
        import path_setting as ps
        set_path = ps.PathSetting(prj_dir, model)
        logs_dir = initial_logs_dir = set_path.logs_dir
        input_dir = set_path.input_dir
        output_dir = set_path.output_dir
        norm_dir = set_path.norm_dir
        valid_file_dir = set_path.valid_file_dir
        sys.path.insert(0, prj_dir+'/configure/LSTM')
        import config as cg
        global seq_size, batch_num
        seq_size = cg.seq_len
        batch_num = cg.num_batches
        global learning_rate, dropout_rate, max_epoch, batch_size, valid_batch_size
        learning_rate = cg.lr
        dropout_rate = cg.dropout_rate
        max_epoch = cg.max_epoch
        batch_size = valid_batch_size = batch_num*seq_size
        global target_delay
        target_delay = cg.target_delay
        global lstm_cell_size, num_layers
        lstm_cell_size = cg.cell_size
        num_layers = cg.num_layers
    # Graph Part #
    print("Graph initialization...")
    with tf.device(device):
        with tf.variable_scope("model", reuse=None):
            m_train = Model(is_training=True)
        with tf.variable_scope("model", reuse=True):
            m_valid = Model(is_training=False)
    print("Done")
    # Summary Part #
    print("Setting up summary op...")
    summary_ph = tf.placeholder(dtype=tf.float32)
    with tf.variable_scope("Training_procedure"):
        cost_summary_op = tf.summary.scalar("cost", summary_ph)
        accuracy_summary_op = tf.summary.scalar("accuracy", summary_ph)
    # train_summary_writer = tf.summary.FileWriter(logs_dir + '/train/', max_queue=4)
    # valid_summary_writer = tf.summary.FileWriter(logs_dir + '/valid/', max_queue=4)
    # summary_dic = summary_generation(valid_file_dir)
    print("Done")
    # Model Save Part #
    print("Setting up Saver...")
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(logs_dir + '/LSTM')
    print("Done")
    # Session Part #
    sess_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)
    if mode == 'train':
        train_summary_writer = tf.summary.FileWriter(logs_dir + '/train/', sess.graph, max_queue=2)
        valid_summary_writer = tf.summary.FileWriter(logs_dir + '/valid/', max_queue=2)
    if ckpt and ckpt.model_checkpoint_path:  # model restore
        print("Model restored...")
        if mode == 'train':
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            # Test mode restores the exported checkpoint and re-saves a copy.
            saver.restore(sess, initial_logs_dir+ckpt_name)
            # saver.restore(sess, logs_dir+ckpt_name)
            saver.save(sess, logs_dir + "/model_LSTM.ckpt", 0)  # model save
        print("Done")
    else:
        sess.run(tf.global_variables_initializer())  # if the checkpoint doesn't exist, do initialization
    if mode == 'train':
        train_data_set = dr.DataReader(input_dir, output_dir, norm_dir, target_delay=target_delay, u=u, name="train")  # training data reader initialization
    if mode == 'train':
        for itr in range(max_epoch):
            train_inputs, train_labels = train_data_set.next_batch(batch_size)
            one_hot_labels = train_labels.reshape((-1, 1))
            one_hot_labels = dense_to_one_hot(one_hot_labels, num_classes=2)
            feed_dict = {m_train.inputs: train_inputs, m_train.labels: one_hot_labels,
                         m_train.keep_probability: dropout_rate}
            sess.run(m_train.train_op, feed_dict=feed_dict)
            if itr % 10 == 0 and itr >= 0:
                train_cost, train_accuracy = sess.run([m_train.cost, m_train.accuracy], feed_dict=feed_dict)
                print("Step: %d, train_cost: %.4f, train_accuracy=%4.4f" % (itr, train_cost, train_accuracy*100))
                train_cost_summary_str = sess.run(cost_summary_op, feed_dict={summary_ph: train_cost})
                train_accuracy_summary_str = sess.run(accuracy_summary_op, feed_dict={summary_ph: train_accuracy})
                train_summary_writer.add_summary(train_cost_summary_str, itr)  # write the train phase summary to event files
                train_summary_writer.add_summary(train_accuracy_summary_str, itr)
            # if train_data_set.eof_checker():
            # Periodic checkpoint + validation pass.
            if itr % 50 == 0 and itr > 0:
                saver.save(sess, logs_dir + "/model.ckpt", itr)  # model save
                print('validation start!')
                valid_accuracy, valid_cost = \
                    utils.do_validation(m_valid, sess, valid_file_dir, norm_dir, type='LSTM')
                print("valid_cost: %.4f, valid_accuracy=%4.4f" % (valid_cost, valid_accuracy * 100))
                valid_cost_summary_str = sess.run(cost_summary_op, feed_dict={summary_ph: valid_cost})
                valid_accuracy_summary_str = sess.run(accuracy_summary_op, feed_dict={summary_ph: valid_accuracy})
                valid_summary_writer.add_summary(valid_cost_summary_str, itr)  # write the valid phase summary to event files
                valid_summary_writer.add_summary(valid_accuracy_summary_str, itr)
            # mean_acc = full_evaluation(m_valid, sess, valid_batch_size, valid_file_dir,
            #                            valid_summary_writer, summary_dic, itr)
            # if mean_acc > 0.968:
            #     print('finish!!')
            #     break
            # train_data_set.reader_initialize()
            # print('Train data reader was initialized!')  # initialize eof flag & num_file & start index
    elif mode == 'test':
        final_softout, final_label = utils.vad_test3(m_valid, sess, valid_batch_size, test_file_dir, norm_dir, data_len,
                                                     eval_type)
        if data_len is None:
            return final_softout, final_label
        else:
            return final_softout[0:data_len, :], final_label[0:data_len, :]
if __name__ == "__main__":
    # NOTE(review): tf.app.run() invokes main() with parsed argv, while main()
    # here is written to take (prj_dir, model, mode) — confirm this entry
    # point; other scripts import and call main()/test_config() directly.
    tf.app.run()
| 23,706
| 37.92775
| 180
|
py
|
VAD
|
VAD-master/lib/python/VAD_bDNN.py
|
import tensorflow as tf
import numpy as np
import utils as utils
import re
import data_reader_bDNN_v2 as dr
import os, sys
from sklearn import metrics
from scipy.optimize import brentq
from scipy.interpolate import interp1d
# FLAGS = tf.flags.FLAGS
#
# tf.flags.DEFINE_string('mode', "test", "mode : train/ test [default : train]")
mode = 'test'
# Data / model locations (overridden at runtime by train_config() / test_config()).
file_dir = "/home/sbie/storage/VAD_Database/SE_TIMIT_MRCG_0328"
input_dir = file_dir
output_dir = file_dir + "/Labels"
valid_file_dir = "/home/sbie/storage/VAD_Database/NX_TIMIT_MRCG_small"
# test_file_dir = valid_file_dir
test_file_dir = "/home/sbie/storage2/VAD_Database/NX_TIMIT_MRCG_small"
norm_dir = input_dir
initial_logs_dir = "/home/sbie/storage2/VAD_Database/saved_model/my_converted_checkpoint2"
logs_dir = "/home/sbie/github/VAD_Project/VAD_bDNN/logs_bDNN"
ckpt_name = '/bDNN'
reset = True  # remove all existed logs and initialize log directories
device = '/gpu:0'
# Bug fix: the original used `mode is 'test'`, which compares object identity
# and only works by CPython string interning; use `==` for value equality.
if mode == 'test':
    reset = False
    logs_dir = '/home/sbie/storage2/logs_backup0428/logs_bDNN853'
if reset:
    os.popen('rm -rf ' + logs_dir + '/*')
    os.popen('mkdir ' + logs_dir + '/train')
    os.popen('mkdir ' + logs_dir + '/valid')
summary_list = ["cost", "accuracy_SNR_-5", "accuracy_SNR_0", "accuracy_SNR_5", "accuracy_SNR_10",
                "accuracy_across_all_SNRs"]
# Training / bDNN-window hyper-parameters.
eval_num_batches = 2e5
SMALL_NUM = 1e-4
max_epoch = 301
dropout_rate = 0.5
decay = 0.9  # batch normalization decay factor
w = 19  # w default = 19
u = 9  # u default = 9
eval_th = 0.6
th = 0.6
num_hidden_1 = 512
num_hidden_2 = 512
batch_size = 4096 + 2*w  # batch_size = 4096
valid_batch_size = batch_size
assert (w-1) % u == 0, "w-1 must be divisible by u"
num_features = 768  # MRCG feature
bdnn_winlen = (((w-1) / u) * 2) + 3
bdnn_inputsize = int(bdnn_winlen * num_features)
bdnn_outputsize = int(bdnn_winlen)
initLr = 2e-5
data_len = None
eval_type = 1
# initLr = 5e-2
def train_config(c_train_dir, c_valid_dir, c_logs_dir, c_batch_size_eval, c_max_epoch, c_mode):
    """Point the module-level training configuration at the given directories and sizes."""
    global file_dir, input_dir, output_dir, valid_file_dir, norm_dir
    global initial_logs_dir, logs_dir, ckpt_name
    global batch_size, valid_batch_size, mode, max_epoch
    file_dir = c_train_dir
    valid_file_dir = c_valid_dir
    # Input, output-label and normalization dirs all hang off the train dir.
    input_dir = c_train_dir
    output_dir = c_train_dir + "/Labels"
    norm_dir = c_train_dir
    initial_logs_dir = logs_dir = c_logs_dir
    # batch_size = valid_batch_size = c_batch_size_eval + 2 * w
    batch_size = valid_batch_size = c_batch_size_eval
    max_epoch = c_max_epoch
    mode = c_mode
def test_config(c_test_dir, c_norm_dir, c_initial_logs_dir, c_batch_size_eval, c_data_len):
global test_file_dir
global norm_dir
global initial_logs_dir
global ckpt_name
global valid_batch_size
global data_len
test_file_dir = c_test_dir
norm_dir = c_norm_dir
initial_logs_dir = c_initial_logs_dir
valid_batch_size = c_batch_size_eval
data_len = c_data_len
def affine_transform(x, output_dim, name=None):
    """Affine map x @ W + b for a 2-D input of shape (batch_size, num_features)."""
    in_dim = x.get_shape()[1]
    weight = tf.get_variable(name + "_w", [in_dim, output_dim],
                             initializer=tf.truncated_normal_initializer(stddev=0.02))
    bias = tf.get_variable(name + "_b", [output_dim],
                           initializer=tf.constant_initializer(0.0))
    return tf.matmul(x, weight) + bias
def inference(inputs, keep_prob, is_training=True):
    """Two-hidden-layer bDNN: (BN-affine -> ReLU -> dropout) x 2, sigmoid output.

    Returns a tensor named "logits" of shape (batch, bdnn_outputsize).
    """
    hidden = utils.batch_norm_affine_transform(inputs, num_hidden_1, name="hidden_1",
                                               decay=decay, is_training=is_training)
    hidden = tf.nn.dropout(tf.nn.relu(hidden), keep_prob=keep_prob)
    hidden = utils.batch_norm_affine_transform(hidden, num_hidden_2, name="hidden_2",
                                               decay=decay, is_training=is_training)
    hidden = tf.nn.dropout(tf.nn.relu(hidden), keep_prob=keep_prob)
    # Output layer: plain affine followed by sigmoid (targets are in [0, 1]).
    out = tf.sigmoid(affine_transform(hidden, bdnn_outputsize, name="output"))
    out = tf.reshape(out, [-1, int(bdnn_outputsize)])
    return tf.identity(out, name="logits")
def train(loss_val, var_list):
    """Build an Adam training op with a staircase exponentially decaying learning rate."""
    decay_rate = .95
    decay_freq = 200
    global_step = tf.Variable(0, trainable=False)
    lr = tf.train.exponential_decay(initLr, global_step, decay_freq, decay_rate, staircase=True)
    opt = tf.train.AdamOptimizer(lr)
    grads = opt.compute_gradients(loss_val, var_list=var_list)
    return opt.apply_gradients(grads, global_step=global_step)
def bdnn_prediction(batch_size, logits, threshold=th):
    """Aggregate boosted-DNN window logits into per-frame predictions.

    Returns (hard, soft): hard is an int 0/1 array thresholded at `threshold`,
    soft is the float32 averaged score per frame (with a tiny random jitter,
    matching the original implementation).
    """
    padded_size = batch_size + 2 * w
    scores = np.zeros((padded_size, 1))
    frame_idx = (np.arange(padded_size) + 1).reshape((padded_size, 1))
    frame_idx = utils.bdnn_transform(frame_idx, w, u)
    frame_idx = frame_idx[w:(padded_size - w), :]
    for frame in np.arange(w, padded_size - w):
        hits = np.where((frame_idx - 1) == frame)
        vals = logits[hits]
        scores[frame] = np.sum(vals) / vals.shape[0] + np.random.rand(1) * 1e-4
    scores = np.trim_zeros(scores)
    soft = np.float32(scores)
    hard = (scores >= threshold).astype(int)
    return hard, soft
def summary_generation(eval_file_dir):
    """Create per-noise-type and averaged scalar summary ops.

    Returns a dict mapping op names to summary ops, plus the shared float
    placeholder under key "summary_ph".
    """
    summary_dic = {}
    noise_list = sorted(os.listdir(eval_file_dir))
    summary_ph = tf.placeholder(dtype=tf.float32)
    summary_dic["summary_ph"] = summary_ph
    # One scalar summary per (noise type, metric) pair.
    for noise in noise_list:
        with tf.variable_scope(noise):
            for summary_name in summary_list:
                summary_dic[noise + "_" + summary_name] = tf.summary.scalar(summary_name, summary_ph)
    # Metrics averaged over every noise type.
    with tf.variable_scope("Averaged_Results"):
        summary_dic["cost_across_all_noise_types"] = tf.summary.scalar("cost_across_all_noise_types", summary_ph)
        summary_dic["accuracy_across_all_noise_types"] = tf.summary.scalar("accuracy_across_all_noise_types", summary_ph)
        summary_dic["variance_across_all_noise_types"] = tf.summary.scalar("variance_across_all_noise_types", summary_ph)
    return summary_dic
def full_evaluation(m_eval, sess_eval, batch_size_eval, eval_file_dir, summary_writer, summary_dic, itr):
    """Evaluate m_eval on every noise-type subdirectory of eval_file_dir.

    For each noise type: builds a DataReader, runs evaluation(), prints
    per-SNR accuracy/AUC, and writes the per-noise summaries from
    summary_dic at step itr. Finally writes and prints the cost/accuracy/
    variance averaged across all noise types. Returns None.
    """
    mean_cost = []
    mean_accuracy = []
    mean_auc = []
    print("-------- Performance for each of noise types --------")
    noise_list = os.listdir(eval_file_dir)
    noise_list = sorted(noise_list)
    summary_ph = summary_dic["summary_ph"]
    for i in range(len(noise_list)):
        noise_name = '/' + noise_list[i]
        eval_input_dir = eval_file_dir + noise_name
        eval_output_dir = eval_file_dir + noise_name + '/Labels'
        eval_data_set = dr.DataReader(eval_input_dir, eval_output_dir, norm_dir, w=w, u=u, name="eval")
        # evaluation() returns averages plus per-file (per-SNR) lists.
        eval_cost, eval_accuracy, eval_list, eval_auc, eval_auc_list = evaluation(m_eval, eval_data_set, sess_eval, batch_size_eval)
        print("--noise type : " + noise_list[i])
        print("cost: %.4f, accuracy across all SNRs: %.4f, auc across all SNRS: %.4f" % (
            eval_cost, eval_accuracy * 100, eval_auc*100))
        print('accuracy wrt SNR:')
        # eval_list / eval_auc_list are indexed by file; presumably the four
        # files per noise dir correspond to SNR -5/0/5/10 dB -- TODO confirm.
        print('SNR_-5 : %.4f, SNR_0 : %.4f, SNR_5 : %.4f, SNR_10 : %.4f' % (eval_list[0]*100, eval_list[1]*100,
                                                                            eval_list[2]*100, eval_list[3]*100))
        print('AUC wrt SNR:')
        print('SNR_-5 : %.4f, SNR_0 : %.4f, SNR_5 : %.4f, SNR_10 : %.4f' % (eval_auc_list[0]*100, eval_auc_list[1]*100,
                                                                            eval_auc_list[2]*100, eval_auc_list[3]*100))
        print('')
        # Write cost + 4 per-SNR accuracies + overall accuracy, matching
        # the order of the module-level summary_list.
        eval_summary_list = [eval_cost] + eval_list + [eval_accuracy]
        for j, summary_name in enumerate(summary_list):
            summary_str = sess_eval.run(summary_dic[noise_list[i]+"_"+summary_name], feed_dict={summary_ph: eval_summary_list[j]})
            summary_writer.add_summary(summary_str, itr)
        mean_cost.append(eval_cost)
        mean_accuracy.append(eval_accuracy)
        mean_auc.append(eval_auc)
    # Aggregate across noise types (variance computed before the mean
    # overwrites mean_accuracy).
    mean_cost = np.mean(np.asarray(mean_cost))
    var_accuracy = np.var(np.asarray(mean_accuracy))
    mean_accuracy = np.mean(np.asarray(mean_accuracy))
    mean_auc = np.mean(np.asarray(mean_auc))
    summary_writer.add_summary(sess_eval.run(summary_dic["cost_across_all_noise_types"],
                                             feed_dict={summary_ph: mean_cost}), itr)
    summary_writer.add_summary(sess_eval.run(summary_dic["accuracy_across_all_noise_types"],
                                             feed_dict={summary_ph: mean_accuracy}), itr)
    summary_writer.add_summary(sess_eval.run(summary_dic["variance_across_all_noise_types"],
                                             feed_dict={summary_ph: var_accuracy}), itr)
    print("-------- Performance across all of noise types --------")
    print("cost : %.4f" % mean_cost)
    print("******* averaged accuracy across all noise_types : %.4f *******" % (mean_accuracy*100))
    print("******* averaged auc across all noise_types : %.4f *******" % (mean_auc*100))
    print("******* variance of accuracies across all noise_types : %4.4f *******" % (var_accuracy*100))
def evaluation(m_valid, valid_data_set, sess, eval_batch_size, num_batches=eval_num_batches):
    """Drain valid_data_set through m_valid, accumulating cost/accuracy/AUC.

    Batches are averaged per file (the reader signals file boundaries via
    file_change_checker); returns (mean cost, mean accuracy, per-file
    accuracy list, mean AUC, per-file AUC list).
    """
    # num_samples = valid_data_set.num_samples
    # num_batches = num_samples / batch_size
    avg_valid_cost = 0.
    avg_valid_accuracy = 0.
    avg_valid_auc = 0.
    itr_sum = 0.
    # Per-file accumulators, one slot per file in the reader.
    auc_list = [0 for i in range(valid_data_set._file_len)]
    accuracy_list = [0 for i in range(valid_data_set._file_len)]
    cost_list = [0 for i in range(valid_data_set._file_len)]
    itr_file = 0
    while True:
        valid_inputs, valid_labels = valid_data_set.next_batch(eval_batch_size)
        if valid_data_set.file_change_checker():
            # Finalize the averages for the file that just ended.
            # NOTE(review): divides by itr_sum -- assumes at least one batch
            # was processed before a file change; verify reader guarantees this.
            auc_list[itr_file] = avg_valid_auc / itr_sum
            accuracy_list[itr_file] = avg_valid_accuracy / itr_sum
            cost_list[itr_file] = avg_valid_cost / itr_sum
            avg_valid_auc = 0.
            avg_valid_accuracy = 0.
            avg_valid_cost = 0.
            itr_sum = 0
            itr_file += 1
            valid_data_set.file_change_initialize()
        if valid_data_set.eof_checker():
            valid_data_set.reader_initialize()
            print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
            break
        feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: valid_labels,
                     m_valid.keep_probability: 1}
        valid_cost, valid_logits = sess.run([m_valid.cost, m_valid.logits], feed_dict=feed_dict)
        # Collapse bDNN window logits to per-frame hard/soft predictions.
        valid_pred, soft_pred = bdnn_prediction(eval_batch_size, valid_logits, threshold=eval_th)
        # print(np.sum(valid_pred))
        # The center column of the label window is the raw per-frame label.
        raw_indx = int(np.floor(valid_labels.shape[1]/2))
        raw_labels = valid_labels[:, raw_indx]
        raw_labels = raw_labels.reshape((-1, 1))
        fpr, tpr, thresholds = metrics.roc_curve(raw_labels, soft_pred, pos_label=1)
        valid_auc = metrics.auc(fpr, tpr)
        # valid_auc = brentq(lambda x : 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
        valid_accuracy = np.equal(valid_pred, raw_labels)
        valid_accuracy = valid_accuracy.astype(int)
        valid_accuracy = np.sum(valid_accuracy)/eval_batch_size
        avg_valid_auc += valid_auc
        avg_valid_cost += valid_cost
        avg_valid_accuracy += valid_accuracy
        itr_sum += 1
    # Means over the per-file averages.
    total_avg_valid_auc = np.asscalar(np.mean(np.asarray(auc_list)))
    total_avg_valid_cost = np.asscalar(np.mean(np.asarray(cost_list)))
    total_avg_valid_accuracy = np.asscalar(np.mean(np.asarray(accuracy_list)))
    return total_avg_valid_cost, total_avg_valid_accuracy, accuracy_list, total_avg_valid_auc, auc_list
def dense_to_one_hot(labels_dense, num_classes=2):
    """Convert scalar class labels to one-hot float32 vectors."""
    # Adapted from the TensorFlow tutorial: set the target cell of each row
    # via flat indexing into a zero matrix.
    n = labels_dense.shape[0]
    one_hot = np.zeros((n, num_classes))
    flat_positions = (np.arange(n) * num_classes + labels_dense.ravel()).astype(int)
    one_hot.flat[flat_positions] = 1
    return one_hot.astype(np.float32)
class Model(object):
    """bDNN graph: input/label placeholders, inference net, MSE cost, train op."""

    def __init__(self, is_training=True):
        # Placeholders: dropout keep-prob, bDNN input windows, bDNN targets.
        self.keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
        self.inputs = tf.placeholder(tf.float32, shape=[None, bdnn_inputsize], name="inputs")
        self.labels = tf.placeholder(tf.float32, shape=[None, bdnn_outputsize], name="labels")
        # Inference graph; logits shape is (batch_size, bdnn_outputsize).
        self.logits = inference(self.inputs, self.keep_probability, is_training=is_training)
        # Mean-squared-error objective between sigmoid outputs and targets.
        self.cost = tf.reduce_mean(tf.square(self.labels - self.logits))
        # Training op over every trainable variable (built for eval graphs too).
        self.train_op = train(self.cost, tf.trainable_variables())
def main(prj_dir=None, model=None, mode=None):
    """Build the bDNN graph and either train it (mode == 'train') or run
    inference over the configured test set (mode == 'test').

    In test mode returns (soft predictions, labels); when data_len was set
    via test_config() the outputs are truncated to data_len frames.
    """
    # These path names are assigned inside the train branch, which makes them
    # function-local throughout main(); declare them global so that in test
    # mode the module-level values (set by test_config()) are used instead of
    # raising UnboundLocalError.
    global logs_dir, initial_logs_dir, input_dir, output_dir, norm_dir, valid_file_dir

    # Configuration Part #
    # Bug fix: string comparisons originally used `is`, which tests identity
    # and only works via CPython interning; replaced with `==` throughout.
    if mode == 'train':
        import path_setting as ps
        set_path = ps.PathSetting(prj_dir, model)
        logs_dir = initial_logs_dir = set_path.logs_dir
        input_dir = set_path.input_dir
        output_dir = set_path.output_dir
        norm_dir = set_path.norm_dir
        valid_file_dir = set_path.valid_file_dir

        sys.path.insert(0, prj_dir + '/configure/bDNN')
        import config as cg

        # Override module-level hyper-parameters from the project config.
        global initLr, dropout_rate, max_epoch, batch_size, valid_batch_size
        initLr = cg.lr
        dropout_rate = cg.dropout_rate
        max_epoch = cg.max_epoch
        batch_size = valid_batch_size = cg.batch_size

        global w, u
        w = cg.w
        u = cg.u

        # Window sizes depend on w/u, so recompute them after the override.
        global bdnn_winlen, bdnn_inputsize, bdnn_outputsize
        bdnn_winlen = (((w-1) / u) * 2) + 3
        bdnn_inputsize = int(bdnn_winlen * num_features)
        bdnn_outputsize = int(bdnn_winlen)

        global num_hidden_1, num_hidden_2
        num_hidden_1 = cg.num_hidden_1
        num_hidden_2 = cg.num_hidden_2

    # Graph Part #
    print("Graph initialization...")
    with tf.device(device):
        with tf.variable_scope("model", reuse=None):
            m_train = Model(is_training=True)
        with tf.variable_scope("model", reuse=True):
            m_valid = Model(is_training=False)
    print("Done")

    # Summary Part #
    print("Setting up summary op...")
    summary_ph = tf.placeholder(dtype=tf.float32)
    with tf.variable_scope("Training_procedure"):
        cost_summary_op = tf.summary.scalar("cost", summary_ph)
        accuracy_summary_op = tf.summary.scalar("accuracy", summary_ph)
    print("Done")

    # Model Save Part #
    print("Setting up Saver...")
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(initial_logs_dir)
    print("Done")

    # Session Part #
    sess_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)

    if mode == 'train':
        train_summary_writer = tf.summary.FileWriter(logs_dir + '/train/', sess.graph, max_queue=2)
        valid_summary_writer = tf.summary.FileWriter(logs_dir + '/valid/', max_queue=2)

    if ckpt and ckpt.model_checkpoint_path:  # restore an existing checkpoint
        print("Model restored...")
        if mode == 'train':
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            saver.restore(sess, initial_logs_dir + ckpt_name)
        print("Done")
    else:
        sess.run(tf.global_variables_initializer())  # no checkpoint: fresh init

    if mode == 'train':
        train_data_set = dr.DataReader(input_dir, output_dir, norm_dir, w=w, u=u, name="train")
        for itr in range(max_epoch):
            train_inputs, train_labels = train_data_set.next_batch(batch_size)
            feed_dict = {m_train.inputs: train_inputs, m_train.labels: train_labels,
                         m_train.keep_probability: dropout_rate}
            sess.run(m_train.train_op, feed_dict=feed_dict)

            if itr % 10 == 0 and itr >= 0:
                train_cost, logits = sess.run([m_train.cost, m_train.logits], feed_dict=feed_dict)
                # Bug fix: bdnn_prediction returns (hard predictions, soft
                # scores); the original compared the whole tuple against the
                # labels, corrupting the accuracy statistic.
                result, _ = bdnn_prediction(batch_size, logits, threshold=th)
                # Center column of the label window = raw per-frame labels.
                raw_indx = int(np.floor(train_labels.shape[1] / 2))
                raw_labels = train_labels[:, raw_indx].reshape((-1, 1))
                train_accuracy = np.sum(np.equal(result, raw_labels).astype(int)) / batch_size
                print("Step: %d, train_cost: %.4f, train_accuracy=%4.4f" % (itr, train_cost, train_accuracy*100))
                train_cost_summary_str = sess.run(cost_summary_op, feed_dict={summary_ph: train_cost})
                train_accuracy_summary_str = sess.run(accuracy_summary_op, feed_dict={summary_ph: train_accuracy})
                train_summary_writer.add_summary(train_cost_summary_str, itr)
                train_summary_writer.add_summary(train_accuracy_summary_str, itr)

            if itr % 50 == 0 and itr > 0:
                saver.save(sess, logs_dir + "/model.ckpt", itr)  # model save
                print('validation start!')
                valid_accuracy, valid_cost = \
                    utils.do_validation(m_valid, sess, valid_file_dir, norm_dir, type='bDNN')
                print("valid_cost: %.4f, valid_accuracy=%4.4f" % (valid_cost, valid_accuracy * 100))
                valid_cost_summary_str = sess.run(cost_summary_op, feed_dict={summary_ph: valid_cost})
                valid_accuracy_summary_str = sess.run(accuracy_summary_op, feed_dict={summary_ph: valid_accuracy})
                valid_summary_writer.add_summary(valid_cost_summary_str, itr)
                valid_summary_writer.add_summary(valid_accuracy_summary_str, itr)

    elif mode == 'test':
        final_softout, final_label = utils.vad_test(m_valid, sess, valid_batch_size, test_file_dir, norm_dir,
                                                    data_len, eval_type)
        if data_len is None:
            return final_softout, final_label
        return final_softout[0:data_len, :], final_label[0:data_len, :]
if __name__ == "__main__":
tf.app.run()
| 20,473
| 37.197761
| 134
|
py
|
VAD
|
VAD-master/lib/python/train.py
|
import sys
sys.path.insert(0, './lib/python')
import VAD_Proposed as Vp
import VAD_DNN as Vd
import VAD_bDNN as Vb
import VAD_LSTM_2 as Vl
import scipy.io as sio
import os, getopt
import time
import graph_save as gs
import path_setting as ps
# norm_dir = "./norm_data"
# data_dir = "./sample_data"
# ckpt_name = '/model9918and41.ckpt-2'
# model_dir = "./saved_model"
# valid_batch_size = 4134
if __name__ == '__main__':
    # Entry point: parse options, optionally extract MRCG features via MATLAB,
    # then train the model selected by -m and freeze its inference graph.
    #   -m <0..3>   model: 0=ACAM, 1=bDNN, 2=DNN, 3=LSTM
    #   -e <0|1>    1 = re-run MATLAB acoustic feature extraction first
    #   --prj_dir   project root directory
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hm:e:', ["prj_dir="])
    except getopt.GetoptError as err:
        print(str(err))
        sys.exit(1)
    if len(opts) != 3:
        print("arguments are not enough.")
        sys.exit(1)
    # NOTE(review): mode / extract_feat / prj_dir stay unbound if the matching
    # option is missing -- presumably all three are always supplied; the
    # len(opts) != 3 guard above enforces the count but not which options.
    for opt, arg in opts:
        if opt == '-h':
            sys.exit(0)
        elif opt == '-m':
            mode = int(arg)
        elif opt == '-e':
            extract_feat = int(arg)
        elif opt == '--prj_dir':
            prj_dir = str(arg)
    # Raw audio and extracted-feature directory layout under the project root.
    data_dir = prj_dir + '/data/raw'
    train_data_dir = data_dir + '/train'
    valid_data_dir = data_dir + '/valid'
    save_dir = prj_dir + '/data/feat'
    train_save_dir = save_dir + '/train'
    valid_save_dir = save_dir + '/valid'
    if extract_feat:
        # Rebuild the feature directory and run MATLAB feature extraction
        # for both the train and the validation sets.
        os.system("rm -rf " + save_dir)
        os.system("mkdir " + save_dir)
        os.system("mkdir " + save_dir + '/train')
        os.system("mkdir " + save_dir + '/valid')
        os.system(
            "matlab -r \"try acoustic_feat_ex(\'%s\',\'%s\'); catch; end; quit\"" % (train_data_dir, train_save_dir))
        os.system(
            "matlab -r \"try acoustic_feat_ex(\'%s\',\'%s\'); catch; end; quit\"" % (valid_data_dir, valid_save_dir))
    # Copy the train-set normalization factors to where test-time code expects them.
    train_norm_dir = save_dir + '/train/global_normalize_factor.mat'
    test_norm_dir = prj_dir + '/norm_data/global_normalize_factor.mat'
    os.system("cp %s %s" % (train_norm_dir, test_norm_dir))
    if mode == 0:
        # ACAM: reset its log dirs, train, then freeze the inference graph.
        set_path = ps.PathSetting(prj_dir, 'ACAM')
        logs_dir = set_path.logs_dir
        os.system("rm -rf " + logs_dir + '/train')
        os.system("rm -rf " + logs_dir + '/valid')
        os.system("mkdir " + logs_dir + '/train')
        os.system("mkdir " + logs_dir + '/valid')
        Vp.main(prj_dir, 'ACAM', 'train')
        # Vp.train_config(save_dir+'/train', save_dir+'/valid', prj_dir+'/logs', batch_size,
        #                 train_step, 'train')
        #
        # Vp.main()
        gs.freeze_graph(prj_dir + '/logs/ACAM', prj_dir + '/saved_model/graph/ACAM', 'model_1/logits,model_1/raw_labels')
    if mode == 1:
        # bDNN: same flow with its own log dirs and output node names.
        set_path = ps.PathSetting(prj_dir, 'bDNN')
        logs_dir = set_path.logs_dir
        os.system("rm -rf " + logs_dir + '/train')
        os.system("rm -rf " + logs_dir + '/valid')
        os.system("mkdir " + logs_dir + '/train')
        os.system("mkdir " + logs_dir + '/valid')
        # Vb.train_config(save_dir+'/train', save_dir+'/valid', prj_dir+'/logs', batch_size,
        #                 train_step, 'train')
        Vb.main(prj_dir, 'bDNN', 'train')
        gs.freeze_graph(prj_dir + '/logs/bDNN', prj_dir + '/saved_model/graph/bDNN', 'model_1/logits,model_1/labels')
        # gs.freeze_graph(prj_dir + '/saved_model/temp', prj_dir + '/saved_model/temp', 'model_1/soft_pred,model_1/raw_labels')
    if mode == 2:
        # DNN variant.
        set_path = ps.PathSetting(prj_dir, 'DNN')
        logs_dir = set_path.logs_dir
        os.system("rm -rf " + logs_dir + '/train')
        os.system("rm -rf " + logs_dir + '/valid')
        os.system("mkdir " + logs_dir + '/train')
        os.system("mkdir " + logs_dir + '/valid')
        Vd.main(prj_dir, 'DNN', 'train')
        gs.freeze_graph(prj_dir + '/logs/DNN', prj_dir + '/saved_model/graph/DNN', 'model_1/soft_pred,model_1/raw_labels')
        # gs.freeze_graph(prj_dir + '/saved_model/temp', prj_dir + '/saved_model/temp', 'model_1/soft_pred,model_1/raw_labels')
    if mode == 3:
        # LSTM variant.
        set_path = ps.PathSetting(prj_dir, 'LSTM')
        logs_dir = set_path.logs_dir
        os.system("rm -rf " + logs_dir + '/train')
        os.system("rm -rf " + logs_dir + '/valid')
        os.system("mkdir " + logs_dir + '/train')
        os.system("mkdir " + logs_dir + '/valid')
        Vl.main(prj_dir, 'LSTM', 'train')
        gs.freeze_graph(prj_dir + '/logs/LSTM', prj_dir + '/saved_model/graph/LSTM', 'model_1/soft_pred,model_1/raw_labels')
    # os.system("rm -rf")
    print("done")
| 4,385
| 32.227273
| 127
|
py
|
VAD
|
VAD-master/lib/python/utils_jskim.py
|
# Utils used with tensorflow implementation
import tensorflow as tf
import numpy as np
import scipy.misc as misc
import os, sys
from six.moves import urllib
import tarfile
import zipfile
import scipy.io
import re
import data_reader_bDNN as dr
import data_reader_RNN as dr3
__author__ = 'Juntae'
def vad_test(m_eval, sess_eval, batch_size_eval, eval_file_dir, norm_dir, data_len, eval_type):
    """Run bDNN-style evaluation over eval_file_dir; returns (soft scores, labels)."""
    labels_dir = eval_file_dir + '/Labels'
    # Pad the last batch up to a full batch.
    pad = batch_size_eval - data_len % batch_size_eval
    reader = dr.DataReader(eval_file_dir, labels_dir, norm_dir, w=19, u=9, name="eval", pad=pad)
    return evaluation(m_eval, reader, sess_eval, batch_size_eval, eval_type)
def vad_test2(m_eval, sess_eval, batch_size_eval, eval_file_dir, norm_dir, data_len, eval_type):
    """Evaluate using the dr2 data-reader variant; returns (soft scores, labels).

    NOTE(review): `dr2` is never imported in this module (only `dr` and `dr3`
    are), so calling this raises NameError as written -- confirm the intended
    reader module. `pad_size` is computed but not passed to the reader.
    """
    eval_input_dir = eval_file_dir
    eval_output_dir = eval_file_dir + '/Labels'
    pad_size = batch_size_eval - data_len % batch_size_eval
    eval_data_set = dr2.DataReader(eval_input_dir, eval_output_dir, norm_dir, w=19, u=9, name="eval2")
    final_softout, final_label = evaluation(m_eval, eval_data_set, sess_eval, batch_size_eval, eval_type)
    return final_softout, final_label
def vad_test3(m_eval, sess_eval, batch_size_eval, eval_file_dir, norm_dir, data_len, eval_type):
    """Run RNN-reader (w=5) evaluation over eval_file_dir; returns (soft scores, labels)."""
    labels_dir = eval_file_dir + '/Labels'
    # Computed for parity with the other vad_test helpers; the dr3 reader
    # below does not take a pad argument.
    pad = batch_size_eval - data_len % batch_size_eval
    reader = dr3.DataReader(eval_file_dir, labels_dir, norm_dir, w=5, u=9, name="eval2")
    return evaluation(m_eval, reader, sess_eval, batch_size_eval, eval_type)
def vad_test4(m_eval, sess_eval, batch_size_eval, eval_file_dir, norm_dir, data_len, eval_type):
    """Evaluate using the dr4 data-reader variant; returns (soft scores, labels).

    NOTE(review): `dr4` is never imported in this module (only `dr` and `dr3`
    are), so calling this raises NameError as written -- confirm the intended
    reader module. `pad_size` is computed but not passed to the reader.
    """
    eval_input_dir = eval_file_dir
    eval_output_dir = eval_file_dir + '/Labels'
    pad_size = batch_size_eval - data_len % batch_size_eval
    eval_data_set = dr4.DataReader(eval_input_dir, eval_output_dir, norm_dir, w=19, u=9, name="eval2")
    final_softout, final_label = evaluation(m_eval, eval_data_set, sess_eval, batch_size_eval, eval_type)
    return final_softout, final_label
def affine_transform(x, output_dim, seed=0, name=None):
    """Affine map x @ W + b for a 2-D input of shape (batch_size, num_features)."""
    init = tf.truncated_normal_initializer(stddev=0.02, seed=seed)
    # weights = tf.get_variable(name + "_w", [x.get_shape()[1], output_dim],
    #                           initializer=tf.contrib.layers.xavier_initializer(seed=seed))
    weights = tf.get_variable(name + "_w", [x.get_shape()[1], output_dim], initializer=init)
    bias = tf.get_variable(name + "_b", [output_dim], initializer=tf.constant_initializer(0.0))
    return tf.matmul(x, weights) + bias
def evaluation(m_valid, valid_data_set, sess, eval_batch_size, eval_type):
    """Drain valid_data_set through m_valid and collect per-frame soft scores and labels.

    eval_type selects the model family:
      0    -- model exposes soft_result / raw_labels tensors directly
      1    -- bDNN: window logits aggregated via bdnn_prediction (threshold 0.6)
      2, 3 -- one-hot-target models; the class-1 logit is used as the score
    Returns (final_softout, final_label), each reshaped to (-1, 1).
    Any other eval_type returns None (unchanged from the original behavior).
    """
    # num_samples = valid_data_set.num_samples
    # num_batches = num_samples / batch_size
    if eval_type == 0:
        final_softout = []
        final_label = []
        while True:
            valid_inputs, valid_labels = valid_data_set.next_batch(eval_batch_size)
            feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: valid_labels,
                         m_valid.keep_probability: 1}
            if valid_data_set.eof_checker():
                final_softout = np.reshape(np.asarray(final_softout), [-1, 1])
                final_label = np.reshape(np.asarray(final_label), [-1, 1])
                valid_data_set.reader_initialize()
                # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break
            valid_soft_result, valid_raw_labels = sess.run([m_valid.soft_result, m_valid.raw_labels],
                                                           feed_dict=feed_dict)
            final_softout.append(valid_soft_result)
            final_label.append(valid_raw_labels)
        return final_softout, final_label

    elif eval_type == 1:
        final_softout = []
        final_label = []
        while True:
            valid_inputs, valid_labels = valid_data_set.next_batch(eval_batch_size)
            feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: valid_labels,
                         m_valid.keep_probability: 1}
            if valid_data_set.eof_checker():
                final_softout = np.reshape(np.asarray(final_softout), [-1, 1])
                final_label = np.reshape(np.asarray(final_label), [-1, 1])
                valid_data_set.reader_initialize()
                # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break
            valid_cost, valid_logits = sess.run([m_valid.cost, m_valid.logits], feed_dict=feed_dict)
            # Collapse the bDNN window logits into per-frame predictions.
            valid_pred, soft_pred = bdnn_prediction(eval_batch_size + 2 * valid_data_set._w, valid_logits,
                                                    threshold=0.6)
            # print(np.sum(valid_pred))
            # Center column of the label window = raw per-frame labels.
            raw_indx = int(np.floor(valid_labels.shape[1] / 2))
            raw_labels = valid_labels[:, raw_indx]
            raw_labels = raw_labels.reshape((-1, 1))
            final_softout.append(soft_pred)
            final_label.append(raw_labels)
        return final_softout, final_label

    elif eval_type in (2, 3):
        # The original branches for eval_type 2 and 3 were byte-identical;
        # they are merged here with no behavior change.
        final_softout = []
        final_label = []
        while True:
            valid_inputs, valid_labels = valid_data_set.next_batch(eval_batch_size)
            one_hot_vlabels = valid_labels.reshape((-1, 1))
            one_hot_vlabels = dense_to_one_hot(one_hot_vlabels, num_classes=2)
            feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: one_hot_vlabels,
                         m_valid.keep_probability: 1}
            if valid_data_set.eof_checker():
                final_softout = np.reshape(np.asarray(final_softout), [-1, 1])
                final_label = np.reshape(np.asarray(final_label), [-1, 1])
                valid_data_set.reader_initialize()
                # print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
                break
            valid_cost, valid_logits = sess.run([m_valid.cost, m_valid.logits], feed_dict=feed_dict)
            # Class-1 (speech) logit serves as the soft score.
            final_softout.append(valid_logits[:, 1])
            final_label.append(valid_labels)
        return final_softout, final_label
def onehot_tensor(label_batch, num_labels):
    """One-hot encode a batch of integer labels as a dense float tensor."""
    rows = label_batch.get_shape().as_list()[0]
    depth = tf.cast(num_labels, tf.int32)
    labels_col = tf.cast(tf.reshape(label_batch, [-1, 1]), dtype=tf.int32)
    row_idx = tf.reshape(tf.range(0, rows, 1), [rows, 1])
    coords = tf.concat(axis=1, values=[row_idx, labels_col])
    dense_shape = tf.stack([rows, depth])
    return tf.sparse_to_dense(coords, dense_shape, 1.0, 0.0)
def get_model_data(dir_path, model_url):
    """Download the model archive into dir_path if needed and load it as a .mat dict."""
    maybe_download_and_extract(dir_path, model_url)
    filepath = os.path.join(dir_path, model_url.split("/")[-1])
    if not os.path.exists(filepath):
        raise IOError("VGG Model not found!")
    return scipy.io.loadmat(filepath)
def maybe_download_and_extract(dir_path, url_name, is_tarfile=False, is_zipfile=False):
    """Fetch url_name into dir_path (if not already present) and optionally unpack it."""
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    filename = url_name.split('/')[-1]
    filepath = os.path.join(dir_path, filename)
    if os.path.exists(filepath):
        return  # already downloaded; nothing to do

    def _progress(count, block_size, total_size):
        pct = float(count * block_size) / float(total_size) * 100.0
        sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, pct))
        sys.stdout.flush()

    filepath, _ = urllib.request.urlretrieve(url_name, filepath, reporthook=_progress)
    print()
    statinfo = os.stat(filepath)
    print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
    if is_tarfile:
        tarfile.open(filepath, 'r:gz').extractall(dir_path)
    elif is_zipfile:
        with zipfile.ZipFile(filepath) as zf:
            zip_dir = zf.namelist()[0]
            zf.extractall(dir_path)
def save_image(image, save_dir, name, mean=None):
    """
    Save image as <save_dir>/<name>.png, un-processing it first when mean is given.

    :param image: image array to save
    :param save_dir: target directory
    :param name: file name without the .png extension
    :param mean: optional mean added back via unprocess_image before saving
    """
    # NOTE(review): unprocess_image is not defined in this chunk -- confirm it
    # exists elsewhere in this module. Also `if mean:` raises for numpy-array
    # means; presumably mean is a scalar here -- verify at call sites.
    if mean:
        image = unprocess_image(image, mean)
    misc.imsave(os.path.join(save_dir, name + ".png"), image)
def get_variable(weights, name):
    """Wrap a pretrained weight array as a named tf variable with a constant initializer."""
    initializer = tf.constant_initializer(weights, dtype=tf.float32)
    return tf.get_variable(name=name, initializer=initializer, shape=weights.shape)
def weight_variable(shape, stddev=0.02, name=None):
    """Truncated-normal weight tensor; anonymous tf.Variable unless a name is given."""
    # print(shape)
    init = tf.truncated_normal(shape, stddev=stddev)
    # initial = tf.contrib.layers.xavier_initializer_conv2d()
    if name is not None:
        return tf.get_variable(name, initializer=init)
    return tf.Variable(init)
def bias_variable(shape, name=None):
    """Zero-initialized bias tensor; anonymous tf.Variable unless a name is given."""
    init = tf.constant(0.0, shape=shape)
    if name is not None:
        return tf.get_variable(name, initializer=init)
    return tf.Variable(init)
def get_tensor_size(tensor):
    """Return the total element count implied by tensor's static shape.

    Bug fix: `reduce` is not a builtin in Python 3 (only `mul` was imported),
    so the original body raised NameError; it is now imported from functools.
    """
    from functools import reduce
    from operator import mul
    return reduce(mul, (d.value for d in tensor.get_shape()), 1)
def conv2d_basic(x, W, bias, stride=1):
    """2-D convolution with SAME padding followed by a bias add."""
    strides = [1, stride, stride, 1]
    feature_map = tf.nn.conv2d(x, W, strides=strides, padding="SAME")
    return tf.nn.bias_add(feature_map, bias)
def conv2d_basic_VALID(x, W, bias, stride=1):
    """2-D convolution with VALID padding followed by a bias add."""
    strides = [1, stride, stride, 1]
    out = tf.nn.conv2d(x, W, strides=strides, padding="VALID")
    return tf.nn.bias_add(out, bias)
def conv2d_strided(x, W, b):
    """2-D convolution with fixed stride 2 and SAME padding, plus bias."""
    out = tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding="SAME")
    return tf.nn.bias_add(out, b)
def conv2d_transpose_strided(x, W, b, output_shape=None, stride=2):
    """Transposed 2-D convolution (upsampling), plus bias.

    When *output_shape* is omitted, H and W are doubled and the channel
    count is taken from the kernel's input-channel dimension.
    """
    if output_shape is None:
        output_shape = x.get_shape().as_list()
        output_shape[1] *= 2
        output_shape[2] *= 2
        output_shape[3] = W.get_shape().as_list()[2]
    deconv = tf.nn.conv2d_transpose(x, W, output_shape,
                                    strides=[1, stride, stride, 1], padding="SAME")
    return tf.nn.bias_add(deconv, b)
def leaky_relu(x, alpha=0.0, name=""):
    """Leaky ReLU: elementwise max(alpha * x, x)."""
    scaled = alpha * x
    return tf.maximum(scaled, x, name)
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 (halves H and W), SAME padding."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding="SAME")
def max_pool_2x1(x):
    """2x1 max pooling with stride 2 along height only, SAME padding."""
    window = [1, 2, 1, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding="SAME")
def avg_pool_2x2(x):
    """2x2 average pooling with stride 2, SAME padding."""
    window = [1, 2, 2, 1]
    return tf.nn.avg_pool(x, ksize=window, strides=window, padding="SAME")
def local_response_norm(x):
    """Local response normalization with fixed hyper-parameters."""
    params = dict(depth_radius=5, bias=2, alpha=1e-4, beta=0.75)
    return tf.nn.lrn(x, **params)
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
    """
    Batch normalization with an exponential moving average of batch statistics.

    Code taken from http://stackoverflow.com/a/34634291/2267819

    :param x: 4-D tensor; moments are computed over axes [0, 1, 2].
    :param n_out: channel count (size of the beta/gamma parameters).
    :param phase_train: boolean tensor; True uses batch statistics and updates
        the moving averages, False uses the accumulated averages.
    :param scope: variable scope for beta/gamma.
    :param decay: moving-average decay factor.
    :param eps: numerical-stability epsilon.
    """
    with tf.variable_scope(scope):
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, 0.02),
                                trainable=True)
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            # Update the moving averages, then return the batch statistics.
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        # Training: batch stats (+EMA update); inference: stored averages.
        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
def process_image(image, mean_pixel):
    """Zero-center an image by subtracting the dataset mean pixel."""
    centered = image - mean_pixel
    return centered
def unprocess_image(image, mean_pixel):
    """Undo mean-pixel centering by adding the dataset mean back."""
    restored = image + mean_pixel
    return restored
def bottleneck_unit(x, out_chan1, out_chan2, down_stride=False, up_stride=False, name=None):
    """Residual bottleneck unit (1x1 -> 3x3 -> 1x1 convolutions plus shortcut).

    Modified implementation from github ry?!

    Fixes over the original (which raised TypeError on first use):
    - the inner ``conv``/``conv_transpose`` helpers convolved the outer input
      ``x`` instead of the tensor passed in, so branch2b/branch2c ignored
      their actual inputs;
    - ``conv_transpose`` was called with keyword ``out_chans`` but declared
      ``out_channel``, and assigned into ``shape`` (an int) instead of the
      output-shape list;
    - ``bn`` was called with an extra scale-name argument it does not accept.
    """
    def conv_transpose(tensor, out_chans, shape, strides, name=None):
        in_shape = tensor.get_shape().as_list()
        in_channel = in_shape[-1]
        kernel = weight_variable([shape, shape, out_chans, in_channel], name=name)
        out_shape = list(in_shape)
        out_shape[-1] = out_chans
        return tf.nn.conv2d_transpose(tensor, kernel, output_shape=out_shape,
                                      strides=[1, strides, strides, 1],
                                      padding='SAME', name='conv_transpose')

    def conv(tensor, out_chans, shape, strides, name=None):
        in_channel = tensor.get_shape().as_list()[-1]
        kernel = weight_variable([shape, shape, in_channel, out_chans], name=name)
        return tf.nn.conv2d(tensor, kernel, strides=[1, strides, strides, 1], padding='SAME', name='conv')

    def bn(tensor, name=None):
        # Local response normalization standing in for true batch norm.
        return tf.nn.lrn(tensor, depth_radius=5, bias=2, alpha=1e-4, beta=0.75, name=name)

    in_chans = x.get_shape().as_list()[3]

    if down_stride or up_stride:
        first_stride = 2
    else:
        first_stride = 1

    with tf.variable_scope('res%s' % name):
        if in_chans == out_chan2:
            b1 = x  # identity shortcut
        else:
            with tf.variable_scope('branch1'):
                if up_stride:
                    b1 = conv_transpose(x, out_chans=out_chan2, shape=1, strides=first_stride,
                                        name='res%s_branch1' % name)
                else:
                    b1 = conv(x, out_chans=out_chan2, shape=1, strides=first_stride, name='res%s_branch1' % name)
                b1 = bn(b1, 'bn%s_branch1' % name)

        with tf.variable_scope('branch2a'):
            if up_stride:
                b2 = conv_transpose(x, out_chans=out_chan1, shape=1, strides=first_stride, name='res%s_branch2a' % name)
            else:
                b2 = conv(x, out_chans=out_chan1, shape=1, strides=first_stride, name='res%s_branch2a' % name)
            b2 = bn(b2, 'bn%s_branch2a' % name)
            b2 = tf.nn.relu(b2, name='relu')

        with tf.variable_scope('branch2b'):
            b2 = conv(b2, out_chans=out_chan1, shape=3, strides=1, name='res%s_branch2b' % name)
            b2 = bn(b2, 'bn%s_branch2b' % name)
            b2 = tf.nn.relu(b2, name='relu')

        with tf.variable_scope('branch2c'):
            b2 = conv(b2, out_chans=out_chan2, shape=1, strides=1, name='res%s_branch2c' % name)
            b2 = bn(b2, 'bn%s_branch2c' % name)

        x = b1 + b2
        return tf.nn.relu(x, name='relu')
def add_to_regularization_and_summary(var):
    """Histogram-summarize *var* and add its L2 loss to the reg_loss collection."""
    if var is None:
        return
    tf.summary.histogram(var.op.name, var)
    tf.add_to_collection("reg_loss", tf.nn.l2_loss(var))
def add_activation_summary(var):
    """Record an activation histogram and sparsity scalar for *var*."""
    if var is None:
        return
    tf.summary.histogram(var.op.name + "/activation", var)
    tf.summary.scalar(var.op.name + "/sparsity", tf.nn.zero_fraction(var))
def add_gradient_summary(grad, var):
    """Record a histogram of *grad* under the variable's name, if present."""
    if grad is None:
        return
    tf.summary.histogram(var.op.name + "/gradient", grad)
def get_conv_shape(name):
    """Parse a layer-spec string into a square conv kernel shape and stride.

    The spec is tokenized on ':', ', ' and '->'; fields 5, 7, 9 and 10 hold
    the kernel size, stride, input and output feature-map counts.
    :return: ([k, k, in_fm, out_fm], stride)
    """
    fields = re.split(':|, |->', name)
    k = int(fields[5])
    stride = int(fields[7])
    in_fm, out_fm = int(fields[9]), int(fields[10])
    return [k, k, in_fm, out_fm], stride
def get_1d_conv_shape(name):
    """Parse a layer-spec string into a 1-D (k x 1) conv shape and stride.

    Same field layout as get_conv_shape, but the kernel width is fixed to 1.
    :return: ([k, 1, in_fm, out_fm], stride)
    """
    fields = re.split(':|, |->', name)
    k = int(fields[5])
    stride = int(fields[7])
    in_fm, out_fm = int(fields[9]), int(fields[10])
    return [k, 1, in_fm, out_fm], stride
def write_val_summary(graph, loss):
    """Build a merged summary op reporting validation loss under "entropy".

    NOTE(review): the ``loss`` argument is unused (the value is expected to be
    fed into the placeholder at run time), and the placeholder has shape [1]
    while tf.summary.scalar expects a true scalar — confirm against the caller.
    """
    with graph.as_default():
        val_loss = tf.placeholder(tf.float32, shape=[1], name="loss")
        tf.summary.scalar("entropy", val_loss)
        summary_op = tf.summary.merge_all()
        return summary_op
def conv2lstm_layer(inputs, num_fm):
    """Collapse a conv feature map for RNN input via a full-size VALID conv.

    :param inputs: 4-D activation tensor.
    :param num_fm: number of output feature maps.
    :return: ReLU-activated output (spatial dims collapsed by the VALID conv).
    """
    in_shape = inputs.get_shape().as_list()
    kernel = weight_variable([in_shape[1], in_shape[2], in_shape[3], num_fm], name="last_conv_w")
    bias = bias_variable([num_fm], name="last_conv_b")
    conv_out = conv2d_basic_VALID(inputs, kernel, bias)
    return tf.nn.relu(conv_out, name="last_relu")
def batch_norm_affine_transform(x, output_dim, decay=0, name=None, seed=0, is_training=True):
    """Affine transform Wx + b followed by batch normalization.

    Assumes x.shape = (batch_size, num_features); W uses Xavier init.
    """
    weight = tf.get_variable(name + "_w", [x.get_shape()[1], output_dim],
                             initializer=tf.contrib.layers.xavier_initializer(seed=seed))
    bias = tf.get_variable(name + "_b", [output_dim], initializer=tf.constant_initializer(0.0))
    affine = tf.matmul(x, weight) + bias
    return tf.contrib.layers.batch_norm(affine, decay=decay, is_training=is_training,
                                        updates_collections=None)
def bdnn_transform(inputs, w, u):
    """Stack each frame with its bDNN context frames.

    :param inputs: (num_frames, feature_size) array.
    :param w: half context width.
    :param u: subsampling step for the outer context.
    :return: (num_frames, feature_size * num_neighbors) array.
    """
    outer_left = np.arange(-w, -u, u)
    center = np.array([-1, 0, 1])
    outer_right = np.arange(1 + u, w + 1, u)
    offsets = np.concatenate((outer_left, center, outer_right), axis=0)

    num_frames = inputs.shape[0]
    padded = np.zeros((2 * w + num_frames, inputs.shape[1]))
    padded[:num_frames, :] = inputs

    # Shift the padded signal by each context offset and keep the valid rows.
    shifted = [np.roll(padded, -offsets[i], axis=0)[:num_frames, :]
               for i in range(offsets.shape[0])]
    stacked = np.transpose(np.asarray(shifted), [1, 0, 2])
    return np.reshape(stacked, (num_frames, -1))
def bdnn_prediction(bdnn_batch_size, logits, threshold=0.6, w=19, u=9):
    """Average overlapping bDNN outputs per frame and threshold to a VAD decision.

    A tiny uniform jitter (<= 1e-4) is added to each averaged score before
    thresholding, so soft scores are not exactly reproducible run to run.
    :return: (hard 0/1 decisions, float32 soft scores), both trimmed of the
        zero padding at the edges.
    """
    scores = np.zeros((bdnn_batch_size, 1))
    frame_ids = np.arange(bdnn_batch_size).reshape((bdnn_batch_size, 1)) + 1
    frame_ids = bdnn_transform(frame_ids, w, u)
    frame_ids = frame_ids[w:(bdnn_batch_size - w), :]
    for i in np.arange(w, bdnn_batch_size - w):
        hits = np.where((frame_ids - 1) == i)
        preds = logits[hits]
        scores[i] = np.sum(preds) / preds.shape[0] + np.random.rand(1) * 1e-4
    scores = np.trim_zeros(scores)
    soft_scores = np.float32(scores)
    hard = scores >= threshold
    return hard.astype(int), soft_scores
def clipped_relu(x, name=None):
    """Clip ``x + 0.5`` to the range [0, 1] (hard-sigmoid-like activation)."""
    # NOTE(review): `b` is created but never used; removing it would change the
    # set of graph variables (and checkpoint contents), so it is left in place.
    # Also `name` must be a string here - the default None would raise on `+`.
    b = tf.get_variable(name+'proposed', [1], initializer=tf.constant_initializer(-.5))
    x = tf.maximum((x+0.5), 0)
    x = tf.minimum(x, 1)
    return x
def cnn_transform(x_mul, w=19, w_max=19, fromnum=1, layernum=16, num_features=768, name=None, is_training=True):
    """Masked convolution over a width-``w`` context: five trainable 1x3/3x3
    kernel bands separated by fixed zero bands, zero-padded up to ``w_max``
    rows, then applied as a single SAME conv.

    Fix: ``num_past`` is used as a tensor dimension, so it must be an int —
    the original ``(w-1)/2 - 1`` yields a float under Python 3 and fails.
    """
    num_past = (w - 1) // 2 - 1
    # Trainable kernel bands.
    c1_filter = tf.get_variable(name + "_c1", [1, 3, fromnum, layernum], initializer=tf.truncated_normal_initializer(stddev=0.02), trainable=is_training)
    c2_filter = tf.get_variable(name + "_c2", [1, 3, fromnum, layernum], initializer=tf.truncated_normal_initializer(stddev=0.02), trainable=is_training)
    c3_filter = tf.get_variable(name + "_c3", [3, 3, fromnum, layernum], initializer=tf.truncated_normal_initializer(stddev=0.02), trainable=is_training)
    c4_filter = tf.get_variable(name + "_c4", [1, 3, fromnum, layernum], initializer=tf.truncated_normal_initializer(stddev=0.02), trainable=is_training)
    c5_filter = tf.get_variable(name + "_c5", [1, 3, fromnum, layernum], initializer=tf.truncated_normal_initializer(stddev=0.02), trainable=is_training)
    # Fixed zero bands between the trainable ones.
    z1_filter = tf.Variable(tf.zeros([num_past, 3, fromnum, layernum], dtype=tf.float32), trainable=False)
    z2_filter = tf.Variable(tf.zeros([num_past, 3, fromnum, layernum], dtype=tf.float32), trainable=False)
    z3_filter = tf.Variable(tf.zeros([num_past, 3, fromnum, layernum], dtype=tf.float32), trainable=False)
    z4_filter = tf.Variable(tf.zeros([num_past, 3, fromnum, layernum], dtype=tf.float32), trainable=False)
    c_total = tf.concat([c1_filter, z1_filter, c2_filter, z2_filter, c3_filter, z3_filter, c4_filter, z4_filter, c5_filter], 0)
    if w_max > w:
        # Pad to a common kernel height so different widths can be mixed.
        z0_filter = tf.Variable(tf.zeros([w_max - w, 3, fromnum, layernum], dtype=tf.float32), trainable=False)
        z5_filter = tf.Variable(tf.zeros([w_max - w, 3, fromnum, layernum], dtype=tf.float32), trainable=False)
        c_total = tf.concat([z0_filter, c_total, z5_filter], 0)
    cnn_result = tf.nn.conv2d(x_mul, c_total, strides=[1, 1, 1, 1], padding="SAME")
    return cnn_result
def cnn_transform2(x_mul, w=19, w_max=19, fromnum=1, layernum=16, num_features=768, name=None, is_training=True):
    """Masked convolution over a width-``w`` context: three trainable kernel
    bands separated by two fixed zero bands, zero-padded up to ``w_max`` rows,
    then applied as a single SAME conv.
    """
    num_past = w-2
    # Trainable kernel bands.
    c1_filter = tf.get_variable(name + "_c1", [1,3, fromnum, layernum], initializer=tf.truncated_normal_initializer(stddev=0.02), trainable= is_training)
    c2_filter = tf.get_variable(name + "_c2", [3,3, fromnum, layernum], initializer=tf.truncated_normal_initializer(stddev=0.02), trainable= is_training)
    c3_filter = tf.get_variable(name + "_c3", [1,3, fromnum, layernum], initializer=tf.truncated_normal_initializer(stddev=0.02), trainable= is_training)
    # Fixed zero bands between the trainable ones.
    z1_filter = tf.Variable(tf.zeros([num_past, 3, fromnum, layernum], dtype=tf.float32), trainable=False)
    z2_filter = tf.Variable(tf.zeros([num_past, 3, fromnum, layernum], dtype=tf.float32), trainable=False)
    c_total = tf.concat([c1_filter,z1_filter,c2_filter,z2_filter,c3_filter],0)
    if w_max > w:
        # Pad to a common kernel height so different widths can be mixed.
        z0_filter = tf.Variable(tf.zeros([w_max-w, 3, fromnum, layernum],dtype=tf.float32),trainable = False )
        z3_filter = tf.Variable(tf.zeros([w_max-w, 3, fromnum, layernum],dtype=tf.float32),trainable = False )
        c_total = tf.concat([z0_filter,c_total,z3_filter],0)
    cnn_result = tf.nn.conv2d(x_mul,c_total,strides=[1,1,1,1],padding = "SAME")
    return cnn_result
def cnn_concat(x_mul, fn=1, k=32, w_max=19, name=None, is_training=True):
    """Multi-resolution conv bank: masked convs at eight context widths
    (19 down to 3), concatenated along the channel axis.
    """
    branches = []
    for width in (19, 17, 13, 11, 9, 7, 5, 3):
        # Wide contexts use the 5-band mask, narrow ones the 3-band mask.
        transform = cnn_transform if width >= 11 else cnn_transform2
        branches.append(transform(x_mul, w=width, w_max=19, fromnum=fn, layernum=k,
                                  num_features=768, name=name + "clay%02d" % width,
                                  is_training=is_training))
    return tf.concat(branches, 3)
def cnn_concat2(x_mul, fn=64, k=128, w_max=19, name=None, is_training=True):
    """Plain 3x3 SAME convolution used between the concat stages."""
    kernel = tf.get_variable(name + "_cnn_norm_1", [3, 3, fn, k],
                             initializer=tf.truncated_normal_initializer(stddev=0.02),
                             trainable=is_training)
    return tf.nn.conv2d(x_mul, kernel, strides=[1, 1, 1, 1], padding="SAME")
def cnn_concat3(x_mul, fn=1, k=32, w_max=19, name=None, is_training=True):
    """Reduced multi-resolution conv bank: masked convs at five context widths
    (19, 13, 9, 5, 3), concatenated along the channel axis.
    """
    branches = []
    for width in (19, 13, 9, 5, 3):
        # Wide contexts use the 5-band mask, narrow ones the 3-band mask.
        transform = cnn_transform if width >= 11 else cnn_transform2
        branches.append(transform(x_mul, w=width, w_max=19, fromnum=fn, layernum=k,
                                  num_features=768, name=name + "clay%02d" % width,
                                  is_training=is_training))
    return tf.concat(branches, 3)
def dense_to_one_hot(labels_dense, num_classes=2):
    """Convert class labels from scalars to one-hot float32 rows."""
    num_labels = labels_dense.shape[0]
    one_hot = np.zeros((num_labels, num_classes))
    flat_index = np.arange(num_labels) * num_classes + labels_dense.ravel()
    one_hot.flat[flat_index.astype(int)] = 1
    return one_hot.astype(np.float32)
| 30,172
| 41.023677
| 153
|
py
|
VAD
|
VAD-master/lib/python/data_reader_DNN.py
|
import numpy as np
import os
import glob
import utils
import scipy.io as sio
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
class DataReader(object):
    """Sequential reader over paired feature (.bin/.txt spec) and label (.bin)
    files, serving normalized, bDNN-context-stacked mini-batches.

    Maintains a cursor over a sorted file list; `file_change`/`eof` flags let
    callers detect file boundaries and end of the dataset.
    """
    def __init__(self, input_dir, output_dir, norm_dir, w=19, u=9, name=None, pad=None):
        """
        :param input_dir: directory with feature .bin files and .txt shape specs.
        :param output_dir: directory with label .bin files.
        :param norm_dir: directory holding global_normalize_factor.mat.
        :param w: bDNN half context width.
        :param u: bDNN context subsampling step.
        :param name: reader name (used in log messages).
        :param pad: optional number of zero rows appended to the first file.
        """
        self._input_dir = input_dir
        self._output_dir = output_dir
        self._norm_dir = norm_dir
        self._input_file_list = sorted(glob.glob(input_dir+'/*.bin'))
        self._input_spec_list = sorted(glob.glob(input_dir+'/*.txt'))
        self._output_file_list = sorted(glob.glob(output_dir+'/*.bin'))
        self._file_len = len(self._input_file_list)
        self._name = name
        assert self._file_len == len(self._output_file_list), "# input files and output file is not matched"
        self._num_file = 0
        self._start_idx = 0
        if pad is not None:
            # Zero-pad both features and labels at the tail of the first file.
            self._inputs = self._read_input(self._input_file_list[self._num_file], self._input_spec_list[self._num_file])
            self._inputs = np.concatenate((self._inputs, np.zeros((pad, self._inputs.shape[1]), dtype=np.float32)))
            self._outputs = self._read_output(self._output_file_list[self._num_file])
            self._outputs = np.concatenate((self._outputs, np.zeros((pad, self._outputs.shape[1]), dtype=np.float32)))
        else:
            self._inputs = self._read_input(self._input_file_list[self._num_file], self._input_spec_list[self._num_file])
            self._outputs = self._read_output(self._output_file_list[self._num_file])
        self._w = w
        self._u = u
        self.eof = False
        self.file_change = False
        # Truncate labels to the feature length (files can disagree slightly).
        self._outputs = self._outputs[0:self._inputs.shape[0]]
        self._num_figure = 1
        assert np.shape(self._inputs)[0] == np.shape(self._outputs)[0], \
            ("# samples is not matched between input: %d and output: %d files"
             % (np.shape(self._inputs)[0], np.shape(self._outputs)[0]))
        self.num_samples = np.shape(self._outputs)[0]
        # Global normalization statistics computed offline.
        norm_param = sio.loadmat(self._norm_dir+'/global_normalize_factor.mat')
        self.train_mean = norm_param['global_mean']
        self.train_std = norm_param['global_std']

    def _binary_read_with_shape(self):
        # Placeholder - not implemented.
        pass

    @staticmethod
    def _read_input(input_file_dir, input_spec_dir):
        """Read a Fortran-ordered float32 feature matrix; shape from the .txt spec."""
        data = np.fromfile(input_file_dir, dtype=np.float32)  # (# total frame, feature_size)
        with open(input_spec_dir, 'r') as f:
            spec = f.readline()
            size = spec.split(',')
        data = data.reshape((int(size[0]), int(size[1])), order='F')
        return data

    @staticmethod
    def _read_output(output_file_dir):
        """Read a float32 label vector and reshape to (num_frames, 1)."""
        data = np.fromfile(output_file_dir, dtype=np.float32)  # data shape : (# total frame,)
        data = data.reshape(-1, 1)  # data shape : (# total frame, 1)
        return data

    def next_batch(self, batch_size):
        """Return the next (inputs, outputs) batch, advancing to the next file
        (and setting file_change/eof flags) when the current one is exhausted.

        Inputs are normalized and context-stacked; both arrays are trimmed by
        w rows at each end to drop invalid context frames.
        """
        if self._start_idx + batch_size > self.num_samples:
            # Current file exhausted: move to the next one (wrapping at EOF).
            self._start_idx = 0
            self.file_change = True
            self._num_file += 1
            if self._num_file > self._file_len - 1:
                self.eof = True
                self._num_file = 0
            self._inputs = self._read_input(self._input_file_list[self._num_file], self._input_spec_list[self._num_file])
            self._outputs = self._read_output(self._output_file_list[self._num_file])
            data_len = np.shape(self._inputs)[0]
            self._outputs = self._outputs[0:data_len, :]
            assert np.shape(self._inputs)[0] == np.shape(self._outputs)[0], \
                ("# samples is not matched between input: %d and output: %d files"
                 % (np.shape(self._inputs)[0], np.shape(self._outputs)[0]))
            self.num_samples = np.shape(self._outputs)[0]
        else:
            self.file_change = False
            self.eof = False
        inputs = self._inputs[self._start_idx:self._start_idx + batch_size, :]
        inputs = self.normalize(inputs)
        inputs = utils.bdnn_transform(inputs, self._w, self._u)
        inputs = inputs[self._w: (batch_size-self._w), :]
        outputs = self._outputs[self._start_idx:self._start_idx + batch_size, :]
        outputs = outputs[self._w: (batch_size - self._w), :]
        self._start_idx += batch_size
        return inputs, outputs

    def normalize(self, x):
        """Apply the precomputed global mean/std normalization."""
        x = (x - self.train_mean)/self.train_std
        return x

    def reader_initialize(self):
        """Reset the cursor to the first file and clear the EOF flag."""
        self._num_file = 0
        self._start_idx = 0
        self.eof = False

    def eof_checker(self):
        # True once the reader has wrapped past the last file.
        return self.eof

    def file_change_checker(self):
        # True on the first batch after a file boundary.
        return self.file_change

    def file_change_initialize(self):
        # Acknowledge a file change (callers reset the flag after handling it).
        self.file_change = False
def dense_to_one_hot(labels_dense, num_classes=2):
    """Convert class labels from scalars to one-hot vectors.

    Fix: cast the computed flat indices to int so float-typed labels do not
    raise an IndexError under fancy indexing — this matches the sibling
    implementations of this helper elsewhere in the project.
    """
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[(index_offset + labels_dense.ravel()).astype(int)] = 1
    return labels_one_hot
# file_dir = "/home/sbie/github/VAD_KJT/Datamake/Database/Aurora2withSE"
# input_dir1 = file_dir + "/STFT2"
# output_dir1 = file_dir + "/Labels"
# dr = DataReader(input_dir1, output_dir1, input_dir1,name='test')
#
# for i in range(1000000):
# tt, pp = dr.next_batch(500)
# print("asdf")
| 6,592
| 36.674286
| 121
|
py
|
VAD
|
VAD-master/lib/python/VAD_DNN.py
|
import tensorflow as tf
import numpy as np
import utils as utils
import re
import data_reader_DNN_v2 as dr
import os, sys
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn import metrics
from scipy.optimize import brentq
from scipy.interpolate import interp1d
# ---------------- Run configuration ----------------
mode = 'test'
file_dir = "/home/sbie/storage/VAD_Database/SE_TIMIT_MRCG_0328"
input_dir = file_dir
output_dir = file_dir + "/Labels"
valid_file_dir = "/home/sbie/storage/VAD_Database/NX_TIMIT_MRCG_small"
test_file_dir = "/home/sbie/storage2/VAD_Database/NX_TIMIT_MRCG_big"
norm_dir = input_dir
initial_logs_dir = "/home/sbie/storage2/VAD_Database/saved_model/my_converted_checkpoint2"
logs_dir = "/home/sbie/github/VAD_Project/VAD_DNN/logs_DNN"
ckpt_name = '/DNN'
reset = True  # remove all existed logs and initialize log directories
device = '/gpu:0'
# Fix: compare strings with ==, not identity (`is`) - identity comparison of
# string literals is an implementation detail and emits a SyntaxWarning.
if mode == 'test':
    reset = False
    logs_dir = './saved_model'
if reset:
    os.popen('rm -rf ' + logs_dir + '/*')
    os.popen('mkdir ' + logs_dir + '/train')
    os.popen('mkdir ' + logs_dir + '/valid')
summary_list = ["cost", "accuracy_SNR_-5", "accuracy_SNR_0", "accuracy_SNR_5", "accuracy_SNR_10",
                "accuracy_across_all_SNRs"]
# learning_rate = 0.00733
learning_rate = 0.0001
eval_num_batches = 2e5
SMALL_NUM = 1e-4
max_epoch = int(1000)
dropout_rate = 0.5
decay = 0.9  # batch normalization decay factor
w = 19  # w default = 19
u = 9  # u default = 9
eval_th = 0.5
th = 0.5
num_hidden_1 = 512
num_hidden_2 = 512
model_config = {"w": w, "u": u, "num_hidden_1": num_hidden_1, "num_hidden_2": num_hidden_2}
batch_size = 4096 + 2*w  # batch_size = 32
valid_batch_size = batch_size
assert (w-1) % u == 0, "w-1 must be divisible by u"
num_features = 768  # MRCG feature
bdnn_winlen = (((w-1) / u) * 2) + 3
bdnn_inputsize = int(bdnn_winlen * num_features)
bdnn_outputsize = 2
data_len = None
eval_type = 2
def test_config(c_test_dir, c_norm_dir, c_initial_logs_dir, c_batch_size_eval, c_data_len):
global test_file_dir
global norm_dir
global initial_logs_dir
global ckpt_name
global valid_batch_size
global data_len
test_file_dir = c_test_dir
norm_dir = c_norm_dir
initial_logs_dir = c_initial_logs_dir
valid_batch_size = c_batch_size_eval
data_len = c_data_len
def affine_transform(x, output_dim, name=None):
    """Affine transformation Wx + b; assumes x.shape = (batch_size, num_features)."""
    weight = tf.get_variable(name + "_w", [x.get_shape()[1], output_dim],
                             initializer=tf.truncated_normal_initializer(stddev=0.02))
    bias = tf.get_variable(name + "_b", [output_dim], initializer=tf.constant_initializer(0.0))
    return tf.matmul(x, weight) + bias
def summary_generation(eval_file_dir):
    """Build per-noise-type evaluation summary ops, keyed "<noise>_<metric>".

    One shared placeholder feeds every scalar summary; it is stored under
    the "summary_ph" key of the returned dict.
    """
    summary_dic = {}
    noise_list = sorted(os.listdir(eval_file_dir))
    summary_ph = tf.placeholder(dtype=tf.float32)
    summary_dic["summary_ph"] = summary_ph
    for noise_name in noise_list:
        with tf.variable_scope(noise_name):
            for metric in summary_list:
                summary_dic[noise_name + "_" + metric] = tf.summary.scalar(metric, summary_ph)
    with tf.variable_scope("Averaged_Results"):
        for key in ("cost_across_all_noise_types",
                    "accuracy_across_all_noise_types",
                    "variance_across_all_noise_types"):
            summary_dic[key] = tf.summary.scalar(key, summary_ph)
    return summary_dic
def inference(inputs, keep_prob, is_training=True):
    """Two batch-normalized ReLU hidden layers with dropout, then a 2-way linear output."""
    hidden = utils.batch_norm_affine_transform(inputs, num_hidden_1, name="hidden_1", decay=decay, is_training=is_training)
    hidden = tf.nn.dropout(tf.nn.relu(hidden), keep_prob=keep_prob)
    hidden = utils.batch_norm_affine_transform(hidden, num_hidden_2, name="hidden_2", decay=decay, is_training=is_training)
    hidden = tf.nn.dropout(tf.nn.relu(hidden), keep_prob=keep_prob)
    return affine_transform(hidden, 2, name="output")
def train(loss_val, var_list):
    """Adam with an exponentially decayed learning rate; returns the apply-gradients op."""
    decay_rate = .96
    decay_steps = 200
    global_step = tf.Variable(0, trainable=False)
    lr = tf.train.exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase=True)
    optimizer = tf.train.AdamOptimizer(lr)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)
    return optimizer.apply_gradients(grads, global_step=global_step)
def bdnn_prediction(bdnn_batch_size, logits, threshold=th):
    """Average overlapping bDNN logits per frame and threshold to hard 0/1 labels.

    Uses the module-level context parameters w and u; edge frames without
    full context are trimmed from the result.
    """
    scores = np.zeros((bdnn_batch_size, 1))
    frame_ids = np.arange(1, bdnn_batch_size + 1).reshape((bdnn_batch_size, 1))
    frame_ids = utils.bdnn_transform(frame_ids, w, u)
    frame_ids = frame_ids[w:(bdnn_batch_size - w), :]
    for i in np.arange(w, bdnn_batch_size - w):
        hits = np.where((frame_ids - 1) == i)
        preds = logits[hits]
        scores[i] = np.sum(preds) / preds.shape[0]
    scores = np.trim_zeros(scores)
    hard = scores >= threshold
    return hard.astype(int)
def full_evaluation(m_eval, sess_eval, batch_size_eval, eval_file_dir, summary_writer, summary_dic, itr):
    """Evaluate the model on every noise-type subdirectory of *eval_file_dir*,
    print per-noise and averaged metrics, and write them as summaries.

    :param m_eval: evaluation Model instance.
    :param sess_eval: session to run evaluation in.
    :param batch_size_eval: batch size for each evaluation pass.
    :param eval_file_dir: directory whose subdirs are noise types (each with a Labels/ subdir).
    :param summary_writer: tf.summary.FileWriter receiving the results.
    :param summary_dic: dict of summary ops from summary_generation().
    :param itr: global iteration used as the summary step.
    """
    mean_cost = []
    mean_accuracy = []
    mean_auc = []

    print("-------- Performance for each of noise types --------")
    noise_list = os.listdir(eval_file_dir)
    noise_list = sorted(noise_list)
    summary_ph = summary_dic["summary_ph"]

    for i in range(len(noise_list)):
        noise_name = '/' + noise_list[i]
        eval_input_dir = eval_file_dir + noise_name
        eval_output_dir = eval_file_dir + noise_name + '/Labels'
        eval_data_set = dr.DataReader(eval_input_dir, eval_output_dir, norm_dir, w=w, u=u, name="eval")
        eval_cost, eval_accuracy, eval_list, eval_auc, eval_auc_list = evaluation(m_eval, eval_data_set, sess_eval, batch_size_eval)

        print("--noise type : " + noise_list[i])
        print("cost: %.4f, accuracy across all SNRs: %.4f, auc across all SNRS: %.4f" % (eval_cost, eval_accuracy*100, eval_auc))
        print('accuracy wrt SNR:')
        print('SNR_-5 : %.4f, SNR_0 : %.4f, SNR_5 : %.4f, SNR_10 : %.4f' % (eval_list[0]*100, eval_list[1]*100,
                                                                            eval_list[2]*100, eval_list[3]*100))
        print('AUC wrt SNR:')
        print('SNR_-5 : %.4f, SNR_0 : %.4f, SNR_5 : %.4f, SNR_10 : %.4f' % (eval_auc_list[0], eval_auc_list[1],
                                                                            eval_auc_list[2], eval_auc_list[3]))
        print('')
        # Summary order must match summary_list: cost, 4 SNR accuracies, overall accuracy.
        eval_summary_list = [eval_cost] + eval_list + [eval_accuracy]

        for j, summary_name in enumerate(summary_list):
            summary_str = sess_eval.run(summary_dic[noise_list[i]+"_"+summary_name], feed_dict={summary_ph: eval_summary_list[j]})
            summary_writer.add_summary(summary_str, itr)

        mean_cost.append(eval_cost)
        mean_accuracy.append(eval_accuracy)
        mean_auc.append(eval_auc)

    # Aggregate across noise types.
    mean_cost = np.mean(np.asarray(mean_cost))
    var_accuracy = np.var(np.asarray(mean_accuracy))
    mean_accuracy = np.mean(np.asarray(mean_accuracy))
    mean_auc = np.mean(np.asarray(mean_auc))

    summary_writer.add_summary(sess_eval.run(summary_dic["cost_across_all_noise_types"],
                                             feed_dict={summary_ph: mean_cost}), itr)
    summary_writer.add_summary(sess_eval.run(summary_dic["accuracy_across_all_noise_types"],
                                             feed_dict={summary_ph: mean_accuracy}), itr)
    summary_writer.add_summary(sess_eval.run(summary_dic["variance_across_all_noise_types"],
                                             feed_dict={summary_ph: var_accuracy}), itr)

    print("-------- Performance across all of noise types --------")
    print("cost : %.4f" % mean_cost)
    print("******* averaged accuracy across all noise_types : %.4f *******" % (mean_accuracy*100))
    print("******* averaged auc across all noise_types : %.4f *******" % mean_auc)
    print("******* variance of accuracies across all noise_types : %4.4f *******" % (var_accuracy*100))
def evaluation(m_valid, valid_data_set, sess, eval_batch_size, num_batches=eval_num_batches):
    """Run the model over one full pass of *valid_data_set*, accumulating
    per-file averages of cost, accuracy and ROC-AUC.

    :param m_valid: evaluation Model instance.
    :param valid_data_set: DataReader over the validation data.
    :param sess: session to run in.
    :param eval_batch_size: batch size per step.
    :param num_batches: unused cap on the number of batches (kept for API compatibility).
    :return: (avg cost, avg accuracy, per-file accuracy list, avg AUC, per-file AUC list).
    """
    avg_valid_auc = 0.
    avg_valid_cost = 0.
    avg_valid_accuracy = 0.
    itr_sum = 0.

    # One slot per validation file; filled in at each file boundary.
    auc_list = [0 for i in range(valid_data_set._file_len)]
    accuracy_list = [0 for i in range(valid_data_set._file_len)]
    cost_list = [0 for i in range(valid_data_set._file_len)]
    itr_file = 0
    while True:
        valid_inputs, valid_labels = valid_data_set.next_batch(eval_batch_size)
        if valid_data_set.file_change_checker():
            # File boundary: flush the running averages into the per-file lists.
            auc_list[itr_file] = avg_valid_auc / itr_sum
            accuracy_list[itr_file] = avg_valid_accuracy / itr_sum
            cost_list[itr_file] = avg_valid_cost / itr_sum
            avg_valid_accuracy = 0.
            avg_valid_cost = 0.
            avg_valid_auc = 0.
            itr_sum = 0
            itr_file += 1
            valid_data_set.file_change_initialize()
        if valid_data_set.eof_checker():
            valid_data_set.reader_initialize()
            print('Valid data reader was initialized!')  # initialize eof flag & num_file & start index
            break
        one_hot_labels = valid_labels.reshape((-1, 1))
        one_hot_labels = dense_to_one_hot(one_hot_labels, num_classes=2)
        feed_dict = {m_valid.inputs: valid_inputs, m_valid.labels: one_hot_labels,
                     m_valid.keep_probability: 1}
        valid_cost, valid_accuracy, valid_softpred, valid_raw_labels\
            = sess.run([m_valid.cost, m_valid.accuracy, m_valid.softpred, m_valid.raw_labels], feed_dict=feed_dict)
        fpr, tpr, thresholds = metrics.roc_curve(valid_raw_labels, valid_softpred, pos_label=1)
        valid_auc = metrics.auc(fpr, tpr)
        avg_valid_auc += valid_auc
        avg_valid_cost += valid_cost
        avg_valid_accuracy += valid_accuracy
        itr_sum += 1

    total_avg_valid_auc = np.asscalar(np.mean(np.asarray(auc_list)))
    total_avg_valid_cost = np.asscalar(np.mean(np.asarray(cost_list)))
    total_avg_valid_accuracy = np.asscalar(np.mean(np.asarray(accuracy_list)))

    return total_avg_valid_cost, total_avg_valid_accuracy, accuracy_list, total_avg_valid_auc, auc_list
def dense_to_one_hot(labels_dense, num_classes=2):
    """Convert integer class labels to one-hot float32 rows."""
    num_labels = labels_dense.shape[0]
    one_hot = np.zeros((num_labels, num_classes))
    flat_index = np.arange(num_labels) * num_classes + labels_dense.ravel()
    one_hot.flat[flat_index.astype(int)] = 1
    return one_hot.astype(np.float32)
class Model(object):
    """bDNN VAD model graph: placeholders, inference network, softmax
    cross-entropy cost, accuracy/soft-prediction ops, and the train op.
    """
    def __init__(self, is_training=True):
        # keep_probability feeds dropout (1.0 at evaluation time).
        self.keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
        self.inputs = inputs = tf.placeholder(tf.float32, shape=[None, bdnn_inputsize],
                                              name="inputs")
        self.labels = labels = tf.placeholder(tf.float32, shape=[None, 2],
                                              name="labels")

        # set inference graph
        self.logits = logits = inference(inputs, self.keep_probability, is_training=is_training)  # (batch_size, bdnn_outputsize)
        # set objective function
        pred = tf.argmax(logits, axis=1, name="prediction")
        # Soft score for ROC analysis: the raw logit of the "speech" class.
        softpred = tf.identity(logits[:, 1], name="soft_pred")

        pred = tf.cast(pred, tf.int32)
        truth = tf.cast(labels[:, 1], tf.int32)
        self.raw_labels = tf.identity(truth, name="raw_labels")

        self.softpred = softpred

        self.accuracy = tf.reduce_mean(tf.cast(tf.equal(pred, truth), tf.float32))
        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))

        # set training strategy
        trainable_var = tf.trainable_variables()
        self.train_op = train(self.cost, trainable_var)
def main(prj_dir=None, model=None, mode=None):
    """Train or evaluate the DNN VAD model.

    Args:
        prj_dir: project root containing the ``configure/DNN`` config package.
        model: model sub-directory name passed to ``path_setting``.
        mode: 'train' or 'test'.

    Returns:
        In 'test' mode, a ``(soft_output, labels)`` pair, truncated to the
        module-level ``data_len`` when it is set.  'train' mode returns None.

    BUG FIX: the original compared ``mode`` with string literals using ``is``,
    which only works by accident of CPython string interning; all comparisons
    now use ``==``.
    """
    # Configuration Part #
    if mode == 'train':
        import path_setting as ps
        set_path = ps.PathSetting(prj_dir, model)
        logs_dir = initial_logs_dir = set_path.logs_dir
        input_dir = set_path.input_dir
        output_dir = set_path.output_dir
        norm_dir = set_path.norm_dir
        valid_file_dir = set_path.valid_file_dir

        sys.path.insert(0, prj_dir + '/configure/DNN')
        import config as cg

        # Configuration values are published as module globals because the
        # Model class reads them at graph-construction time.
        global learning_rate, dropout_rate, max_epoch, batch_size, valid_batch_size
        learning_rate = cg.lr
        dropout_rate = cg.dropout_rate
        max_epoch = cg.max_epoch
        batch_size = valid_batch_size = cg.batch_size

        global w, u
        w = cg.w
        u = cg.u

        global bdnn_winlen, bdnn_inputsize, bdnn_outputsize
        bdnn_winlen = (((w - 1) / u) * 2) + 3
        bdnn_inputsize = int(bdnn_winlen * num_features)
        bdnn_outputsize = 2

        global num_hidden_1, num_hidden_2
        num_hidden_1 = cg.num_hidden_1
        num_hidden_2 = cg.num_hidden_2
    # NOTE(review): in 'test' mode the names used below (logs_dir,
    # initial_logs_dir, ckpt_name, test_file_dir, data_len, eval_type,
    # valid_batch_size) must already exist as module globals — confirm against
    # the top of this file.

    # Graph Part #
    print("Graph initialization...")
    with tf.device(device):
        # Same variable scope twice: m_valid shares m_train's weights.
        with tf.variable_scope("model", reuse=None):
            m_train = Model(is_training=True)
        with tf.variable_scope("model", reuse=True):
            m_valid = Model(is_training=False)
    print("Done")

    # Summary Part #
    print("Setting up summary op...")
    summary_ph = tf.placeholder(dtype=tf.float32)
    with tf.variable_scope("Training_procedure"):
        cost_summary_op = tf.summary.scalar("cost", summary_ph)
        accuracy_summary_op = tf.summary.scalar("accuracy", summary_ph)
    print("Done")

    # Model Save Part #
    print("Setting up Saver...")
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(logs_dir + '/DNN')
    print("Done")

    # Session Part #
    sess_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    sess_config.gpu_options.allow_growth = True
    sess = tf.Session(config=sess_config)

    if mode == 'train':
        train_summary_writer = tf.summary.FileWriter(logs_dir + '/train/', sess.graph, max_queue=2)
        valid_summary_writer = tf.summary.FileWriter(logs_dir + '/valid/', max_queue=2)

    if ckpt and ckpt.model_checkpoint_path:  # model restore
        print("Model restored...")
        if mode == 'train':
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            saver.restore(sess, initial_logs_dir + ckpt_name)
        print("Done")
    else:
        # If no checkpoint exists, start from freshly initialized weights.
        sess.run(tf.global_variables_initializer())

    if mode == 'train':
        # Training data reader initialization.
        train_data_set = dr.DataReader(input_dir, output_dir, norm_dir, w=w, u=u, name="train")

    if mode == 'train':
        for itr in range(max_epoch):
            train_inputs, train_labels = train_data_set.next_batch(batch_size)
            one_hot_labels = train_labels.reshape((-1, 1))
            one_hot_labels = dense_to_one_hot(one_hot_labels, num_classes=2)
            feed_dict = {m_train.inputs: train_inputs, m_train.labels: one_hot_labels,
                         m_train.keep_probability: dropout_rate}
            sess.run(m_train.train_op, feed_dict=feed_dict)

            if itr % 10 == 0:  # periodic train-set monitoring
                train_cost, train_accuracy \
                    = sess.run([m_train.cost, m_train.accuracy], feed_dict=feed_dict)
                print("Step: %d, train_cost: %.4f, train_accuracy=%4.4f" % (itr, train_cost, train_accuracy * 100))
                train_cost_summary_str = sess.run(cost_summary_op, feed_dict={summary_ph: train_cost})
                train_accuracy_summary_str = sess.run(accuracy_summary_op, feed_dict={summary_ph: train_accuracy})
                # write the train phase summary to event files
                train_summary_writer.add_summary(train_cost_summary_str, itr)
                train_summary_writer.add_summary(train_accuracy_summary_str, itr)

            if itr % 50 == 0 and itr > 0:  # checkpoint + validation
                saver.save(sess, logs_dir + "/model.ckpt", itr)  # model save
                print('validation start!')
                valid_accuracy, valid_cost = \
                    utils.do_validation(m_valid, sess, valid_file_dir, norm_dir, type='DNN')
                print("valid_cost: %.4f, valid_accuracy=%4.4f" % (valid_cost, valid_accuracy * 100))
                valid_cost_summary_str = sess.run(cost_summary_op, feed_dict={summary_ph: valid_cost})
                valid_accuracy_summary_str = sess.run(accuracy_summary_op, feed_dict={summary_ph: valid_accuracy})
                valid_summary_writer.add_summary(valid_cost_summary_str, itr)
                valid_summary_writer.add_summary(valid_accuracy_summary_str, itr)
    elif mode == 'test':
        final_softout, final_label = utils.vad_test(m_valid, sess, valid_batch_size, test_file_dir, norm_dir, data_len,
                                                    eval_type)
        if data_len is None:
            return final_softout, final_label
        else:
            return final_softout[0:data_len, :], final_label[0:data_len, :]
# Script entry point: tf.app.run() parses TF flags and then calls main().
if __name__ == "__main__":
    tf.app.run()
| 19,112
| 37.45674
| 134
|
py
|
VAD
|
VAD-master/lib/python/data_reader_RNN.py
|
import numpy as np
import os
import glob
import utils
import scipy.io as sio
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
class DataReader(object):
    """Sequential mini-batch reader for RNN training data.

    Streams paired (input, label) batches from matched ``*.bin`` files in
    ``input_dir`` / ``output_dir``.  Each file is zero-padded so its length is
    a multiple of the batch size, plus a trailing window of ``target_delay``
    zero frames so ``next_batch`` can return ``batch_size + target_delay``
    input rows per call.  Files are cycled in sorted order; ``eof`` /
    ``file_change`` flags report wrap-around to the caller.
    """

    def __init__(self, input_dir, output_dir, norm_dir, target_delay=19, u=9, name=None):
        self._input_dir = input_dir
        self._output_dir = output_dir
        self._norm_dir = norm_dir
        # Inputs are .bin feature dumps with a sibling .txt shape spec;
        # sorted() keeps input/output file lists aligned by filename.
        self._input_file_list = sorted(glob.glob(input_dir + '/*.bin'))
        self._input_spec_list = sorted(glob.glob(input_dir + '/*.txt'))
        self._output_file_list = sorted(glob.glob(output_dir + '/*.bin'))
        self._file_len = len(self._input_file_list)
        self._name = name
        assert self._file_len == len(self._output_file_list), "# input files and output file is not matched"
        self._w = target_delay  # trailing context window, in frames
        self._u = u
        self.eof = False
        self.file_change = False
        self.num_samples = 0
        self._inputs = 0   # becomes an ndarray once the first file is loaded
        self._outputs = 0  # becomes an ndarray once the first file is loaded
        self._epoch = 1
        self._num_file = 0
        self._start_idx = 0  # read offset into the currently loaded file
        # Global mean/std computed offline over the training set.
        norm_param = sio.loadmat(self._norm_dir + '/global_normalize_factor.mat')
        self.train_mean = norm_param['global_mean']
        self.train_std = norm_param['global_std']

    def _binary_read_with_shape(self):
        pass

    @staticmethod
    def _read_input(input_file_dir, input_spec_dir):
        """Read a float32 feature file, reshaped per its .txt spec (Fortran order)."""
        data = np.fromfile(input_file_dir, dtype=np.float32)  # (# total frame, feature_size)
        with open(input_spec_dir, 'r') as f:
            spec = f.readline()
        size = spec.split(',')
        data = data.reshape((int(size[0]), int(size[1])), order='F')
        return data

    @staticmethod
    def _read_output(output_file_dir):
        """Read a float32 label file as a (# total frame, 1) column vector."""
        data = np.fromfile(output_file_dir, dtype=np.float32)  # data shape : (# total frame,)
        data = data.reshape(-1, 1)  # data shape : (# total frame, 1)
        return data

    @staticmethod
    def _padding(inputs, batch_size, w_val):
        """Zero-pad to a batch_size multiple, then append a w_val-frame window."""
        pad_size = batch_size - inputs.shape[0] % batch_size
        inputs = np.concatenate((inputs, np.zeros((pad_size, inputs.shape[1]), dtype=np.float32)))
        window_pad = np.zeros((w_val, inputs.shape[1]))
        inputs = np.concatenate((inputs, window_pad), axis=0)
        return inputs

    def next_batch(self, batch_size):
        """Return the next (inputs, outputs) pair.

        ``inputs`` has ``batch_size + self._w`` rows (extra trailing context
        for the target delay); ``outputs`` has ``batch_size`` rows.  When the
        current file is exhausted the next file is loaded and ``file_change``
        (and possibly ``eof``) is set.
        """
        if self._start_idx == 0:
            # Fresh position: (re)load the current file pair.
            self._inputs = self._padding(
                self._read_input(self._input_file_list[self._num_file],
                                 self._input_spec_list[self._num_file]), batch_size, self._w)
            self._outputs = self._padding(self._read_output(self._output_file_list[self._num_file]), batch_size, self._w)
            assert np.shape(self._inputs)[0] == np.shape(self._outputs)[0], \
                ("# samples is not matched between input: %d and output: %d files"
                 % (np.shape(self._inputs)[0], np.shape(self._outputs)[0]))
            self.num_samples = np.shape(self._outputs)[0]

        if self._start_idx + batch_size > self.num_samples:
            # Current file exhausted: advance (wrapping at the end) and load.
            self._start_idx = 0
            self.file_change = True
            self._num_file += 1
            if self._num_file > self._file_len - 1:
                self.eof = True
                self._num_file = 0
            self._inputs = self._padding(
                self._read_input(self._input_file_list[self._num_file],
                                 self._input_spec_list[self._num_file]), batch_size, self._w)
            self._outputs = self._padding(self._read_output(self._output_file_list[self._num_file]), batch_size, self._w)
            data_len = np.shape(self._inputs)[0]
            self._outputs = self._outputs[0:data_len, :]
            assert np.shape(self._inputs)[0] == np.shape(self._outputs)[0], \
                ("# samples is not matched between input: %d and output: %d files"
                 % (np.shape(self._inputs)[0], np.shape(self._outputs)[0]))
            self.num_samples = np.shape(self._outputs)[0]
        else:
            self.file_change = False
            self.eof = False

        inputs = self._inputs
        outputs = self._outputs

        '''data mini batching part'''
        # Inputs carry self._w extra trailing frames of context.
        inputs = inputs[self._start_idx:self._start_idx + batch_size + self._w, :]
        outputs = outputs[self._start_idx:self._start_idx + batch_size, :]
        self._start_idx += batch_size

        return inputs, outputs

    def normalize(self, x):
        """Apply the precomputed global mean/std normalization."""
        x = (x - self.train_mean) / self.train_std
        return x

    def reader_initialize(self):
        """Rewind to the first file and clear the EOF flag."""
        self._num_file = 0
        self._start_idx = 0
        self.eof = False

    def eof_checker(self):
        return self.eof

    def file_change_checker(self):
        return self.file_change

    def file_change_initialize(self):
        self.file_change = False

    def set_random_batch(self, batch_size):
        """Jump to a random offset within the currently loaded file."""
        self._start_idx = np.maximum(0, np.random.random_integers(self.num_samples - batch_size))
def dense_to_one_hot(labels_dense, num_classes=2):
    """Convert class labels from scalars to one-hot vectors.

    (Adapted from the TensorFlow tutorial.)  Returns an array of shape
    (len(labels_dense), num_classes) with a 1 in each row's label column.
    """
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    # BUG FIX: label files in this project are float32 (see _read_output),
    # and float fancy-indexing raises IndexError on NumPy — cast to int.
    labels_one_hot.flat[(index_offset + labels_dense.ravel()).astype(int)] = 1
    return labels_one_hot
# file_dir = "/home/sbie/github/VAD_KJT/Datamake/Database/Aurora2withSE"
# input_dir1 = file_dir + "/STFT2"
# output_dir1 = file_dir + "/Labels"
# dr = DataReader(input_dir1, output_dir1, input_dir1,name='test')
#
# for i in range(1000000):
# tt, pp = dr.next_batch(500)
# print("asdf")
| 8,149
| 35.222222
| 123
|
py
|
VAD
|
VAD-master/lib/python/data_reader_bDNN_v2.py
|
import numpy as np
import os
import glob
import utils
import scipy.io as sio
class DataReader(object):
    """Mini-batch reader for bDNN training data.

    Streams paired (input, label) batches from matched ``*.bin`` files,
    padding each file with a ``w``-frame zero window on BOTH sides so that
    every batch can carry symmetric context.  Both inputs and outputs are
    expanded with ``utils.bdnn_transform`` before being returned.

    BUG FIX: ``reader_initialize`` previously reset ``_start_idx`` to 0, but
    the constructor and ``next_batch`` use ``self._w`` as the "load a new
    file" sentinel, and the batch slice starts at ``_start_idx - self._w`` —
    a reset to 0 therefore skipped reloading and produced a negative slice.
    It now resets to ``self._w``.
    """

    def __init__(self, input_dir, output_dir, norm_dir, w=19, u=9, name=None):
        self._input_dir = input_dir
        self._output_dir = output_dir
        self._norm_dir = norm_dir
        # Inputs are .bin feature dumps with a sibling .txt shape spec;
        # sorted() keeps input/output lists aligned by filename.
        self._input_file_list = sorted(glob.glob(input_dir + '/*.bin'))
        self._input_spec_list = sorted(glob.glob(input_dir + '/*.txt'))
        self._output_file_list = sorted(glob.glob(output_dir + '/*.bin'))
        self._file_len = len(self._input_file_list)
        self._name = name
        assert self._file_len == len(self._output_file_list), "# input files and output file is not matched"
        self._w = w  # context half-window, in frames
        self._u = u  # bdnn subsampling step
        self.eof = False
        self.file_change = False
        self.num_samples = 0
        self._inputs = 0   # ndarray once the first file is loaded
        self._outputs = 0  # ndarray once the first file is loaded
        self._epoch = 1
        self._num_file = 0
        # _start_idx == self._w is the sentinel meaning "load current file".
        self._start_idx = self._w
        # Global mean/std computed offline over the training set.
        norm_param = sio.loadmat(self._norm_dir + '/global_normalize_factor.mat')
        self.train_mean = norm_param['global_mean']
        self.train_std = norm_param['global_std']
        self.raw_inputs = 0  # last un-normalized input slice (for inspection)

    def _binary_read_with_shape(self):
        pass

    @staticmethod
    def _read_input(input_file_dir, input_spec_dir):
        """Read a float32 feature file, reshaped per its .txt spec (Fortran order)."""
        data = np.fromfile(input_file_dir, dtype=np.float32)  # (# total frame, feature_size)
        with open(input_spec_dir, 'r') as f:
            spec = f.readline()
        size = spec.split(',')
        data = data.reshape((int(size[0]), int(size[1])), order='F')
        return data

    @staticmethod
    def _read_output(output_file_dir):
        """Read a float32 label file as a (# total frame, 1) column vector."""
        data = np.fromfile(output_file_dir, dtype=np.float32)
        data = data.reshape(-1, 1)
        return data

    @staticmethod
    def _padding(inputs, batch_size, w_val):
        """Zero-pad to a batch_size multiple, then wrap with w_val frames on each side."""
        pad_size = batch_size - inputs.shape[0] % batch_size
        inputs = np.concatenate((inputs, np.zeros((pad_size, inputs.shape[1]), dtype=np.float32)))
        window_pad = np.zeros((w_val, inputs.shape[1]))
        inputs = np.concatenate((window_pad, inputs, window_pad), axis=0)
        return inputs

    def next_batch(self, batch_size):
        """Return the next bdnn-transformed (inputs, outputs) batch pair."""
        if self._start_idx == self._w:
            # Fresh position: (re)load the current file pair.
            self._inputs = self._padding(
                self._read_input(self._input_file_list[self._num_file],
                                 self._input_spec_list[self._num_file]), batch_size, self._w)
            self._outputs = self._padding(self._read_output(self._output_file_list[self._num_file]), batch_size, self._w)
            self._outputs = self._outputs[0:self._inputs.shape[0]]
            assert np.shape(self._inputs)[0] == np.shape(self._outputs)[0], \
                ("# samples is not matched between input: %d and output: %d files"
                 % (np.shape(self._inputs)[0], np.shape(self._outputs)[0]))
            self.num_samples = np.shape(self._outputs)[0]

        if self._start_idx + batch_size > self.num_samples:
            # Current file exhausted: advance (wrapping at the end) and load.
            self._start_idx = self._w
            self.file_change = True
            self._num_file += 1
            if self._num_file > self._file_len - 1:
                self.eof = True
                self._num_file = 0
            self._inputs = self._padding(
                self._read_input(self._input_file_list[self._num_file],
                                 self._input_spec_list[self._num_file]), batch_size, self._w)
            self._outputs = self._padding(self._read_output(self._output_file_list[self._num_file]), batch_size, self._w)
            data_len = np.shape(self._inputs)[0]
            self._outputs = self._outputs[0:data_len, :]
            assert np.shape(self._inputs)[0] == np.shape(self._outputs)[0], \
                ("# samples is not matched between input: %d and output: %d files"
                 % (np.shape(self._inputs)[0], np.shape(self._outputs)[0]))
            self.num_samples = np.shape(self._outputs)[0]
        else:
            self.file_change = False
            self.eof = False

        # Slice with w frames of context on both sides, normalize, expand
        # into bdnn windows, then drop the context rows again.
        inputs = self._inputs[self._start_idx - self._w:self._start_idx + batch_size + self._w, :]
        self.raw_inputs = inputs
        inputs = self.normalize(inputs)
        inputs = utils.bdnn_transform(inputs, self._w, self._u)
        inputs = inputs[self._w: -self._w, :]

        outputs = self._outputs[self._start_idx - self._w:self._start_idx + batch_size + self._w, :]
        outputs = utils.bdnn_transform(outputs, self._w, self._u)
        outputs = outputs[self._w: -self._w, :]

        self._start_idx += batch_size
        return inputs, outputs

    def normalize(self, x):
        """Apply the precomputed global mean/std normalization."""
        x = (x - self.train_mean) / self.train_std
        return x

    def reader_initialize(self):
        """Rewind to the first file and restore the constructor state."""
        self._num_file = 0
        # BUG FIX: must be self._w, not 0 (see class docstring).
        self._start_idx = self._w
        self.eof = False

    def eof_checker(self):
        return self.eof

    def file_change_checker(self):
        return self.file_change

    def file_change_initialize(self):
        self.file_change = False

    def set_random_batch(self, batch_size):
        """Jump to a random offset within the currently loaded file."""
        self._start_idx = np.maximum(0, np.random.random_integers(self.num_samples - batch_size))
def dense_to_one_hot(labels_dense, num_classes=2):
    """Convert class labels from scalars to one-hot vectors.

    (Adapted from the TensorFlow tutorial.)  Returns an array of shape
    (len(labels_dense), num_classes) with a 1 in each row's label column.
    """
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    # BUG FIX: label files in this project are float32 (see _read_output),
    # and float fancy-indexing raises IndexError on NumPy — cast to int.
    labels_one_hot.flat[(index_offset + labels_dense.ravel()).astype(int)] = 1
    return labels_one_hot
| 7,108
| 37.427027
| 123
|
py
|
VAD
|
VAD-master/lib/python/data_reader_DNN_v2.py
|
import numpy as np
import os
import glob
import utils
import scipy.io as sio
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
class DataReader(object):
    """Mini-batch reader for DNN training data.

    Streams paired (input, label) batches from matched ``*.bin`` files,
    padding each file with a ``w``-frame zero window on BOTH sides.  Inputs
    are normalized and expanded with ``utils.bdnn_transform``; outputs are
    returned as plain per-frame labels.

    BUG FIX: ``reader_initialize`` previously reset ``_start_idx`` to 0, but
    the constructor and ``next_batch`` use ``self._w`` as the "load a new
    file" sentinel, and the input slice starts at ``_start_idx - self._w`` —
    a reset to 0 therefore skipped reloading and produced a negative slice.
    It now resets to ``self._w``.
    """

    def __init__(self, input_dir, output_dir, norm_dir, w=19, u=9, name=None):
        self._input_dir = input_dir
        self._output_dir = output_dir
        self._norm_dir = norm_dir
        # Inputs are .bin feature dumps with a sibling .txt shape spec;
        # sorted() keeps input/output lists aligned by filename.
        self._input_file_list = sorted(glob.glob(input_dir + '/*.bin'))
        self._input_spec_list = sorted(glob.glob(input_dir + '/*.txt'))
        self._output_file_list = sorted(glob.glob(output_dir + '/*.bin'))
        self._file_len = len(self._input_file_list)
        self._name = name
        assert self._file_len == len(self._output_file_list), "# input files and output file is not matched"
        self._w = w  # context half-window, in frames
        self._u = u  # bdnn subsampling step
        self.eof = False
        self.file_change = False
        self.num_samples = 0
        self._inputs = 0   # ndarray once the first file is loaded
        self._outputs = 0  # ndarray once the first file is loaded
        self._epoch = 1
        self._num_file = 0
        # _start_idx == self._w is the sentinel meaning "load current file".
        self._start_idx = self._w
        # Global mean/std computed offline over the training set.
        norm_param = sio.loadmat(self._norm_dir + '/global_normalize_factor.mat')
        self.train_mean = norm_param['global_mean']
        self.train_std = norm_param['global_std']
        self.raw_inputs = 0  # last un-normalized input slice (for inspection)

    def _binary_read_with_shape(self):
        pass

    @staticmethod
    def _read_input(input_file_dir, input_spec_dir):
        """Read a float32 feature file, reshaped per its .txt spec (Fortran order)."""
        data = np.fromfile(input_file_dir, dtype=np.float32)  # (# total frame, feature_size)
        with open(input_spec_dir, 'r') as f:
            spec = f.readline()
        size = spec.split(',')
        data = data.reshape((int(size[0]), int(size[1])), order='F')
        return data

    @staticmethod
    def _read_output(output_file_dir):
        """Read a float32 label file as a (# total frame, 1) column vector."""
        data = np.fromfile(output_file_dir, dtype=np.float32)
        data = data.reshape(-1, 1)
        return data

    @staticmethod
    def _padding(inputs, batch_size, w_val):
        """Zero-pad to a batch_size multiple, then wrap with w_val frames on each side."""
        pad_size = batch_size - inputs.shape[0] % batch_size
        inputs = np.concatenate((inputs, np.zeros((pad_size, inputs.shape[1]), dtype=np.float32)))
        window_pad = np.zeros((w_val, inputs.shape[1]))
        inputs = np.concatenate((window_pad, inputs, window_pad), axis=0)
        return inputs

    def next_batch(self, batch_size):
        """Return the next (bdnn-transformed inputs, raw labels) batch pair."""
        if self._start_idx == self._w:
            # Fresh position: (re)load the current file pair.
            self._inputs = self._padding(
                self._read_input(self._input_file_list[self._num_file],
                                 self._input_spec_list[self._num_file]), batch_size, self._w)
            self._outputs = self._padding(self._read_output(self._output_file_list[self._num_file]), batch_size, self._w)
            assert np.shape(self._inputs)[0] == np.shape(self._outputs)[0], \
                ("# samples is not matched between input: %d and output: %d files"
                 % (np.shape(self._inputs)[0], np.shape(self._outputs)[0]))
            self.num_samples = np.shape(self._outputs)[0]

        if self._start_idx + batch_size > self.num_samples:
            # Current file exhausted: advance (wrapping at the end) and load.
            self._start_idx = self._w
            self.file_change = True
            self._num_file += 1
            if self._num_file > self._file_len - 1:
                self.eof = True
                self._num_file = 0
            self._inputs = self._padding(
                self._read_input(self._input_file_list[self._num_file],
                                 self._input_spec_list[self._num_file]), batch_size, self._w)
            self._outputs = self._padding(self._read_output(self._output_file_list[self._num_file]), batch_size, self._w)
            data_len = np.shape(self._inputs)[0]
            self._outputs = self._outputs[0:data_len, :]
            assert np.shape(self._inputs)[0] == np.shape(self._outputs)[0], \
                ("# samples is not matched between input: %d and output: %d files"
                 % (np.shape(self._inputs)[0], np.shape(self._outputs)[0]))
            self.num_samples = np.shape(self._outputs)[0]
        else:
            self.file_change = False
            self.eof = False

        # Inputs get w frames of context on both sides before the transform;
        # outputs are the plain per-frame labels for the batch.
        inputs = self._inputs[self._start_idx - self._w:self._start_idx + batch_size + self._w, :]
        self.raw_inputs = inputs
        inputs = self.normalize(inputs)
        inputs = utils.bdnn_transform(inputs, self._w, self._u)
        inputs = inputs[self._w: -self._w, :]
        outputs = self._outputs[self._start_idx:self._start_idx + batch_size, :]
        self._start_idx += batch_size
        return inputs, outputs

    def normalize(self, x):
        """Apply the precomputed global mean/std normalization."""
        x = (x - self.train_mean) / self.train_std
        return x

    def reader_initialize(self):
        """Rewind to the first file and restore the constructor state."""
        self._num_file = 0
        # BUG FIX: must be self._w, not 0 (see class docstring).
        self._start_idx = self._w
        self.eof = False

    def eof_checker(self):
        return self.eof

    def file_change_checker(self):
        return self.file_change

    def file_change_initialize(self):
        self.file_change = False
def dense_to_one_hot(labels_dense, num_classes=2):
    """Convert class labels from scalars to one-hot vectors.

    (Adapted from the TensorFlow tutorial.)  Returns an array of shape
    (len(labels_dense), num_classes) with a 1 in each row's label column.
    """
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    # BUG FIX: label files in this project are float32 (see _read_output),
    # and float fancy-indexing raises IndexError on NumPy — cast to int.
    labels_one_hot.flat[(index_offset + labels_dense.ravel()).astype(int)] = 1
    return labels_one_hot
# file_dir = "/home/sbie/github/VAD_KJT/Datamake/Database/Aurora2withSE"
# input_dir1 = file_dir + "/STFT2"
# output_dir1 = file_dir + "/Labels"
# dr = DataReader(input_dir1, output_dir1, input_dir1,name='test')
#
# for i in range(1000000):
# tt, pp = dr.next_batch(500)
# print("asdf")
| 6,299
| 34.393258
| 121
|
py
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/train_on_simulation.py
|
from typing import List
import os
import time
import argparse
from argparse import Namespace
import logging
from scipy import sparse as sp #type: ignore
import numpy as np #type: ignore
from sklearn.utils.extmath import randomized_svd #type: ignore
from tqdm import tqdm #type: ignore
import pandas as pd #type: ignore
from scipy import sparse as sp #type: ignore
import torch #type: ignore
from acgan.module import *
from acgan.recommender import *
def frame2mat(df, num_u, num_i):
    """Build a binary sparse interaction matrix from an interaction frame.

    Args:
        df: frame with integer ``uidx`` (user) and ``iidx`` (item) columns.
        num_u: number of users (matrix rows).
        num_i: number of items (matrix columns).

    Returns:
        A ``(num_u, num_i)`` CSR matrix with a 1 at every observed pair.
    """
    users = df.uidx
    items = df.iidx
    values = np.ones(len(users))
    return sp.csr_matrix((values, (users, items)), shape=(num_u, num_i))
def main(args: Namespace):
    """Train and evaluate recommender baselines on simulated feedback.

    Loads the simulated train/val/test splits and the pre-trained relevance /
    exposure factor models from ``args.sim_path``, then runs the popularity
    and SVD baselines plus every model requested in ``args.models``
    ('ncf'/'mlp'/'gmf' each get base, pop-adjust, mirror-adjust and
    oracle-adjust variants; 'acgan' runs the adversarial training loop),
    logging unbiased evaluation metrics for each.

    BUG FIX: the tune-mode branch called ``pd.concate`` (not a pandas
    function — AttributeError); it now calls ``pd.concat``.
    """
    # NOTE(review): '_smaple' is a misspelled suffix but must match the file
    # produced by the data-preparation step — kept verbatim.
    ratings = pd.read_feather(os.path.join(args.data_path, args.data_name + '_smaple'))
    user_num, item_num = ratings.uidx.max() + 1, ratings.iidx.max() + 1
    tr_df = pd.read_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_train.feather'))
    val_df = pd.read_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_val.feather'))
    te_df = pd.read_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_test.feather'))
    if args.tune_mode:
        # Final runs: train on train+val, report on the held-out test split.
        tr_df = pd.concat([tr_df, val_df])
    else:
        # Tuning runs: keep the training split, report on the val split.
        te_df = val_df

    # Per-user set of already-seen items, and per-item interaction counts.
    past_hist = tr_df.groupby('uidx').apply(lambda x: set(x.iidx)).to_dict()
    item_cnt_dict = tr_df.groupby('iidx').count().uidx.to_dict()
    item_cnt = np.array([item_cnt_dict.get(iidx, 0) for iidx in range(item_num)])
    logger.info(f'test data size: {te_df.shape}')
    dim = args.dim

    # Pre-trained relevance ("oracle rating") model used by the evaluator.
    rel_factor = FactorModel(user_num, item_num, dim)
    PATH = os.path.join(args.sim_path, f'{args.prefix}_rel.pt')
    rel_factor.load_state_dict(torch.load(PATH))
    rel_factor.eval()

    # Ground-truth exposure model (with its trained noise wrapper).
    train_expo_factor = FactorModel(user_num, item_num, dim)
    PATH = os.path.join(args.sim_path, f'{args.prefix}_expo.pt')
    train_expo_factor.load_state_dict(torch.load(PATH))
    train_expo_factor.eval()
    train_expo_factor = NoiseFactor(train_expo_factor, args.dim)
    train_expo_factor = train_expo_factor.to(torch.device(f'cuda:{args.cuda_idx}'))
    train_expo_factor.load_state_dict(torch.load(os.path.join(args.sim_path, f'{args.prefix}_expo_noise.pt')))
    train_expo_factor.eval()

    # Bootstrap exposure model used for the unbiased evaluation weighting.
    expo_factor = FactorModel(user_num, item_num, dim)
    PATH = os.path.join(args.sim_path, f'{args.prefix}_expo_bs.pt')
    expo_factor.load_state_dict(torch.load(PATH))
    expo_factor.eval()

    rating_model = RatingEstimator(user_num, item_num, rel_factor)
    expo_model = ClassRecommender(user_num, item_num, expo_factor)
    tr_mat = frame2mat(tr_df, user_num, item_num)
    val_mat = frame2mat(val_df, user_num, item_num)  # NOTE(review): unused below
    choices = args.models
    logging.info(f'Running {choices}')

    def get_model(model_str, user_num, item_num, factor_num):
        # Map a model name to a freshly constructed factor model.
        if model_str == 'mlp':
            return MLPRecModel(user_num, item_num, factor_num)
        elif model_str == 'gmf':
            return FactorModel(user_num, item_num, factor_num)
        elif model_str == 'ncf':
            return NCFModel(user_num, item_num, factor_num)
        else:
            raise NotImplementedError(f'{model_str} is not implemented')

    logging.info('-------The Popularity model-------')
    pop_factor = PopularModel(item_cnt)
    pop_model = PopRecommender(pop_factor)
    logger.info('unbiased eval for plain popular model on test')
    unbiased_eval(user_num, item_num, te_df, pop_model, epsilon=args.epsilon,
                  rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)

    logger.info('-------The SVD model---------')
    sv = SVDRecommender(tr_mat.shape[0], tr_mat.shape[1], dim)
    logger.info(f'model with dimension {dim}')
    sv.fit(tr_mat)
    logger.info('un-biased eval for SVD model on test')
    unbiased_eval(user_num, item_num, te_df, sv, epsilon=args.epsilon,
                  rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)

    def complete_experiment(model_str, user_num, item_num, dim):
        # Run the base / pop-adjust / mirror-adjust / oracle-adjust variants
        # of one model family and log unbiased metrics for each.
        logging.info(f'-------The {model_str} model-------')
        base_factor = get_model(model_str, user_num=user_num, item_num=item_num, factor_num=dim)
        base_model = ClassRecommender(user_num, item_num, base_factor)
        base_model.fit(tr_df,
                       num_epochs=args.epoch,
                       cuda=args.cuda_idx,
                       decay=1e-8,
                       num_neg=args.num_neg,
                       past_hist=past_hist,
                       lr=args.lr)
        logger.info(f'unbiased eval for {model_str} model on test')
        unbiased_eval(user_num, item_num, te_df, base_model, epsilon=args.epsilon,
                      rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)

        logging.info(f'-------The {model_str} Pop Adjust model-------')
        pop_adjust_factor = get_model(model_str, user_num=user_num, item_num=item_num, factor_num=dim)
        pop_adjust_model = ClassRecommender(user_num, item_num, pop_adjust_factor, pop_factor, expo_thresh=0.1)
        pop_adjust_model.fit(tr_df,
                             num_epochs=args.epoch,
                             cuda=args.cuda_idx,
                             decay=args.decay,
                             num_neg=args.num_neg,
                             past_hist=past_hist,
                             lr=args.lr)
        logger.info(f'unbiased eval for adjust {model_str} with popular model on test')
        unbiased_eval(user_num, item_num, te_df, pop_adjust_model, epsilon=args.epsilon,
                      rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)
        del pop_adjust_factor

        logging.info(f'-------The {model_str} Mirror Adjust model-------')
        adjust_factor = get_model(model_str, user_num=user_num, item_num=item_num, factor_num=dim)
        adjust_model = ClassRecommender(user_num, item_num, adjust_factor, base_factor, expo_thresh=0.1)
        adjust_model.fit(tr_df,
                         num_epochs=args.epoch,
                         cuda=args.cuda_idx,
                         num_neg=args.num_neg,
                         past_hist=past_hist,
                         decay=args.decay,
                         lr=args.lr)
        logger.info(f'un-biased eval for {model_str} mirror adjusted model')
        unbiased_eval(user_num, item_num, te_df, adjust_model, epsilon=args.epsilon,
                      rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)
        del adjust_factor

        logger.info(f'-------The {model_str} Oracle Adjust model---------')
        oracle_factor = get_model(model_str, user_num=user_num, item_num=item_num, factor_num=dim)
        oracle_model = ClassRecommender(user_num,
                                        item_num, oracle_factor, train_expo_factor, expo_thresh=0.1, expo_compound=args.p)
        oracle_model.fit(tr_df,
                         num_epochs=args.epoch,
                         cuda=args.cuda_idx,
                         num_neg=args.num_neg,
                         past_hist=past_hist,
                         decay=args.decay,
                         lr=args.lr)
        logger.info('un-biased eval for oracle model on test')
        unbiased_eval(user_num, item_num, te_df, oracle_model, epsilon=args.epsilon,
                      rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)
        del oracle_factor

    for model_str in choices:
        if model_str != 'acgan':
            complete_experiment(model_str, user_num, item_num, dim)

    if 'acgan' in choices:
        logger.info('-------The AC GAN model---------')
        f = get_model(args.f_model, user_num, item_num, dim)
        g = get_model(args.g_model, user_num, item_num, dim)
        beta = BetaModel(user_num=user_num, item_num=item_num)
        f_recommender = ClassRecommender(user_num, item_num, f)
        g_recommender = ClassRecommender(user_num, item_num, g)
        # Warm-start the generator before adversarial training.
        g_recommender.fit(tr_df,
                          num_epochs=args.g_round_head,
                          cuda=args.cuda_idx,
                          num_neg=args.num_neg,
                          past_hist=past_hist,
                          decay=args.decay,
                          lr=args.lr)
        ac_train_v3(f, False, g, False, beta, tr_df,
                    user_num=user_num,
                    item_num=item_num,
                    num_neg=args.num_neg,
                    past_hist=past_hist,
                    val_df=te_df,
                    rating_model=rating_model,
                    expo_model=expo_model,
                    num_epochs=args.epoch,
                    decay=args.decay,
                    cuda_idx=args.cuda_idx,
                    lr=args.lr,
                    g_weight=0.5,
                    expo_compound=args.p,
                    epsilon=args.epsilon)
        logger.info(f'eval on test with f_model ({args.f_model})')
        unbiased_eval(user_num, item_num, te_df, f_recommender, epsilon=args.epsilon,
                      rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)
        logger.info(f'eval on test with g_model ({args.g_model})')
        unbiased_eval(user_num, item_num, te_df, g_recommender, epsilon=args.epsilon,
                      rel_model=rating_model, past_hist=past_hist, expo_model=expo_model, expo_compound=args.p)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=1024)
    parser.add_argument('--dim', type=int, default=16)
    parser.add_argument('--epsilon', type=float, default=4)
    parser.add_argument('--p', type=float, default=1)
    # BUG FIX: epoch counts are integral; ``type=float`` made a command-line
    # value like ``--epoch 10`` arrive as 10.0.
    parser.add_argument('--epoch', type=int, default=10)
    parser.add_argument('--decay', type=float, default=1e-7)
    parser.add_argument('--sim_path', type=str, required=True)
    parser.add_argument('--data_path', type=str, required=True)
    parser.add_argument('--cuda_idx', type=int, default=0)
    parser.add_argument('--data_name', type=str, default='ratings.feather')
    parser.add_argument('--prefix', type=str, default='ml_1m_mf')
    parser.add_argument('--tune_mode', action='store_true')
    # BUG FIX: was ``type=str`` with an int default, so any command-line
    # override arrived as a string while the default stayed an int.
    parser.add_argument('--num_neg', type=int, default=4)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--models',
                        default=['ncf', 'mlp', 'gmf', 'acgan'],
                        nargs='+',
                        help="input a list of ['ncf', 'mlp', 'gmf', 'acgan']")
    parser.add_argument('--f_model', type=str, default='mlp')
    parser.add_argument('--g_model', type=str, default='mlp')
    parser.add_argument('--g_round_head', type=int, default=5)
    args = parser.parse_args()

    # Set up logging: INFO+ to a timestamped file under log/, WARN+ to console.
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    fh = logging.FileHandler(f'log/{args.prefix}-{str(time.time())}.log')
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARN)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)
    logger.info(args)
    main(args)
| 43.787402
| 111
|
py
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/setup.py
|
from setuptools import setup, find_packages
# Debug aid: show which packages setuptools discovers.
print(find_packages())
# Minimal package definition for the acgan library; the bundled py.typed
# marker advertises inline type annotations (PEP 561).
setup(name='acgan',
      version='1.0',
      packages=['acgan'],
      package_data={"acgan": ["py.typed"]})
| 178
| 24.571429
| 43
|
py
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/NCF_validation.py
|
from typing import List
import os
import time
import argparse
from argparse import Namespace
import logging
from scipy import sparse as sp #type: ignore
import numpy as np #type: ignore
from sklearn.utils.extmath import randomized_svd #type: ignore
from tqdm import tqdm #type: ignore
import pandas as pd #type: ignore
from scipy import sparse as sp #type: ignore
import torch #type: ignore
from acgan.module import *
from acgan.recommender import *
from ncf_utils import *
class DuckModel:
    """Adapter that exposes a ``predict``-style API on top of a recommender
    implementing ``score(users, items)``."""

    def __init__(self, model):
        # Wrapped recommender; must provide a ``score`` method.
        self.model = model

    def predict(self, in_data, batch_size=100, verbose=0):
        """Score the (user, item) pairs in ``in_data``.

        ``batch_size`` and ``verbose`` are accepted only for interface
        compatibility with the NCF evaluation code and are ignored.
        """
        user_arr, item_arr = in_data
        return self.model.score(user_arr.tolist(), item_arr.tolist())
# Load the NCF leave-one-out evaluation data (train matrix, test ratings,
# per-user negative candidate lists).
dataset = Dataset('data/ncf_data/ml-1m')
train, testRatings, testNegatives = dataset.trainMatrix, dataset.testRatings, dataset.testNegatives
# Convert the sparse train matrix into the (uidx, iidx, rating, ts) frame
# format that ClassRecommender.fit expects; all implicit ratings are 1.
uidx, iidx = train.nonzero()
rating = np.ones_like(uidx).astype(np.float32)
ts = np.arange(rating.shape[0])
train_df = pd.DataFrame({'uidx': uidx, 'iidx': iidx, 'rating': rating, 'ts': ts})
past_hist = train_df.groupby('uidx').apply(lambda x: set(x.iidx)).to_dict()
user_num, item_num = train_df.uidx.max() + 1, train_df.iidx.max() + 1
evaluation_threads = 1
factor_num = 32
K = 10  # cut-off for HR@K / NDCG@K
factor = NCFModel(user_num, item_num, factor_num)
recom = ClassRecommender(user_num, item_num, factor)
recom.fit(train_df,
          num_epochs=20,
          cuda=0,
          decay=1e-7,
          num_neg=4,
          past_hist=past_hist, batch_size=256,
          lr=0.01)
# Wrap the recommender so evaluate_model can call predict() on it.
duck_model = DuckModel(recom)
hit, ndcg = evaluate_model(duck_model, testRatings, testNegatives, K, evaluation_threads)
print(np.mean(hit), np.mean(ndcg))
| 1,725
| 28.254237
| 99
|
py
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/robust_simulation.py
|
"""Script to generate recommendation data from simulation"""
import argparse
from argparse import Namespace
import os
import pandas as pd #type: ignore
import torch #type: ignore
import numpy as np #type: ignore
from scipy import sparse as sp #type: ignore
from tqdm import tqdm #type: ignore
from acgan.data import RatingData
from acgan.module import FactorModel, NoiseFactor
from acgan.recommender import ClassRecommender, RatingEstimator, BPRRecommender
from sklearn.model_selection import train_test_split
# Fix all RNG seeds and force deterministic cuDNN so simulation runs are reproducible.
torch.manual_seed(123)
np.random.seed(123)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def main(args: Namespace):
    """Simulate a biased click log for the robustness experiment.

    Trains a relevance model and an exposure model on a truncated slice of the
    real ratings, perturbs the exposure model with noise, then samples observed
    pairs as click ~ sigmoid(rel - epsilon) AND exposure ~ sigmoid(logit)**p.
    Writes model checkpoints and train/val/test feather files to args.sim_path.
    """
    ratings = pd.read_feather(os.path.join(args.data_path, args.data_name))
    # Restrict to a small user/item sub-matrix so the dense simulation below
    # (full user x item cross product) fits in memory.
    u_limit, i_limit = args.u_limit, args.i_limit
    ratings = ratings[(ratings.uidx < u_limit) & (ratings.iidx < i_limit)]
    ratings.reset_index(inplace=True)
    ratings.to_feather(os.path.join(args.data_path, args.data_name + '_smaple'))
    u_num, i_num = ratings.uidx.max() + 1, ratings.iidx.max() + 1
    print(f'u: {u_num}, i: {i_num}')
    #
    print('train rel model')
    rel_factor = FactorModel(u_num, i_num, args.dim)
    rating_features = list(zip(ratings.uidx, ratings.iidx, ratings.rating))
    rating_model = RatingEstimator(u_num, i_num, rel_factor)
    rating_model.fit(rating_features, cuda=0, num_epochs=args.epoch)
    #
    print('train expo model')
    expo_factor = FactorModel(u_num, i_num, args.dim)
    #expo_model = BPRRecommender(u_num, i_num, expo_factor)
    expo_model = ClassRecommender(u_num, i_num, expo_factor)
    full_mat = sp.csr_matrix((ratings.rating, (ratings.uidx, ratings.iidx)), shape=(u_num, i_num))
    print(full_mat.shape)
    expo_model.fit(ratings, cuda=0, num_epochs=args.epoch, decay=args.decay)
    torch.save(rel_factor.state_dict(), os.path.join(args.sim_path, f'{args.prefix}_rel.pt'))
    torch.save(expo_factor.state_dict(), os.path.join(args.sim_path, f'{args.prefix}_expo.pt'))
    print('get noise added expo model')
    # Wrap the exposure factors with additive noise to emulate model mismatch.
    expo_factor = NoiseFactor(expo_factor, args.dim, noise_ratio=args.noise_ratio)
    expo_factor = expo_factor.cuda()
    torch.save(expo_factor.state_dict(), os.path.join(args.sim_path, f'{args.prefix}_expo_noise.pt'))
    # re-assign the expo model
    expo_model = ClassRecommender(u_num, i_num, expo_factor)
    sigmoid = lambda x: np.exp(x) / (1 + np.exp(x))
    # Enumerate the full dense user x item cross product.
    u_all = np.arange(u_num).repeat(i_num)
    i_all = np.arange(i_num).repeat(u_num).reshape(i_num, u_num).reshape(-1, order='F')
    est_rel = rating_model.score(u_all, i_all)
    est_click_prob = sigmoid(est_rel - args.epsilon)
    est_logits = expo_model.score(u_all, i_all)
    est_expo_prob = sigmoid(est_logits) ** args.p
    simu_size = len(est_click_prob)
    # A pair is observed only when it is both exposed and clicked.
    click_event = np.random.random(simu_size) < est_click_prob
    expo_event = np.random.random(simu_size) < est_expo_prob
    valid = click_event * expo_event
    train_valid = valid
    print(f'total size: {len(valid)}, valid size: {valid.sum()}')
    out = {}
    out['uidx'] = u_all[valid]
    out['iidx'] = i_all[valid]
    out['click_prob'] = est_click_prob[valid]
    out['expo_prob'] = est_expo_prob[valid]
    # placeholder variable to train the testing exposure model
    out['rating'] = np.ones(out['click_prob'].size)
    out['ts'] = np.random.rand(out['click_prob'].size)
    train_df = pd.DataFrame(out)
    # Fit a fresh exposure model on the simulated training log and persist it.
    new_expo_factor = FactorModel(u_num, i_num, args.dim).cuda()
    new_expo_model = ClassRecommender(u_num, i_num, new_expo_factor)
    new_expo_model.fit(train_df, cuda=0, num_epochs=args.epoch, decay=args.decay)
    torch.save(new_expo_factor.state_dict(), os.path.join(args.sim_path, f'{args.prefix}_expo_bs.pt'))
    est_rel = rating_model.score(u_all, i_all)
    est_click_prob = sigmoid(est_rel - args.epsilon)
    est_logits = new_expo_model.score(u_all, i_all)
    # NOTE(review): expo_prob (from new_expo_model) is computed but never used;
    # the sampling below reuses est_expo_prob from the noisy model. Confirm
    # whether expo_event was meant to use expo_prob instead.
    expo_prob = sigmoid(est_logits) ** args.p
    simu_size = len(est_click_prob)
    click_event = np.random.random(simu_size) < est_click_prob
    expo_event = np.random.random(simu_size) < est_expo_prob
    # Hold-out pairs: observed in this second pass but not in the training log.
    valid = click_event * expo_event * (~train_valid)
    robu_out = {}
    robu_out['uidx'] = u_all[valid]
    robu_out['iidx'] = i_all[valid]
    robu_out['click_prob'] = est_click_prob[valid]
    robu_out['expo_prob'] = est_expo_prob[valid]
    print(valid.sum())
    size = valid.sum()
    # placeholder variable to train the testing exposure model
    robu_out['rating'] = np.ones(size)
    robu_out['ts'] = np.random.rand(size)
    robu_df = pd.DataFrame(robu_out)
    val_df, test_df = train_test_split(robu_df, test_size=0.5)
    train_df = train_df.reset_index(drop=True)
    print(f'train shape: {train_df.shape}')
    val_df = val_df.reset_index(drop=True)
    print(f'val shape: {val_df.shape}')
    test_df = test_df.reset_index(drop=True)
    print(f'test shape: {test_df.shape}')
    print(train_df.head())
    train_df.to_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_train.feather'))
    val_df.to_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_val.feather'))
    test_df.to_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_test.feather'))
if __name__ == '__main__':
    # CLI entry point for the robustness simulation.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=1024)
    parser.add_argument('--dim', type=int, default=16)
    parser.add_argument('--epsilon', type=float, default=3)
    parser.add_argument('--p', type=float, default=2)
    # NOTE(review): epoch is declared float but passed as num_epochs -- confirm
    # int was intended.
    parser.add_argument('--epoch', type=float, default=10)
    parser.add_argument('--decay', type=float, default=1e-8)
    parser.add_argument('--sim_path', type=str, required=True)
    parser.add_argument('--data_path', type=str, required=True)
    parser.add_argument('--data_name', type=str, default='ratings.feather')
    parser.add_argument('--prefix', type=str, default='ml_1m_mf')
    parser.add_argument('--sample_sim', action='store_true')
    parser.add_argument('--item_sample_size', type=int, default=2000)
    parser.add_argument('--noise_ratio', type=float, default=1.0)
    parser.add_argument('--u_limit', type=int, default=500)
    parser.add_argument('--i_limit', type=int ,default=1000)
    args = parser.parse_args()
    main(args)
| 6,198
| 43.92029
| 102
|
py
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/ncf_utils.py
|
'''
This code copied directly from https://github.com/hexiangnan/neural_collaborative_filtering
in order to replicate their results.
Created on Aug 8, 2016
Processing datasets.
@author: Xiangnan He (xiangnanhe@gmail.com)
'''
import scipy.sparse as sp
import numpy as np
class Dataset(object):
    '''
    Loader for the NCF leave-one-out evaluation files.

    Given a filename prefix ``path``, reads:
      * ``<path>.train.rating``  -> ``trainMatrix`` (dok matrix, 1.0 wherever
        the recorded rating is positive)
      * ``<path>.test.rating``   -> ``testRatings`` (list of [user, item])
      * ``<path>.test.negative`` -> ``testNegatives`` (one negative item list
        per test rating, aligned by index)
    '''

    def __init__(self, path):
        '''
        Constructor

        :param path: filename prefix of the rating/negative files.
        '''
        self.trainMatrix = self.load_rating_file_as_matrix(path + ".train.rating")
        self.testRatings = self.load_rating_file_as_list(path + ".test.rating")
        self.testNegatives = self.load_negative_file(path + ".test.negative")
        # Each test rating must have its own negative candidate list.
        assert len(self.testRatings) == len(self.testNegatives)
        self.num_users, self.num_items = self.trainMatrix.shape

    def load_rating_file_as_list(self, filename):
        '''Read tab-separated ``user item ...`` lines into [user, item] pairs.'''
        ratingList = []
        with open(filename, "r") as f:
            line = f.readline()
            # readline() returns "" at EOF; a blank line also terminates,
            # matching the original parser's behaviour.
            while line is not None and line != "":
                arr = line.split("\t")
                user, item = int(arr[0]), int(arr[1])
                ratingList.append([user, item])
                line = f.readline()
        return ratingList

    def load_negative_file(self, filename):
        '''Read one tab-separated negative-item list per line (first field,
        the "(user,item)" tag, is skipped).'''
        negativeList = []
        with open(filename, "r") as f:
            line = f.readline()
            while line is not None and line != "":
                arr = line.split("\t")
                negatives = []
                for x in arr[1:]:
                    negatives.append(int(x))
                negativeList.append(negatives)
                line = f.readline()
        return negativeList

    def load_rating_file_as_matrix(self, filename):
        '''
        Read .rating file and Return dok matrix.
        The first line of .rating file is: num_users\t num_items
        '''
        # First pass: find matrix dimensions from the max user/item ids.
        num_users, num_items = 0, 0
        with open(filename, "r") as f:
            line = f.readline()
            while line is not None and line != "":
                arr = line.split("\t")
                u, i = int(arr[0]), int(arr[1])
                num_users = max(num_users, u)
                num_items = max(num_items, i)
                line = f.readline()
        # Second pass: binarise -- any positive rating becomes an implicit 1.0.
        mat = sp.dok_matrix((num_users + 1, num_items + 1), dtype=np.float32)
        with open(filename, "r") as f:
            line = f.readline()
            while line is not None and line != "":
                arr = line.split("\t")
                user, item, rating = int(arr[0]), int(arr[1]), float(arr[2])
                if (rating > 0):
                    mat[user, item] = 1.0
                line = f.readline()
        return mat
'''
Created on Apr 15, 2016
Evaluate the performance of Top-K recommendation:
Protocol: leave-1-out evaluation
Measures: Hit Ratio and NDCG
(more details are in: Xiangnan He, et al. Fast Matrix Factorization for Online Recommendation with Implicit Feedback. SIGIR'16)
@author: hexiangnan
'''
import math
import heapq # for retrieval topK
import multiprocessing
import numpy as np
from time import time
#from numba import jit, autojit
# Global variables that are shared across processes
# (set by evaluate_model so that multiprocessing pool workers can read them).
_model = None
_testRatings = None
_testNegatives = None
_K = None
def evaluate_model(model, testRatings, testNegatives, K, num_thread):
    """
    Evaluate the performance (Hit_Ratio, NDCG) of top-K recommendation
    Return: score of each test rating.

    Publishes the arguments through module-level globals so eval_one_rating
    can be used as a picklable multiprocessing worker; with num_thread <= 1
    the test cases are evaluated sequentially in-process.
    Returns a (hits, ndcgs) pair of per-test-case lists.
    """
    global _model
    global _testRatings
    global _testNegatives
    global _K
    _model = model
    _testRatings = testRatings
    _testNegatives = testNegatives
    _K = K
    hits, ndcgs = [],[]
    if(num_thread > 1): # Multi-thread
        pool = multiprocessing.Pool(processes=num_thread)
        res = pool.map(eval_one_rating, range(len(_testRatings)))
        pool.close()
        pool.join()
        hits = [r[0] for r in res]
        ndcgs = [r[1] for r in res]
        return (hits, ndcgs)
    # Single thread
    for idx in range(len(_testRatings)):
        (hr,ndcg) = eval_one_rating(idx)
        hits.append(hr)
        ndcgs.append(ndcg)
    return (hits, ndcgs)
def eval_one_rating(idx):
    """Score one leave-one-out test case.

    Ranks the ground-truth item among its sampled negatives using the
    module-level _model and returns (hit@_K, ndcg@_K).
    """
    rating = _testRatings[idx]
    items = _testNegatives[idx]
    u = rating[0]
    gtItem = rating[1]
    # Temporarily add the ground-truth item to the candidate pool.
    items.append(gtItem)
    # Get prediction scores
    map_item_score = {}
    users = np.full(len(items), u, dtype = 'int32')
    predictions = _model.predict([users, np.array(items)],
                                 batch_size=100, verbose=0)
    for i in range(len(items)):
        item = items[i]
        map_item_score[item] = predictions[i]
    # Undo the append so _testNegatives stays unchanged for other callers.
    items.pop()
    # Evaluate top rank list
    ranklist = heapq.nlargest(_K, map_item_score, key=map_item_score.get)
    hr = getHitRatio(ranklist, gtItem)
    ndcg = getNDCG(ranklist, gtItem)
    return (hr, ndcg)
def getHitRatio(ranklist, gtItem):
    """Return 1 if the ground-truth item appears in the ranked list, else 0."""
    return 1 if gtItem in ranklist else 0
def getNDCG(ranklist, gtItem):
    """Return the DCG gain of the ground-truth item (log2 discount by its
    0-based rank), or 0 if it does not appear in the ranked list."""
    for rank, candidate in enumerate(ranklist):
        if candidate == gtItem:
            return math.log(2) / math.log(rank + 2)
    return 0
| 5,181
| 30.02994
| 131
|
py
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/run.py
|
from typing import List
from scipy import sparse as sp #type: ignore
import numpy as np #type: ignore
from sklearn.utils.extmath import randomized_svd #type: ignore
from tqdm import tqdm
from acgan.recommender import SVDRecommender, BPRRecommender, eval_test
from acgan.module import FactorModel
# te = sp.load_npz('data/ml-1m/test.npz')
# val = sp.load_npz('data/ml-1m/val.npz')
# tr = sp.load_npz('data/ml-1m/train.npz')
# dim=32
# sv = SVDRecommender(tr.shape[0], tr.shape[1], dim)
# print(f'model with dimension {dim}')
# sv.fit(tr)
# eval_test(te, sv, cut_len=10)
# factor_model = FactorModel(tr.shape[0], tr.shape[1], dim)
# bpr = BPRRecommender(tr.shape[0], tr.shape[1], factor_model)
# bpr.fit(tr, val, cuda=0, num_neg=4)
# eval_test(te, bpr, cut_len=10)
from scipy import sparse as sp #type: ignore
from acgan.module import FactorModel, BetaModel
from acgan.recommender import ac_train
# Load the pre-split ML-1M interaction matrices.
te = sp.load_npz('data/ml-1m/test.npz')
val = sp.load_npz('data/ml-1m/val.npz')
tr = sp.load_npz('data/ml-1m/train.npz')
dim=32
# f/g factor models plus a beta weighting model, trained adversarially.
f = FactorModel(user_num=tr.shape[0], item_num=tr.shape[1], factor_num=dim)
g = FactorModel(user_num=tr.shape[0], item_num=tr.shape[1], factor_num=dim)
beta = BetaModel(user_num=tr.shape[0], item_num=tr.shape[1])
ac_train(f, g, beta, tr, val)
| 1,278
| 29.452381
| 75
|
py
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/train_on_real.py
|
from typing import List
import os
import time
import argparse
from argparse import Namespace
import logging
from scipy import sparse as sp #type: ignore
import numpy as np #type: ignore
from sklearn.utils.extmath import randomized_svd #type: ignore
from tqdm import tqdm #type: ignore
import pandas as pd #type: ignore
from scipy import sparse as sp #type: ignore
import torch #type: ignore
from acgan.module import *
from acgan.recommender import *
def frame2mat(df, num_u, num_i):
    """Convert an interaction frame with ``uidx``/``iidx`` columns into a
    binary CSR matrix of shape (num_u, num_i), with a 1 per interaction."""
    users = df.uidx
    items = df.iidx
    ones = np.ones(len(users))
    return sp.csr_matrix((ones, (users, items)), shape=(num_u, num_i))
def main(args: Namespace):
    """Train and evaluate the baseline / adjusted / adversarial recommenders
    on a real, pre-split interaction dataset, logging unbiased metrics."""
    ratings = pd.read_feather(os.path.join(args.data_path, args.data_name))
    user_num, item_num = ratings.uidx.max() + 1, ratings.iidx.max() + 1
    tr_df = pd.read_feather(os.path.join(args.data_path, 'train.feather'))
    val_df = pd.read_feather(os.path.join(args.data_path, 'val.feather'))
    te_df = pd.read_feather(os.path.join(args.data_path, 'test.feather'))
    # In tune mode evaluate on the validation split; otherwise fold validation
    # into training and evaluate on the held-out test split.
    if not args.tune_mode:
        tr_df = pd.concat([tr_df, val_df])
        te_df = te_df
    else:
        tr_df = tr_df
        te_df = val_df
    past_hist = tr_df.groupby('uidx').apply(lambda x: set(x.iidx)).to_dict()
    item_cnt_dict = tr_df.groupby('iidx').count().uidx.to_dict()
    item_cnt = np.array([item_cnt_dict.get(iidx, 0) for iidx in range(item_num)])
    # Per-user item sequences ordered by timestamp (for sequence models).
    hist = tr_df.groupby('uidx').apply(
        lambda x: list(zip(x.ts, x.iidx))).to_dict()
    for k in hist.keys():
        hist[k] = [x[1] for x in sorted(hist[k])]
    logger.info(f'test data size: {te_df.shape}')
    rating_model = None
    tr_mat = frame2mat(tr_df, user_num, item_num)
    choices = args.models
    logging.info(f'Running {choices}')
    # Whether the f / g sides of the AC-GAN use the sequence architecture.
    acgan_config = [args.f_model == 'seq', args.g_model == 'seq']
    pop_factor = PopularModel(item_cnt)
    logging.info('-------The Popularity model-------')
    pop_model = PopRecommender(pop_factor)
    logger.info('biased eval for plian popular model on test')
    unbiased_eval(user_num, item_num, te_df, pop_model, past_hist=past_hist)
    logger.info('-------The SVD model---------')
    sv = SVDRecommender(tr_mat.shape[0], tr_mat.shape[1], args.dim)
    logger.info(f'model with dimension {args.dim}')
    sv.fit(tr_mat)
    logger.info('biased eval for SVD model on test')
    unbiased_eval(user_num, item_num, te_df, sv, past_hist=past_hist)
    #unbiased_eval(user_num, item_num, te_df, sv)

    def get_model(model_str, user_num, item_num, factor_num, max_len=50, num_layer=2):
        # Factory mapping a model name to a freshly constructed module.
        if model_str == 'mlp':
            return MLPRecModel(user_num, item_num, factor_num)
        elif model_str == 'gmf':
            return FactorModel(user_num, item_num, factor_num)
        elif model_str == 'ncf':
            return NCFModel(user_num, item_num, factor_num)
        elif model_str == 'seq':
            return AttentionModel(user_num, item_num, args.dim, max_len=max_len, num_layer=num_layer)
        else:
            raise NotImplementedError(f'{model_str} is not implemented')

    def complete_experiment(model_str, user_num, item_num, dim, is_deep):
        # Runs three variants of one architecture -- plain, popularity-adjusted
        # and mirror-adjusted (exposure model = the base model itself) --
        # evaluating each on the test split.
        logging.info(f'-------The {model_str} model-------')
        base_factor = get_model(model_str, user_num=user_num, item_num=item_num, factor_num=dim)
        if is_deep:
            base_model = DeepRecommender(user_num, item_num, base_factor)
        else:
            base_model = ClassRecommender(user_num, item_num, base_factor)
        base_model.fit(tr_df, test_df=te_df,
                       num_epochs=args.epoch,
                       cuda=args.cuda_idx,
                       decay=args.decay,
                       num_neg=args.num_neg,
                       batch_size=args.batch_size,
                       past_hist=past_hist,
                       lr=args.lr)
        logger.info(f'eval for {model_str} model on test')
        unbiased_eval(user_num, item_num, te_df, base_model, past_hist=past_hist)
        logging.info(f'-------The {model_str} Pop Adjust model-------')
        pop_adjust_factor = get_model(model_str, user_num=user_num, item_num=item_num, factor_num=dim)
        if is_deep:
            pop_adjust_model = DeepRecommender(user_num, item_num, pop_adjust_factor, pop_factor, expo_thresh=0.1)
        else:
            pop_adjust_model = ClassRecommender(user_num, item_num, pop_adjust_factor, pop_factor, expo_thresh=0.1)
        pop_adjust_model.fit(tr_df, test_df=te_df,
                             num_epochs=args.epoch,
                             cuda=args.cuda_idx,
                             decay=args.decay,
                             num_neg=args.num_neg,
                             batch_size=args.batch_size,
                             past_hist=past_hist,
                             lr=args.lr)
        logger.info(f'eval for adjust {model_str} with popular model on test')
        unbiased_eval(user_num, item_num, te_df, pop_adjust_model, past_hist=past_hist)
        del pop_adjust_factor
        logging.info(f'-------The {model_str} Mirror Adjust model-------')
        adjust_factor = get_model(model_str, user_num=user_num, item_num=item_num, factor_num=dim)
        if is_deep:
            adjust_model = DeepRecommender(user_num, item_num, adjust_factor, base_factor, expo_thresh=0.1, expo_isdeep=True)
        else:
            adjust_model = ClassRecommender(user_num, item_num, adjust_factor, base_factor, expo_thresh=0.1)
        adjust_model.fit(tr_df, test_df=te_df,
                         num_epochs=args.epoch,
                         cuda=args.cuda_idx,
                         num_neg=args.num_neg,
                         batch_size=args.batch_size,
                         past_hist=past_hist,
                         decay=args.decay,
                         lr=args.lr)
        logger.info(f'eval for {model_str} mirror adjusted model')
        unbiased_eval(user_num, item_num, te_df, adjust_model, past_hist=past_hist)
        del adjust_factor

    for model_str in choices:
        if model_str != 'acgan':
            complete_experiment(model_str, user_num, item_num, args.dim, model_str == 'seq')
    if 'acgan' in choices:
        logger.info(f'-------The AC GAN model with {args.f_model} / {args.g_model}---------')
        # Build the f (ranking) side -- sequence or factor architecture.
        if acgan_config[0]:
            f = AttentionModel(user_num=user_num, item_num=item_num, factor_num=args.dim, max_len=50, num_layer=2)
            f_recommender = DeepRecommender(max_u=user_num, max_v=item_num, seq_model=f)
            f_recommender.set_user_record(hist)
        else:
            f = get_model(args.f_model, user_num=user_num, item_num=item_num, factor_num=args.dim)
            f_recommender = ClassRecommender(user_num, item_num, f)
        # Build the g (exposure) side the same way.
        if acgan_config[1]:
            g = AttentionModel(user_num=user_num, item_num=item_num, factor_num=args.dim, max_len=50, num_layer=2)
            g_recommender = DeepRecommender(max_u=user_num, max_v=item_num, seq_model=g)
            g_recommender.set_user_record(hist)
        else:
            g = get_model(args.g_model, user_num=user_num, item_num=item_num, factor_num=args.dim)
            g_recommender = ClassRecommender(user_num, item_num, g)
        beta = BetaModel(user_num=user_num, item_num=item_num)
        # Warm up g before the adversarial rounds.
        g_recommender.fit(tr_df,
                          num_epochs=args.g_round_head,
                          cuda=args.cuda_idx,
                          num_neg=args.num_neg,
                          batch_size=args.batch_size,
                          past_hist=past_hist,
                          decay=args.decay,
                          lr=args.lr)
        ac_train_v3(f, acgan_config[0], g, acgan_config[1], beta, tr_df,
                    user_num=user_num,
                    item_num=item_num,
                    val_df=te_df,
                    rating_model=rating_model,
                    num_epochs=args.epoch,
                    decay=args.decay,
                    cuda_idx=args.cuda_idx,
                    num_neg=args.num_neg,
                    batch_size=args.batch_size,
                    past_hist=past_hist,
                    g_weight=0.5,
                    lr=args.lr)
        logger.info(f'--final eval for AC GAN {args.f_model} / {args.g_model}--')
        unbiased_eval(user_num, item_num, te_df, f_recommender, past_hist=past_hist)
        unbiased_eval(user_num, item_num, te_df, g_recommender, past_hist=past_hist)
if __name__ == '__main__':
    # CLI entry point: hyper-parameters, logging setup, then main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=1024)
    parser.add_argument('--dim', type=int, default=32)
    parser.add_argument('--epoch', type=int, default=50)
    parser.add_argument('--decay', type=float, default=1e-7)
    parser.add_argument('--cuda_idx', type=int, default=0)
    parser.add_argument('--data_path', type=str, required=True)
    parser.add_argument('--data_name', type=str, default='ratings.feather')
    parser.add_argument('--prefix', type=str, default='ml_1m_real')
    # NOTE(review): type=str with an int default looks unintended -- confirm.
    parser.add_argument('--num_neg', type=str, default=4)
    parser.add_argument('--tune_mode', action='store_true')
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--models',
                        default=['ncf', 'mlp', 'gmf', 'acgan', 'seq'],
                        nargs='+',
                        help = "input a list from ['ncf', 'mlp', 'gmf', 'acgan', 'seq']")
    parser.add_argument('--f_model', type=str, default='mlp', choices=['ncf', 'mlp', 'gmf', 'seq'])
    parser.add_argument('--g_model', type=str, default='mlp', choices=['ncf', 'mlp', 'gmf', 'seq'])
    parser.add_argument('--g_round_head', type=int, default=5)
    args = parser.parse_args()
    ### set up logger
    # Everything goes to a timestamped log file; only warnings reach the console.
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    fh = logging.FileHandler(f'log/{args.prefix}-{str(time.time())}.log')
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARN)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)
    logger.info(args)
    main(args)
| 10,239
| 43.716157
| 125
|
py
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/simulation.py
|
"""Script to generate recommendation data from simulation"""
import argparse
from argparse import Namespace
import os
import pandas as pd #type: ignore
import torch #type: ignore
import numpy as np #type: ignore
from scipy import sparse as sp #type: ignore
from tqdm import tqdm #type: ignore
from acgan.data import RatingData
from acgan.module import FactorModel, NoiseFactor
from acgan.recommender import ClassRecommender, RatingEstimator, BPRRecommender
from sklearn.model_selection import train_test_split
def main(args: Namespace):
    """Simulate a click log from real ratings.

    Fits a relevance model and an exposure model, adds noise to the exposure
    factors, then samples observed pairs over either the full user x item grid
    or a random per-user item subset (--sample_sim). Writes model checkpoints
    and train/val/test feather splits to args.sim_path.
    """
    ratings = pd.read_feather(os.path.join(args.data_path, args.data_name))
    u_num, i_num = ratings.uidx.max() + 1, ratings.iidx.max() + 1
    rel_factor = FactorModel(u_num, i_num, args.dim)
    expo_factor = FactorModel(u_num, i_num, args.dim)
    rating_features = list(zip(ratings.uidx, ratings.iidx, ratings.rating))
    rating_model = RatingEstimator(u_num, i_num, rel_factor)
    #expo_model = BPRRecommender(u_num, i_num, expo_factor)
    expo_model = ClassRecommender(u_num, i_num, expo_factor)
    #
    print('train rel model')
    rating_model.fit(rating_features, cuda=0, num_epochs=args.epoch)
    #
    print('train expo model')
    full_mat = sp.csr_matrix((ratings.rating, (ratings.uidx, ratings.iidx)), shape=(u_num, i_num))
    print(full_mat.shape)
    expo_model.fit(ratings, cuda=0, num_epochs=args.epoch, decay=args.decay)
    torch.save(rel_factor.state_dict(), os.path.join(args.sim_path, f'{args.prefix}_rel.pt'))
    torch.save(expo_factor.state_dict(), os.path.join(args.sim_path, f'{args.prefix}_expo.pt'))
    print('get noise added expo model')
    # Perturb the exposure factors to emulate exposure-model mismatch.
    expo_factor = NoiseFactor(expo_factor, args.dim)
    expo_factor = expo_factor.cuda()
    torch.save(expo_factor.state_dict(), os.path.join(args.sim_path, f'{args.prefix}_expo_noise.pt'))
    # re-assign the expo model
    expo_model = ClassRecommender(u_num, i_num, expo_factor)
    sigmoid = lambda x: np.exp(x) / (1 + np.exp(x))
    if not args.sample_sim:
        # Dense simulation over every (user, item) pair; guard against
        # materialising an unreasonably large cross product.
        if u_num * i_num > 10000 * 10000:
            raise ValueError('Size over limit, please use --sample_sim flag')
        u_all = np.arange(u_num).repeat(i_num)
        i_all = np.arange(i_num).repeat(u_num).reshape(i_num, u_num).reshape(-1, order='F')
        est_rel = rating_model.score(u_all, i_all)
        est_click_prob = sigmoid(est_rel - args.epsilon)
        est_logits = expo_model.score(u_all, i_all)
        est_expo_prob = sigmoid(est_logits) ** args.p
        simu_size = len(est_click_prob)
        # A pair is observed only when it is both exposed and clicked.
        click_event = np.random.random(simu_size) < est_click_prob
        expo_event = np.random.random(simu_size) < est_expo_prob
        valid = click_event * expo_event
        out = {}
        out['uidx'] = u_all
        out['iidx'] = i_all
        out['click_prob'] = est_click_prob
        out['expo_prob'] = est_expo_prob
        out['click'] = click_event * expo_event
        out['expo'] = expo_event
        out_df = pd.DataFrame(out)
        # Full grid (with ground-truth probabilities) kept for analysis.
        out_df.to_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_full.feather'))
        print(f'total size: {len(valid)}, valid size: {valid.sum()}')
        out = {}
        out['uidx'] = u_all[valid]
        out['iidx'] = i_all[valid]
        out['click_prob'] = est_click_prob[valid]
        out['expo_prob'] = est_expo_prob[valid]
        out_df = pd.DataFrame(out)
    else:
        print('Too many items to compute, only consider a subset')
        # Sampled simulation: score a random item subset per user.
        template = np.ones(args.item_sample_size).astype(np.int64)
        out = {'uidx':[], 'iidx':[], 'click_prob':[], 'expo_prob':[]}
        for i in tqdm(range(u_num)):
            candidate_item = np.random.randint(low=0, high=i_num, size=args.item_sample_size)
            candidate_user = template * i
            est_rel = rating_model.score(candidate_user, candidate_item)
            est_click_prob = sigmoid(est_rel - args.epsilon)
            est_logits = expo_model.score(candidate_user, candidate_item)
            est_expo_prob = sigmoid(est_logits) ** args.p
            click_event = np.random.random(args.item_sample_size) < est_click_prob
            expo_event = np.random.random(args.item_sample_size) < est_expo_prob
            valid = click_event * expo_event
            if valid.sum() >= 1:
                out['uidx'].extend(candidate_user[valid].tolist())
                out['iidx'].extend(candidate_item[valid].tolist())
                out['click_prob'].extend(est_click_prob[valid].tolist())
                out['expo_prob'].extend(est_expo_prob[valid].tolist())
        if len(out['uidx']) == 0:
            raise ValueError('Simulation failed, does not gather positive signals')
        out_df = pd.DataFrame(out)
    # 80/10/10 split of the observed interactions.
    train_df, tmp_df = train_test_split(out_df, test_size=0.2)
    val_df, test_df = train_test_split(tmp_df, test_size=0.5)
    train_df = train_df.reset_index(drop=True)
    print(f'train shape: {train_df.shape}')
    val_df = val_df.reset_index(drop=True)
    print(f'val shape: {val_df.shape}')
    test_df = test_df.reset_index(drop=True)
    print(f'test shape: {test_df.shape}')
    print(train_df.head())
    train_df.to_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_train.feather'))
    val_df.to_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_val.feather'))
    test_df.to_feather(os.path.join(args.sim_path, f'{args.prefix}_sim_test.feather'))
if __name__ == '__main__':
    # CLI entry point for the click-log simulation.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=2048)
    parser.add_argument('--dim', type=int, default=32)
    parser.add_argument('--epsilon', type=float, default=4)
    # NOTE(review): epoch is declared float but passed as num_epochs -- confirm
    # int was intended.
    parser.add_argument('--epoch', type=float, default=5)
    parser.add_argument('--decay', type=float, default=1e-8)
    parser.add_argument('--p', type=float, default=3)
    parser.add_argument('--sim_path', type=str, required=True)
    parser.add_argument('--data_path', type=str, required=True)
    parser.add_argument('--data_name', type=str, default='ratings.feather')
    parser.add_argument('--prefix', type=str, default='ml_1m_mf')
    parser.add_argument('--sample_sim', action='store_true')
    parser.add_argument('--item_sample_size', type=int, default=2000)
    args = parser.parse_args()
    main(args)
| 6,208
| 44.654412
| 101
|
py
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/acgan/module.py
|
"""Modules are to express the mathematical relationships between parameters.
Design note: The module shoudn't care about things like data transformations. It should be
as self-contained as possible. Dirty jobs should be done by the Model class which serves
as a bridge between reality(data) and the theory(module).
"""
from typing import List, Tuple, Any, Optional
from scipy import sparse as sp # type: ignore
import numpy as np # type: ignore
import torch # type: ignore
from torch import nn # type: ignore
class PopularModel(nn.Module):
    """Non-trainable popularity prior: scores an item by its shrunk relative
    interaction count, ignoring the user entirely."""

    def __init__(self, pop_cnt: np.ndarray, shrinkage: float = 0.5):
        super(PopularModel, self).__init__()
        # Clamp zero counts to one, normalise by the most popular item and
        # dampen the popularity skew with the shrinkage exponent.
        counts = np.maximum(pop_cnt, 1)
        scores = (counts / counts.max()) ** shrinkage
        scores = scores.reshape(-1, 1)
        self.rep_pop_table = nn.Embedding(scores.shape[0], 1)
        self.rep_pop_table.weight.data.copy_(torch.from_numpy(scores))
        # Frozen lookup table: popularity is a fixed prior, not learned.
        self.rep_pop_table.weight.requires_grad = False

    def forward(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:  # type: ignore
        """Return the popularity score for each item; ``user`` is unused."""
        return self.rep_pop_table(item).squeeze(-1)

    def get_device(self):
        """Device on which the popularity lookup table lives."""
        return self.rep_pop_table.weight.device
class FactorModel(nn.Module):
    """Matrix-factorisation scorer.

    score(u, i) = final_layer(embed_user(u) * embed_item(i)) + bias_u + bias_i.
    All embedding tables use sparse gradients (``sparse=True``).
    """

    def __init__(self, user_num: int, item_num: int, factor_num: int) -> None:
        super(FactorModel, self).__init__()
        self.embed_user = nn.Embedding(user_num, factor_num, sparse=True)
        self.bias_user = nn.Embedding(user_num, 1, sparse=True)
        self.embed_item = nn.Embedding(item_num, factor_num, sparse=True)
        self.bias_item = nn.Embedding(item_num, 1, sparse=True)
        # Learned linear combination of the element-wise user*item product.
        self.final_layer = nn.Linear(factor_num, 1, bias=True)
        #self.bias_global = nn.Parameter(torch.zeros(1))
        nn.init.kaiming_normal_(self.embed_user.weight)
        nn.init.kaiming_normal_(self.embed_item.weight)
        nn.init.zeros_(self.bias_item.weight)
        nn.init.zeros_(self.bias_user.weight)

    def affinity_vector(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:  # type: ignore
        """Element-wise product of user and item embeddings."""
        vec_user = self.embed_user(user)
        vec_item = self.embed_item(item)
        prediction = (vec_user * vec_item)
        return prediction

    def forward(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:  # type: ignore
        """Scalar score per (user, item) pair, including both bias terms."""
        affinity_vec = self.affinity_vector(user, item)
        bias_user = self.bias_user(user).squeeze(-1)
        bias_item = self.bias_item(item).squeeze(-1)
        prediction = self.final_layer(affinity_vec).squeeze(-1)
        prediction += bias_item + bias_user
        return prediction

    def get_sparse_weight(self) -> List[torch.Tensor]:
        """Parameters that receive sparse gradients (for a sparse optimiser)."""
        out = [self.embed_user.weight, self.bias_user.weight,
               self.embed_item.weight, self.bias_item.weight]
        return out

    def get_dense_weight(self) -> List[torch.Tensor]:
        """Dense parameters (the final linear layer)."""
        out = []
        out.extend(self.final_layer.parameters())
        return out

    def get_l2(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:
        """L2 penalty over the batch's embeddings plus the final layer weight.

        Note: biases are deliberately left unregularised.
        """
        vec_user = self.embed_user(user)
        vec_item = self.embed_item(item)
        l2_loss = (vec_user ** 2).sum()
        l2_loss += (vec_item ** 2).sum()
        l2_loss += (self.final_layer.weight ** 2).sum()
        return l2_loss

    def get_device(self):
        """Device where the item embedding table lives."""
        return self.embed_item.weight.device
class BetaModel(nn.Module):
    """Weighting model for the adversarial training loop.

    Maps a g-score and a click label to a weight via three learned scalars:
    ``alpha`` (offset), ``beta`` (slope) and ``label_coef`` (label-gated
    slope). The per-user/per-item constants exist but are not used in
    ``forward`` (the "beta v2" variant); they still feed the regulariser.
    """

    def __init__(self, user_num: int, item_num: int) -> None:
        super(BetaModel, self).__init__()
        self.user_const = nn.Embedding(user_num, 1, sparse=True)
        self.item_const = nn.Embedding(item_num, 1, sparse=True)
        self.alpha = torch.nn.Parameter(torch.zeros(1))  # type: ignore
        self.beta = torch.nn.Parameter(torch.ones(1))  # type: ignore
        self.label_coef = torch.nn.Parameter(torch.zeros(1))  # type: ignore
        nn.init.zeros_(self.user_const.weight)
        nn.init.zeros_(self.item_const.weight)

    def forward(self, user: torch.Tensor, item: torch.Tensor, g_s: torch.Tensor, label: torch.Tensor) -> torch.Tensor:  # type: ignore
        """Affine transform of the g-score with a label interaction term;
        ``user`` and ``item`` are accepted but unused (beta v2)."""
        weight = self.alpha + self.beta * g_s + self.label_coef * label * g_s
        return weight

    def get_sparse_weight(self) -> List[torch.Tensor]:
        """Embedding weights that receive sparse gradients."""
        return [self.user_const.weight, self.item_const.weight]

    def get_dense_weight(self) -> List[torch.Tensor]:
        """Scalar parameters trained with a dense optimiser."""
        return [self.alpha, self.beta, self.label_coef]

    def get_l2(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:
        """L2 penalty over the per-user/per-item constants of the batch."""
        u_term = self.user_const(user).squeeze(-1)
        i_term = self.item_const(item).squeeze(-1)
        penalty = (u_term ** 2).sum()
        penalty = penalty + (i_term ** 2).sum()
        return penalty
class MLPRecModel(nn.Module):
    """MLP recommender: concatenated user/item embeddings fed through a ReLU tower.

    Args:
        user_num: number of users.
        item_num: number of items.
        factor_num: embedding dimension for users and items.
        layers_dim: hidden widths of the MLP tower; defaults to [32, 16].
            (FIX: was a mutable default argument ``[32, 16]``.)
    """

    def __init__(
            self,
            user_num: int,
            item_num: int,
            factor_num: int,
            layers_dim: Optional[List[int]] = None):
        super(MLPRecModel, self).__init__()
        # Build the default per call to avoid the shared-mutable-default pitfall.
        if layers_dim is None:
            layers_dim = [32, 16]
        self.embed_user = nn.Embedding(user_num, factor_num, sparse=True)
        self.embed_item = nn.Embedding(item_num, factor_num, sparse=True)
        nn.init.kaiming_normal_(self.embed_user.weight)
        nn.init.kaiming_normal_(self.embed_item.weight)
        self.dense_layers = nn.ModuleList()
        assert(isinstance(layers_dim, list))
        # First dense layer consumes the concatenated [user; item] vector.
        input_dims = [2 * factor_num] + layers_dim
        for i in range(len(layers_dim)):
            self.dense_layers.append(
                nn.Linear(input_dims[i], layers_dim[i], bias=True))
        self.act_func = nn.ReLU()
        self.out_put_layer = nn.Linear(layers_dim[-1], 1, bias=True)

    def affinity_vector(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:  # type: ignore
        """Hidden representation of a (user, item) pair: output of the MLP tower."""
        vec_user = self.embed_user(user)
        vec_item = self.embed_item(item)
        x = torch.cat([vec_user, vec_item], dim=-1)
        for linear_layer in self.dense_layers:
            x = linear_layer(x)
            x = self.act_func(x)
        return x

    def forward(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:  # type: ignore
        """Scalar preference score per (user, item) pair."""
        x = self.affinity_vector(user, item)
        prediction = self.out_put_layer(x).squeeze(-1)
        return prediction

    def get_device(self):
        """Device the model's parameters live on."""
        return self.embed_item.weight.device

    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Numpy convenience wrapper around ``forward`` for index lists."""
        with torch.no_grad():
            device = self.embed_user.weight.device
            ubt = torch.LongTensor(u_b).to(device)
            vbt = torch.LongTensor(v_b).to(device)
            score = self.forward(ubt, vbt).cpu().numpy()
        return score

    def get_sparse_weight(self) -> List[torch.Tensor]:
        """Embedding tables, trained with a sparse optimiser."""
        return [self.embed_user.weight, self.embed_item.weight]

    def get_dense_weight(self) -> List[torch.Tensor]:
        """Parameters of the dense tower and the output layer."""
        out = []
        for layer in self.dense_layers:
            out.extend(layer.parameters())
        out.extend(self.out_put_layer.parameters())
        return out

    def get_l2(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:
        """L2 penalty on the embeddings looked up for this batch."""
        vec_user = self.embed_user(user)
        vec_item = self.embed_item(item)
        l2_loss = (vec_user ** 2).sum()
        l2_loss += (vec_item ** 2).sum()
        return l2_loss
class NCFModel(nn.Module):
    """Neural Collaborative Filtering: an MLP tower and a GMF tower fused by a linear head.

    The two towers' affinity vectors are concatenated to ``factor_num`` total
    dimensions before the final linear scoring layer.
    """

    def __init__(self, user_num: int, item_num: int, factor_num: int, layers_dim: Optional[List[int]] = None):
        super(NCFModel, self).__init__()
        if layers_dim is None:
            layers_dim = [factor_num // 2, factor_num // 4]
        # GMF output width fills whatever the MLP tower does not cover.
        mlp_out_dim = layers_dim[-1]
        gmf_out_dim = factor_num - mlp_out_dim
        gmf_in_dim = gmf_out_dim
        self.mlp = MLPRecModel(user_num, item_num, factor_num // 2, layers_dim=layers_dim)
        self.gmf = FactorModel(user_num, item_num, gmf_in_dim)
        self.out_put_layer = nn.Linear(in_features=factor_num, out_features=1, bias=True)

    def affinity_vector(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:
        """Concatenation of the MLP and GMF affinity vectors."""
        return torch.cat(
            [self.mlp.affinity_vector(user, item),
             self.gmf.affinity_vector(user, item)],
            dim=-1)

    def forward(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:
        """Scalar preference score from the fused towers."""
        fused = self.affinity_vector(user, item)
        return self.out_put_layer(fused).squeeze(-1)

    def get_sparse_weight(self):
        """Sparse tables from both towers."""
        return self.mlp.get_sparse_weight() + self.gmf.get_sparse_weight()

    def get_dense_weight(self):
        """Dense parameters from both towers (head weights handled via get_l2)."""
        return self.mlp.get_dense_weight() + self.gmf.get_dense_weight()

    def get_l2(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:
        """Combined batch L2 of both towers plus the fusion head weight."""
        reg = self.mlp.get_l2(user, item) + self.gmf.get_l2(user, item)
        reg = reg + (self.out_put_layer.weight ** 2).sum()
        return reg

    def get_device(self):
        """Device of the GMF tower (all submodules share it)."""
        return self.gmf.get_device()
class StructureNoise(nn.Module):
    """Small MLP mapping a (user_vec, item_vec) pair to a non-negative scalar noise term."""

    def __init__(self, factor_num: int) -> None:
        super(StructureNoise, self).__init__()
        self.l1 = nn.Linear(2 * factor_num, factor_num)
        self.l2 = nn.Linear(factor_num, factor_num)
        self.l3 = nn.Linear(factor_num, 1)
        self.act = nn.ReLU()

    def forward(
            self,
            user_vec: torch.Tensor,
            item_vec: torch.Tensor) -> torch.Tensor:
        """Return one scalar per row; ReLU on the head makes the output non-negative."""
        h = torch.cat([user_vec, item_vec], dim=-1)
        h = self.act(self.l1(h))
        h = self.act(self.l2(h))
        return self.act(self.l3(h)).squeeze(-1)
class NoiseFactor(nn.Module):
    """Wraps a factor model and adds a learned, embedding-dependent noise term to its score.

    NOTE: the misspelled attribute name ``facotr_model`` is preserved on
    purpose — external code may access it by that name.
    """

    def __init__(self, facotr_model: torch.nn.Module, factor_num: int) -> None:
        super(NoiseFactor, self).__init__()
        self.noise_model = StructureNoise(factor_num)
        self.facotr_model = facotr_model
        # Expose the wrapped item table under the conventional attribute name.
        self.embed_item = self.facotr_model.embed_item

    def forward(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:  # type: ignore
        """Base-model score plus the noise head's output.

        The embedding lookups feeding the noise head are done under
        ``no_grad`` so gradients flow only into ``noise_model`` from
        that term.
        """
        score = self.facotr_model(user, item)
        with torch.no_grad():
            u_vec = self.facotr_model.embed_user(user)
            i_vec = self.facotr_model.embed_item(item)
        score = score + self.noise_model(u_vec, i_vec)
        return score

    def get_sparse_weight(self) -> List[torch.Tensor]:
        """No sparse weights of its own (the wrapped model is trained separately)."""
        return []

    def get_dense_weight(self) -> List[torch.Tensor]:
        """No dense weights reported for the combined-optimiser helper."""
        return []

    def get_l2(self, user: torch.Tensor, item: torch.Tensor) -> torch.Tensor:
        """Delegate batch L2 to the wrapped factor model."""
        return self.facotr_model.get_l2(user, item)

    def get_device(self):
        """Device of the wrapped factor model."""
        return self.facotr_model.get_device()
class AttentionModel(nn.Module):
    """Self-attentive sequence recommender.

    Encodes a user's padded item history with stacked multi-head
    self-attention layers, mean-pools the outputs into one context vector,
    and scores each candidate item via a linear map of the element-wise
    product of context and candidate embedding.
    """

    def __init__(
            self,
            user_num: int,
            item_num: int,
            factor_num: int,
            max_len: int = 20,
            num_heads: int = 2,
            num_layer: int = 2) -> None:
        super(AttentionModel, self).__init__()
        self.user_num = user_num
        self.item_num = item_num
        self.factor_num = factor_num
        # Index ``item_num`` is reserved as the padding slot for histories.
        self.padding_idx = self.item_num
        self.max_len = max_len
        #self.embed_user = nn.Embedding(user_num, factor_num, sparse=True)
        self.embed_item = nn.Embedding(item_num + 1, factor_num, sparse=False, padding_idx=self.padding_idx)
        #self.target_item_embed = nn.Embedding(item_num + 1, factor_num, sparse=False, padding_idx=self.padding_idx)
        # Learned (not sinusoidal) positional encodings for the history window.
        self.position_encode = nn.Embedding(max_len, factor_num, sparse=False)
        self.attention_list = nn.ModuleList()
        for _ in range(num_layer):
            self.attention_list.append(nn.MultiheadAttention(embed_dim=factor_num, num_heads=num_heads))
        self.output_affine = nn.Linear(factor_num, 1, bias=True)

    def get_device(self):
        """Device of the item embedding table (used to place new tensors)."""
        return self.embed_item.weight.device

    def seq_vector(self, user_hist: torch.Tensor) -> torch.Tensor:
        """Encode a batch of padded histories into one context vector each.

        Args:
            user_hist: [B, max_len] item-index matrix, padded with ``padding_idx``.

        Returns:
            [B, factor_num] mean-pooled output of the attention stack.
        """
        hist_item_vec = self.embed_item(user_hist)  # [B, max_len, factor_num]
        pos = torch.arange(self.max_len, device=self.get_device()).reshape(1, -1).repeat(hist_item_vec.shape[0], 1)
        # add positional encoding; padding positions are masked out of attention
        mask_item = (user_hist == self.padding_idx)
        attn_item_vec = hist_item_vec + self.position_encode(pos)
        # nn.MultiheadAttention expects sequence-first layout.
        attn_item_vec = attn_item_vec.transpose(1, 0)  # [max_len, B, factor_num]
        for atten_layer in self.attention_list:
            attn_item_vec, _ = atten_layer(
                query=attn_item_vec,
                key=attn_item_vec,
                value=attn_item_vec,
                key_padding_mask=mask_item)
        # attn_item_vec - [max_len, B, factor_num]
        attn_item_vec = attn_item_vec.mean(dim=0)  # [B, factor_num]
        return attn_item_vec

    def forward(self, items: torch.Tensor, user_hists: torch.Tensor) -> torch.Tensor:
        """Score candidate ``items`` ([B, ord]) against histories ([B, max_len])."""
        # items - [B, ord]
        assert(len(items.shape) == 2)
        assert(items.shape[0] == user_hists.shape[0])
        affinity_vec = self.seq_vector(user_hists)  # [B, dim]
        affinity_vec = affinity_vec.unsqueeze(1).repeat(1, items.shape[1], 1)  # [B, ord, dim]
        # Candidates share the history embedding table (a separate target
        # table was tried and left commented out above).
        target_item_vec = self.embed_item(items)  # - [B, ord, dim]
        #target_item_vec = self.target_item_embed(items) # - [B, ord, dim]
        score = self.output_affine(affinity_vec * target_item_vec)  # [B, ord, 1]
        return score.squeeze(-1)  # [B, ord]

    def get_dense_weight(self):
        """All parameters are dense for this model."""
        return list(self.parameters())

    def get_sparse_weight(self):
        """No sparse embedding tables are used."""
        return []

    def get_l2(self, users: torch.Tensor, items: torch.Tensor) -> torch.Tensor:
        # Regularisation is intentionally disabled (multiplied by 0); the
        # method only exists to keep the factor-model interface.
        target_item_vec = self.embed_item(items)
        return (target_item_vec ** 2).sum() * 0
| 14,132
| 39.495702
| 134
|
py
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/acgan/data.py
|
import os
import argparse
import logging
from typing import Dict, List, Tuple, Optional, Set
import numpy as np # type: ignore
import pandas as pd # type: ignore
from scipy import sparse as sp # type: ignore
import torch # type: ignore
from torch.utils import data # type: ignore
from numpy.random import RandomState # type: ignore
def ml_1m(
        data_path: str,
        train_path: str,
        val_path: str,
        test_path: str) -> None:
    """Create leave-two-out splits of MovieLens-1M as sparse 0/1 matrices.

    Reads ``ratings.dat`` from ``data_path``, shifts user/item ids to be
    0-based, caches the frame as ``ratings.feather``, and writes three
    user-by-item CSR matrices (``.npz``): per user, all but the two most
    recent interactions go to train, the second-to-last to val and the
    last to test.

    Args:
        data_path: directory containing ``ratings.dat``.
        train_path: output path for the train matrix (``.npz``).
        val_path: output path for the validation matrix (``.npz``).
        test_path: output path for the test matrix (``.npz``).
    """
    ratings = pd.read_csv(
        os.path.join(
            data_path,
            'ratings.dat'),
        sep='::',
        names=[
            'uidx',
            'iidx',
            'rating',
            'ts'],
        dtype={
            'uidx': int,
            'iidx': int,
            'rating': float,
            'ts': float})
    print(ratings.shape)
    # MovieLens ids are 1-based; shift to 0-based for matrix indexing.
    ratings.uidx = ratings.uidx - 1
    ratings.iidx = ratings.iidx - 1
    print(ratings.head())
    ratings.to_feather(os.path.join(data_path, 'ratings.feather'))
    # Collect each user's (item, timestamp) interaction history.
    user_hist: Dict[int, List[Tuple[int, float]]] = {}
    for row in ratings.itertuples():
        if row.uidx not in user_hist:
            user_hist[row.uidx] = []
        user_hist[row.uidx].append((row.iidx, row.ts))
    # In the output matrices, rows are users and columns are items.
    train_record: List[Tuple[int, int]] = []
    val_record: List[Tuple[int, int]] = []
    test_record: List[Tuple[int, int]] = []
    for uidx, hist in user_hist.items():
        # Chronological (ascending-ts) item order; ML-1M guarantees >= 20
        # ratings per user, which the assert relies on.
        ord_hist = [x[0] for x in sorted(hist, key=lambda x: x[1])]
        assert(len(ord_hist) >= 20)
        for v in ord_hist[:-2]:
            train_record.append((uidx, v))
        val_record.append((uidx, ord_hist[-2]))
        test_record.append((uidx, ord_hist[-1]))
    # Implicit-feedback targets: every kept interaction becomes a 1.
    train_dat = np.ones(len(train_record))
    val_dat = np.ones(len(val_record))
    test_dat = np.ones(len(test_record))
    train_npy = np.array(train_record)
    val_npy = np.array(val_record)
    test_npy = np.array(test_record)
    mat_shape = (ratings.uidx.max() + 1, ratings.iidx.max() + 1)
    train_csr = sp.csr_matrix((train_dat, (train_npy[:, 0], train_npy[:, 1])),
                              shape=mat_shape)
    val_csr = sp.csr_matrix((val_dat, (val_npy[:, 0], val_npy[:, 1])),
                            shape=mat_shape)
    test_csr = sp.csr_matrix((test_dat, (test_npy[:, 0], test_npy[:, 1])),
                             shape=mat_shape)
    sp.save_npz(train_path, train_csr)
    sp.save_npz(val_path, val_csr)
    sp.save_npz(test_path, test_csr)
def time_based_split(
        ratings: pd.DataFrame,
        data_path: str,
        min_len: int = 20) -> None:
    """Split per-user interaction histories by timestamp into train/val/test.

    For every user the two most recent interactions go to val and test
    respectively; everything earlier goes to train. The three splits are
    written as feather files under ``data_path``.

    Args:
        ratings: interaction frame with columns [uidx, iidx, rating, ts].
        data_path: output directory for train/val/test feather files.
        min_len: minimum number of interactions required per user.

    Raises:
        ValueError: if ``ratings`` does not have the expected columns.
    """
    names = ['uidx', 'iidx', 'rating', 'ts']
    if (ratings.columns == names).min() < 1:
        raise ValueError(
            f"Only support data frame with columns ['uidx', 'iidx', 'rating', 'ts'], the input is {ratings.columns}")
    # Collect each user's (item, rating, timestamp) history.
    user_hist: Dict[int, List[Tuple[int, float, float]]] = {}
    for row in ratings.itertuples():
        if row.uidx not in user_hist:
            user_hist[row.uidx] = []
        user_hist[row.uidx].append((row.iidx, row.rating, row.ts))
    train_record = {x: [] for x in names}
    val_record = {x: [] for x in names}
    test_record = {x: [] for x in names}

    def put2record(record, u, obs):
        # ``obs`` is an (iidx, rating, ts) tuple for user ``u``.
        record['uidx'].append(u)
        record['iidx'].append(obs[0])
        record['rating'].append(obs[1])
        record['ts'].append(obs[2])

    for uidx, hist in user_hist.items():
        # Chronological (ascending-ts) order within each user.
        ord_hist = [x for x in sorted(hist, key=lambda x: x[-1])]
        # BUG FIX: this check previously hard-coded 20, ignoring ``min_len``.
        assert(len(ord_hist) >= min_len)
        for v in ord_hist[:-2]:
            put2record(train_record, uidx, v)
        put2record(val_record, uidx, ord_hist[-2])
        put2record(test_record, uidx, ord_hist[-1])
    train_path = os.path.join(data_path, 'train.feather')
    pd.DataFrame(train_record).to_feather(train_path)
    val_path = os.path.join(data_path, 'val.feather')
    pd.DataFrame(val_record).to_feather(val_path)
    test_path = os.path.join(data_path, 'test.feather')
    pd.DataFrame(test_record).to_feather(test_path)
def ml_1m_v2(data_path: str) -> None:
    """Load MovieLens-1M ratings, re-index ids to 0-based, cache as feather,
    and write time-based train/val/test splits into ``data_path``."""
    columns = ['uidx', 'iidx', 'rating', 'ts']
    col_types = {'uidx': int, 'iidx': int, 'rating': float, 'ts': float}
    ratings = pd.read_csv(os.path.join(data_path, 'ratings.dat'),
                          sep='::',
                          names=columns,
                          dtype=col_types)
    print(ratings.shape)
    # MovieLens ids start at 1; make them 0-based.
    ratings.uidx = ratings.uidx - 1
    ratings.iidx = ratings.iidx - 1
    print(ratings.head())
    ratings.to_feather(os.path.join(data_path, 'ratings.feather'))
    time_based_split(ratings, data_path, 20)
class NegSeqData(data.Dataset):
    """Dataset of (user, pos_item, neg_item_list) triples with negative sampling.

    Call ``ng_sample()`` before each training epoch to (re)draw negatives.
    When ``is_training`` is False, ``__getitem__`` yields (user, item, item).
    """

    def __init__(self,
                 features: List[Tuple[int,
                                      int]],
                 num_item: int,
                 num_neg: int = 0,
                 is_training: bool = False,
                 seed: int = 123,
                 past_hist: Optional[Dict[int,
                                          Set[int]]] = None) -> None:
        super(NegSeqData, self).__init__()
        self.features = features
        self.num_item = num_item
        # Observed pairs, for O(1) rejection during negative sampling.
        self.train_set = set(features)
        self.num_neg = num_neg
        self.is_training = is_training
        self.past_hist = past_hist
        # Private RNG keeps sampling reproducible across runs.
        self.prng = RandomState(seed)

    def ng_sample(self) -> None:
        """Draw ``num_neg`` negatives per positive, rejecting observed/past pairs."""
        self.features_fill = []
        for x in self.features:
            u, i = x[0], x[1]
            negatives = []
            for _ in range(self.num_neg):
                is_dup = True
                while is_dup:
                    j = self.prng.randint(self.num_item)
                    is_dup = (u, j) in self.train_set
                    if self.past_hist is not None:
                        is_dup = is_dup or j in self.past_hist.get(u, [])
                negatives.append(j)
            self.features_fill.append([u, i, negatives])

    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, idx):
        if self.is_training:
            user, item_i, negs = self.features_fill[idx]
            return user, item_i, np.array(negs)
        user = self.features[idx][0]
        item_i = self.features[idx][1]
        return user, item_i, item_i
class NegSampleData(data.Dataset):
    """Dataset of (user, pos_item, neg_item) triples via uniform negative sampling.

    Call ``ng_sample()`` before each training epoch; each positive is
    expanded into ``num_neg`` triples. When not training, ``__getitem__``
    yields (user, item, item).
    """

    def __init__(self,
                 features: List[Tuple[int,
                                      int]],
                 num_item: int,
                 num_neg: int = 0,
                 is_training: bool = False,
                 seed: int = 123) -> None:
        super(NegSampleData, self).__init__()
        self.features = features
        self.num_item = num_item
        # Observed pairs, for O(1) rejection during sampling.
        self.train_set = set(features)
        self.num_neg = num_neg
        self.is_training = is_training
        self.prng = RandomState(seed)

    def ng_sample(self) -> None:
        """Expand each positive into ``num_neg`` (u, i, j) triples with fresh negatives."""
        assert self.is_training, 'no need to sample when testing'
        self.features_fill = []
        for x in self.features:
            u, i = x[0], x[1]
            for _ in range(self.num_neg):
                j = self.prng.randint(self.num_item)
                while (u, j) in self.train_set:
                    j = self.prng.randint(self.num_item)
                self.features_fill.append([u, i, j])

    def __len__(self) -> int:
        if self.is_training:
            return self.num_neg * len(self.features)
        return len(self.features)

    def __getitem__(self, idx):
        if self.is_training:
            user, item_i, item_j = self.features_fill[idx]
            return user, item_i, item_j
        user = self.features[idx][0]
        item_i = self.features[idx][1]
        return user, item_i, item_i
class RatingData(data.Dataset):
    """Thin Dataset wrapper over explicit (user, item, rating) triples."""

    def __init__(self, features: List[Tuple[int, int, float]]) -> None:
        super(RatingData, self).__init__()
        self.features = features

    def __len__(self):
        """Number of rating triples."""
        return len(self.features)

    def __getitem__(self, idx):
        """Return the (user, item, rating) triple at ``idx`` unchanged."""
        return self.features[idx]
class NegSequenceData(data.Dataset):
    """Sequence dataset yielding (user, pos_item, neg_items, padded_history).

    Each record pairs a target item with the up-to-``max_len`` items the
    user interacted with before it. Negatives are drawn uniformly on every
    access, rejecting the positive item and (optionally) the user's past
    history.

    Args:
        hist: per-user chronologically ordered item lists.
        max_len: history window length; shorter histories are left-padded.
        padding_idx: index written into padding positions.
        item_num: number of items (range of negative sampling).
        num_neg: negatives drawn per sample.
        is_training: accepted for API parity with sibling datasets; unused.
        past_hist: optional per-user item collections excluded from negatives.
        seed: RNG seed for reproducible sampling.
        window: if True, emit one sample per history position; otherwise
            only one sample per user (last item as target).
        allow_empty: keep samples whose history window is empty.
    """

    def __init__(self, hist: Dict[int, List[int]],
                 max_len: int,
                 padding_idx: int,
                 item_num: int,
                 num_neg: int = 0,
                 is_training: bool = False,
                 past_hist: Optional[Dict[int, Set[int]]] = None,
                 seed: int = 123,
                 window: bool = True,
                 allow_empty: bool = False) -> None:
        super(NegSequenceData, self).__init__()
        self.max_len = max_len
        self.padding_idx = padding_idx
        self.num_item = item_num
        self.num_neg = num_neg
        self.past_hist = past_hist
        self.prng = RandomState(seed)
        self.logger = logging.getLogger(__name__)
        self.logger.debug('Build windowed data')
        self.records = []
        for uidx, item_list in hist.items():
            if window:
                # One sample per position: target item_list[i], history is the
                # preceding window of at most ``max_len`` items.
                for i in range(len(item_list)):
                    item_slice = item_list[max(0, i - max_len):i]
                    if not allow_empty and len(item_slice) == 0:
                        continue
                    self.records.append([uidx, item_list[i], item_slice])
            else:
                # Single sample per user: last item as target.
                if not allow_empty and len(item_list) == 1:
                    continue
                self.records.append([uidx, item_list[-1], item_list[-(max_len + 1):-1]])

    def __len__(self) -> int:
        return len(self.records)

    def __getitem__(self, idx):
        # Left-pad the history with ``padding_idx`` up to a fixed length.
        temp_hist = np.zeros(self.max_len, dtype=int) + self.padding_idx
        uidx, pos_item, item_hist = self.records[idx]
        assert(len(temp_hist) >= len(item_hist))
        if len(item_hist) > 0:
            temp_hist[-len(item_hist):] = item_hist
        negitem_list = np.zeros(self.num_neg, dtype=int)
        # BUG FIX: this loop previously reused ``idx`` as its loop variable,
        # shadowing the dataset-index parameter.
        for k in range(self.num_neg):
            is_dup = True
            while is_dup:
                negitem = self.prng.randint(self.num_item)
                is_dup = negitem == pos_item
                if self.past_hist is not None:
                    is_dup = is_dup or negitem in self.past_hist.get(uidx, [])
            negitem_list[k] = negitem
        return uidx, pos_item, negitem_list, temp_hist
if __name__ == '__main__':
    # Legacy v1 invocation kept for reference:
    # ml_1m('/mnt/c0r00zy/a()c_gan/data/ml-1m',
    # '/mnt/c0r00zy/ac_gan/data/ml-1m/train.npz',
    # '/mnt/c0r00zy/ac_gan/data/ml-1m/val.npz',
    # '/mnt/c0r00zy/ac_gan/data/ml-1m/test.npz')
    # CLI entry point: split the MovieLens-1M ratings found under
    # --data_path into train/val/test feather files (see ml_1m_v2).
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, required=True)
    args = parser.parse_args()
    ml_1m_v2(args.data_path)
| 11,006
| 34.621359
| 117
|
py
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/acgan/recommender.py
|
from typing import List, Optional, Tuple, Dict, Set
import time
import logging
from tqdm import tqdm # type: ignore
from scipy import sparse as sp # type: ignore
import numpy as np # type: ignore
from sklearn.utils.extmath import randomized_svd # type: ignore
import torch # type: ignore
from torch import nn # type: ignore
from torch.utils import data # type: ignore
import pandas as pd # type: ignore
from numpy.random import RandomState # type: ignore
from acgan.module import PopularModel
from acgan.data import NegSampleData, RatingData, NegSeqData, NegSequenceData
class MultipleOptimizer:
    """Treat several torch optimisers (e.g. sparse + dense) as a single one."""

    def __init__(self, *op):
        self.optimizers = op

    def zero_grad(self):
        """Clear gradients on every wrapped optimiser."""
        for optimizer in self.optimizers:
            optimizer.zero_grad()

    def step(self):
        """Apply an update step with every wrapped optimiser."""
        for optimizer in self.optimizers:
            optimizer.step()
def build_optimizer(lr, *models):
    """Build a combined optimiser: SparseAdam for sparse tables, Adam for dense params.

    Args:
        lr: learning rate shared by both optimisers.
        *models: modules exposing ``get_sparse_weight()`` / ``get_dense_weight()``.

    Returns:
        A ``MultipleOptimizer`` wrapping the created optimisers.

    Raises:
        ValueError: if the models expose no parameters at all.
    """
    sparse_params = []
    dense_params = []
    for model in models:
        sparse_params.extend(model.get_sparse_weight())
        dense_params.extend(model.get_dense_weight())
    optimizer_list = []
    if sparse_params:
        optimizer_list.append(torch.optim.SparseAdam(
            params=sparse_params, lr=lr))
    if dense_params:
        optimizer_list.append(torch.optim.Adam(params=dense_params, lr=lr))
    if not optimizer_list:
        raise ValueError('Need at least one dense or sparse weights')
    return MultipleOptimizer(*optimizer_list)
class Recommender:
    """Abstract recommender interface; subclasses implement ``score`` (and usually ``fit``)."""

    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Return one relevance score per aligned (user, item) pair.

        FIX: dropped the unreachable ``return np.zeros(0)`` that followed
        the raise.
        """
        raise NotImplementedError()

    def recommend(self, u_s: int, cand_b: List[int], top_k: int) -> List[int]:
        """Return the ``top_k`` candidates from ``cand_b`` ranked for user ``u_s``."""
        u_b = [u_s] * len(cand_b)
        scores = self.score(u_b, cand_b)
        # Highest score first.
        top_k_ind = scores.argsort()[::-1][:top_k]
        return [cand_b[ind] for ind in top_k_ind]

    def fit(self, df: pd.DataFrame) -> None:
        """Train the recommender on an interaction frame; must be overridden."""
        raise NotImplementedError()
class PopRecommender(Recommender):
    """Scores pairs with a pre-trained popularity module; no fitting here."""

    def __init__(self, pop_module: nn.Module) -> None:
        self.pop_module = pop_module

    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Evaluate the popularity module on aligned (user, item) index lists."""
        with torch.no_grad():
            device = self.pop_module.get_device()
            self.pop_module.eval()
            users = torch.LongTensor(u_b).to(device)  # type: ignore
            items = torch.LongTensor(v_b).to(device)  # type: ignore
            return self.pop_module(users, items).cpu().numpy()
class RandRecommender(Recommender):
    """Baseline scoring every pair with an independent uniform random number."""

    def __init__(self, max_u: int, max_v: int) -> None:
        self.max_u = max_u
        self.max_v = max_v

    def fit(self, df: pd.DataFrame) -> None:
        # A random scorer has nothing to learn.
        pass

    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        # One U(0, 1) draw per pair; only the batch size matters.
        return np.random.rand(len(u_b))
class UserBasedKnn(Recommender):
    """User-based neighbourhood model: scores come from (R·Rᵀ + I)·R."""

    def __init__(self, max_u: int, max_v: int) -> None:
        self.max_u = max_u
        self.max_v = max_v
        self.user_item_score = None  # filled by fit()

    def fit(self, df: pd.DataFrame) -> None:
        """Precompute the dense user-item score matrix from the rating frame."""
        ratings = sp.csr_matrix(
            (df.rating, (df.uidx, df.iidx)), shape=(self.max_u, self.max_v))
        # User-user similarity via rating co-occurrence; identity keeps each
        # user's own history in their scores.
        similarity = ratings.dot(ratings.T) + sp.eye(self.max_u)
        self.user_item_score = similarity.dot(ratings)

    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Look up the precomputed scores for aligned (user, item) pairs."""
        return np.asarray(self.user_item_score[u_b, v_b]).reshape(-1)
class PopRecommenderV2(Recommender):
    """Popularity recommender that fits per-item counts from an interaction frame."""

    def __init__(self, max_u: int, max_v: int) -> None:
        self.max_u = max_u
        self.max_v = max_v
        self.pop_module = None  # built in fit()

    def fit(self, df: pd.DataFrame) -> None:
        """Count interactions per item and wrap them in a PopularModel."""
        counts = df.groupby('iidx').count().uidx.to_dict()
        # Items never seen in ``df`` get a count of zero.
        item_cnt = np.array([counts.get(iidx, 0) for iidx in range(self.max_v)])
        self.pop_module = PopularModel(item_cnt)

    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Evaluate the fitted popularity module on aligned index lists."""
        with torch.no_grad():
            device = self.pop_module.get_device()
            self.pop_module.eval()
            users = torch.LongTensor(u_b).to(device)  # type: ignore
            items = torch.LongTensor(v_b).to(device)  # type: ignore
            return self.pop_module(users, items).cpu().numpy()
class SVDRecommender(Recommender):
    """Truncated-SVD recommender fitted directly from a sparse interaction matrix."""

    def __init__(self, max_u: int, max_v: int, num_factors: int) -> None:
        self.USER_factors = np.zeros((max_u, num_factors))
        self.ITEM_factors = np.zeros((max_v, num_factors))
        self.num_factors = num_factors

    def fit(self, train_mat: sp.csr_matrix) -> None:
        """Factorise ``train_mat`` with randomized SVD; fold Σ into the item side."""
        U, Sigma, VT = randomized_svd(train_mat,
                                      n_components=self.num_factors,
                                      # n_iter=5,
                                      random_state=None)
        # R ≈ U · (Σ·Vᵀ): users keep U, items absorb the singular values.
        self.USER_factors = U
        self.ITEM_factors = (sp.diags(Sigma) * VT).T

    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Dot product of the latent factors for each aligned (user, item) pair."""
        return (self.USER_factors[u_b] * self.ITEM_factors[v_b]).sum(1)
class SVDRecommenderV2(Recommender):
    """Truncated-SVD recommender fitted from an interaction DataFrame."""

    def __init__(self, max_u: int, max_v: int, num_factors: int) -> None:
        self.USER_factors = np.zeros((max_u, num_factors))
        self.ITEM_factors = np.zeros((max_v, num_factors))
        self.max_u = max_u
        self.max_v = max_v
        self.num_factors = num_factors

    def fit(self, df: pd.DataFrame) -> None:
        """Build the sparse rating matrix from ``df`` and factorise it."""
        ratings = sp.csr_matrix(
            (df.rating, (df.uidx, df.iidx)), shape=(self.max_u, self.max_v))
        U, Sigma, VT = randomized_svd(ratings,
                                      n_components=self.num_factors,
                                      # n_iter=5,
                                      random_state=None)
        # R ≈ U · (Σ·Vᵀ): users keep U, items absorb the singular values.
        self.USER_factors = U
        self.ITEM_factors = (sp.diags(Sigma) * VT).T

    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Dot product of the latent factors for each aligned (user, item) pair."""
        return (self.USER_factors[u_b] * self.ITEM_factors[v_b]).sum(1)
class ContextItemKnn(Recommender):
    """Item-knn in a fixed embedding space: a user is the sum of their positive items."""

    def __init__(self, max_u: int, max_v: int, item_embed: np.ndarray) -> None:
        self.max_u = max_u
        self.max_v = max_v
        self.ITEM_factors = item_embed
        self.USER_factors = np.zeros((max_u, item_embed.shape[1]))

    def fit(self, df: pd.DataFrame) -> None:
        """Accumulate the embeddings of each user's positively rated items."""
        for uidx, iidx, rating in zip(df.uidx, df.iidx, df.rating):
            if rating > 0:
                self.USER_factors[uidx, :] += self.ITEM_factors[iidx, :]

    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Dot product between accumulated user vectors and item embeddings."""
        return (self.USER_factors[u_b] * self.ITEM_factors[v_b]).sum(1)
class BPRRecommender(Recommender):
    """Factor-model recommender trained with the BPR pairwise-ranking loss."""

    def __init__(self, max_u: int, max_v: int,
                 factor_model: nn.Module,
                 expo_factor: Optional[nn.Module] = None,
                 expo_thresh: float = 0.05,
                 expo_compound: float = 1):
        # max_u / max_v: number of users / items.
        self.max_u = max_u
        self.max_v = max_v
        # factor_model: torch module scoring batches of (user, item) indices.
        self.factor_model = factor_model
        # Exposure-model fields are stored but not used by the BPR loss below;
        # they mirror ClassRecommender's constructor interface.
        self.expo_factor = expo_factor
        self.expo_thresh = expo_thresh
        self.expo_compound = expo_compound
        self.logger = logging.getLogger(__name__)

    def fit(self,
            train_df: pd.DataFrame,
            test_df: Optional[pd.DataFrame] = None,
            rating_factor: Optional[nn.Module] = None,
            expo_model: Optional[Recommender] = None,
            past_hist: Optional[Dict[int, Set[int]]] = None,
            lr: float = 0.01,
            batch_size: int = 2048,
            num_neg: int = 1,
            num_epochs: int = 50,
            lambda_: float = 0.001,
            decay: float = 0.0,
            delta: float = 10,
            cuda: Optional[int] = None) -> None:
        """Train ``factor_model`` with BPR on implicit feedback.

        ``train_df`` needs columns uidx/iidx/ts. ``decay`` scales the L2
        penalty; ``lambda_`` and ``delta`` are accepted but unused here.
        If ``test_df`` is given, an evaluation pass runs during training.
        """
        if cuda is None:
            device = torch.device('cpu')
        else:
            device = torch.device(f'cuda:{cuda}')
        model = self.factor_model
        model.to(device)
        if self.expo_factor is not None:
            self.expo_factor.to(device)
            self.expo_factor.eval()
        u, v = train_df.uidx.tolist(), train_df.iidx.tolist()  # NOTE(review): unused
        optimizer = build_optimizer(lr, model)

        # NOTE(review): clipped sigmoid, unused by the BPR loss below.
        def act_func(x): return torch.sigmoid(torch.clamp(x, min=-8, max=8))
        # Per-user chronological item lists.
        hist = train_df.groupby('uidx').apply(
            lambda x: list(zip(x.ts, x.iidx))).to_dict()
        for k in hist.keys():
            hist[k] = [x[1] for x in sorted(hist[k])]
        # max_len=1 / allow_empty=True: the history tensor is ignored here;
        # the dataset is used only for its negative sampling.
        seq_data = NegSequenceData(
            hist,
            1,
            item_num=self.max_v,
            padding_idx=self.max_v,
            num_neg=num_neg,
            window=True,
            past_hist=past_hist,
            allow_empty=True)
        data_loader = data.DataLoader(
            seq_data,
            batch_size=batch_size,
            shuffle=True,
            num_workers=3,
            pin_memory=True)
        for epoch in tqdm(range(num_epochs)):
            model.train()
            loss_record = []
            for user, item_i, item_j_list, item_hist in data_loader:
                optimizer.zero_grad()
                model.zero_grad()
                # transfer to gpu
                bsz = item_hist.shape[0]
                user = user.to(device).long()  # [B]
                item_i = item_i.to(device).long()  # [B]
                item_j_list = item_j_list.to(device).long()  # [B, num_neg]
                #item_hist = item_hist.to(device).long() # [B, max_len]
                # Pair every negative with the (repeated) positive item.
                item_i_list = item_i.view(-1, 1).repeat(1, num_neg)  # [B, num_neg]
                users = user.unsqueeze(1).repeat(
                    1, num_neg)  # [B, num_neg]
                prediction_i = model(users, item_i_list)  # [B, num_neg]
                prediction_j = model(
                    users, item_j_list)  # [B, num_neg]
                # BPR: maximise log-sigmoid of the positive-negative margin.
                g_loss = -(prediction_i - prediction_j).sigmoid().log()
                g_loss = g_loss.mean()
                l2_loss = decay * model.get_l2(users, item_i_list)
                l2_loss += decay * model.get_l2(users, item_j_list)
                target = g_loss + l2_loss
                target.backward()
                optimizer.step()
                loss_record.append(
                    (target.item(), g_loss.item(), l2_loss.item()))
            loss_np = np.array(loss_record)  # per-epoch (target, loss, l2) rows
            # Debug logging of the epoch losses was disabled:
            # self.logger.debug(f'target: ..., loss: ..., l2: ...')
            if test_df is not None:
                # Per-epoch evaluation; ``unbiased_eval`` is defined elsewhere
                # in this module.
                model.eval()
                rating_model = None
                if rating_factor is not None:
                    rating_model = ClassRecommender(
                        self.max_u, self.max_v, rating_factor)
                unbiased_eval(self.max_u, self.max_v, test_df, self,
                              rel_model=rating_model,
                              cut_len=10,
                              expo_model=expo_model,
                              past_hist=past_hist)

    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Score aligned (user, item) index pairs with the trained factor model."""
        with torch.no_grad():
            device = self.factor_model.get_device()
            self.factor_model.eval()
            u_b_t = torch.LongTensor(u_b).to(device)  # type: ignore
            v_b_t = torch.LongTensor(v_b).to(device)  # type: ignore
            u_b_t.to(device)  # type: ignore
            v_b_t.to(device)  # type: ignore
            scores = self.factor_model(u_b_t, v_b_t)
            return scores.cpu().numpy()
class ClassRecommender(Recommender):
    """Factor-model recommender trained as a binary classifier.

    Positives get label 1 and sampled negatives label 0; when an exposure
    model is supplied, the cross-entropy is inverse-propensity weighted.
    """

    def __init__(self, max_u: int, max_v: int,
                 factor_model: nn.Module,
                 expo_factor: Optional[nn.Module] = None,
                 expo_thresh: float = 0.05,
                 expo_compound: float = 1) -> None:
        self.max_u = max_u
        self.max_v = max_v
        # factor_model: torch module scoring batches of (user, item) indices.
        self.factor_model = factor_model
        # Optional exposure (propensity) model, with a probability floor
        # (expo_thresh) and an exponent (expo_compound) applied to it.
        self.expo_factor = expo_factor
        self.expo_thresh = expo_thresh
        self.expo_compound = expo_compound
        self.logger = logging.getLogger(__name__)

    def fit(self,
            train_df: pd.DataFrame,
            test_df: Optional[pd.DataFrame] = None,
            rating_factor: Optional[nn.Module] = None,
            expo_model: Optional[Recommender] = None,
            past_hist: Optional[Dict[int, Set[int]]] = None,
            lr: float = 0.01,
            batch_size: int = 2048,
            num_neg: int = 1,
            num_epochs: int = 50,
            lambda_: float = 0.001,
            decay: float = 0.0,
            delta: float = 10,
            cuda: Optional[int] = None) -> None:
        """Train with (optionally IPS-weighted) binary cross-entropy.

        ``train_df`` needs columns uidx/iidx/ts. ``decay`` scales the L2
        penalty; ``lambda_`` and ``delta`` are accepted but unused here.
        If ``test_df`` is given, an evaluation pass runs during training.
        """
        if cuda is None:
            device = torch.device('cpu')
        else:
            device = torch.device(f'cuda:{cuda}')
        model = self.factor_model
        model.to(device)
        if self.expo_factor is not None:
            self.expo_factor.to(device)
            self.expo_factor.eval()
        #u, v = train_df.uidx.tolist(), train_df.iidx.tolist()
        optimizer = build_optimizer(lr, model)

        # Clipped sigmoid keeps log() away from 0/1 saturation.
        def act_func(x): return torch.sigmoid(torch.clamp(x, min=-8, max=8))
        # Per-user chronological item lists.
        hist = train_df.groupby('uidx').apply(
            lambda x: list(zip(x.ts, x.iidx))).to_dict()
        for k in hist.keys():
            hist[k] = [x[1] for x in sorted(hist[k])]
        # max_len=1 / allow_empty=True: histories are ignored; the dataset is
        # used only for its negative sampling.
        seq_data = NegSequenceData(
            hist,
            1,
            item_num=self.max_v,
            padding_idx=self.max_v,
            num_neg=num_neg,
            window=True,
            past_hist=past_hist,
            allow_empty=True)
        data_loader = data.DataLoader(
            seq_data,
            batch_size=batch_size,
            shuffle=True,
            num_workers=3,
            pin_memory=True)
        for epoch in tqdm(range(num_epochs)):
            model.train()
            loss_record = []
            for user, item_i, item_j_list, item_hist in data_loader:
                optimizer.zero_grad()
                model.zero_grad()
                # transfer to gpu
                bsz = item_hist.shape[0]
                user = user.to(device).long()  # [B]
                item_i = item_i.to(device).long()  # [B]
                item_j_list = item_j_list.to(device).long()  # [B, num_neg]
                #item_hist = item_hist.to(device).long() # [B, max_len]
                # Column 0 is the positive item, the rest are negatives.
                item_i = item_i.view(-1, 1)  # [B, 1]
                items = torch.cat([item_i, item_j_list],
                                  dim=1)  # [B, 1 + num_neg]
                # Labels: 1 for the first column (positive), 0 elsewhere.
                labels = (torch.arange(1 + num_neg).to(device)
                          < 1).float().repeat(bsz).view(bsz, -1)  # [B, 1 + num_neg]
                users = user.unsqueeze(1).repeat(
                    1, 1 + num_neg)  # [B, 1 + num_neg]
                g_s = model(users, items)
                g_prob = act_func(g_s)
                if self.expo_factor is not None:
                    # IPS-weighted cross entropy: divide by the clipped,
                    # compounded exposure probability of each pair.
                    expo_score = self.expo_factor(users, items)
                    expo_prob = act_func(expo_score) ** self.expo_compound
                    expo_prob = torch.clamp(expo_prob, min=self.expo_thresh)
                    g_loss = -1 * (labels * torch.log(g_prob) +
                                   (1 - labels) * torch.log(1 - g_prob)) / expo_prob
                else:
                    g_loss = -1 * (labels * torch.log(g_prob) +
                                   (1 - labels) * torch.log(1 - g_prob))
                g_loss = g_loss.mean()
                l2_loss = decay * model.get_l2(user, items)
                target = g_loss + l2_loss
                target.backward()
                optimizer.step()
                loss_record.append(
                    (target.item(), g_loss.item(), l2_loss.item()))
            loss_np = np.array(loss_record)  # per-epoch (target, loss, l2) rows
            # Debug logging of the epoch losses was disabled:
            # self.logger.debug(f'target: ..., loss: ..., l2: ...')
            if test_df is not None:
                # Per-epoch evaluation; ``unbiased_eval`` is defined elsewhere
                # in this module.
                model.eval()
                rating_model = None
                if rating_factor is not None:
                    rating_model = ClassRecommender(
                        self.max_u, self.max_v, rating_factor)
                unbiased_eval(self.max_u, self.max_v, test_df, self,
                              rel_model=rating_model,
                              cut_len=10,
                              expo_model=expo_model,
                              past_hist=past_hist)

    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Score aligned (user, item) index pairs with the trained factor model."""
        with torch.no_grad():
            device = self.factor_model.get_device()
            self.factor_model.eval()
            u_b_t = torch.LongTensor(u_b).to(device)  # type: ignore
            v_b_t = torch.LongTensor(v_b).to(device)  # type: ignore
            u_b_t.to(device)  # type: ignore
            v_b_t.to(device)  # type: ignore
            scores = self.factor_model(u_b_t, v_b_t)
            return scores.cpu().numpy()
class RatingEstimator(Recommender):
    """Explicit-rating regressor: fits ``factor_model`` with MSE on (u, i, rating) triples."""

    def __init__(self, max_u: int, max_v: int, factor_model: nn.Module):
        self.max_u = max_u
        self.max_v = max_v
        self.factor_model = factor_model

    def fit(self,
            features: List[Tuple[int, int, float]],
            lr: float = 0.01,
            batch_size: int = 2048,
            num_neg: int = 1,
            num_epochs: int = 50,
            lambda_: float = 0.001,
            decay: float = 0.0,
            cuda: Optional[int] = None) -> None:
        """Minimise MSE(model(u, i), rating) plus ``decay`` * batch L2.

        ``num_neg`` and ``lambda_`` are accepted for signature parity with
        the other recommenders but unused here.
        """
        if cuda is None:
            device = torch.device('cpu')
        else:
            device = torch.device(f'cuda:{cuda}')
        rating_data = RatingData(features)
        train_loader = torch.utils.data.DataLoader(
            rating_data, batch_size=batch_size, shuffle=True, num_workers=2)
        model = self.factor_model
        model.to(device)
        # minimizer: sparse embedding tables and dense layers need
        # different Adam variants.
        sp_minimizer = torch.optim.SparseAdam(
            params=model.get_sparse_weight(), lr=lr)
        ds_minimizer = torch.optim.Adam(params=model.get_dense_weight(), lr=lr)
        optimizer = MultipleOptimizer(sp_minimizer, ds_minimizer)
        loss_func = torch.nn.MSELoss()
        for epoch in tqdm(range(num_epochs)):
            model.train()
            loss_metric = []
            for user, item, rating in train_loader:
                optimizer.zero_grad()
                model.zero_grad()
                user = user.to(device).long()
                item = item.to(device).long()
                rating = rating.to(device).float()
                pred_rating = model(user, item)
                loss = loss_func(pred_rating, rating)
                l2_loss = decay * model.get_l2(user, item)
                target = loss + l2_loss
                target.backward()
                optimizer.step()
                loss_metric.append(loss.item())  # NOTE(review): collected but never reported

    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Predicted ratings for aligned (user, item) index lists."""
        with torch.no_grad():
            device = self.factor_model.embed_item.weight.device
            self.factor_model.eval()
            u_b_t = torch.LongTensor(u_b).to(device)  # type: ignore
            v_b_t = torch.LongTensor(v_b).to(device)  # type: ignore
            u_b_t.to(device)  # type: ignore
            v_b_t.to(device)  # type: ignore
            scores = self.factor_model(u_b_t, v_b_t)
            return scores.cpu().numpy()
class DeepRecommender(Recommender):
    """Recommender backed by a sequence model over each user's item history.

    Scoring feeds the user's (padded) recent item sequence plus candidate
    items to ``seq_model``. Optionally an exposure model (``expo_factor``)
    reweights the training loss by inverse clipped exposure probability.
    """

    def __init__(self, max_u: int, max_v: int,
                 seq_model: nn.Module,
                 expo_factor: Optional[nn.Module] = None,
                 expo_thresh: float = 0.05,
                 expo_compound: float = 1,
                 expo_isdeep: bool = False):
        # max_u / max_v: number of users / items.
        self.max_u = max_u
        self.max_v = max_v
        self.seq_model = seq_model
        # Sequence length and padding token come from the model itself.
        self.max_len = self.seq_model.max_len
        self.padding_idx = self.seq_model.padding_idx
        # Optional exposure model for inverse-propensity weighting in fit().
        self.expo_factor = expo_factor
        # Lower clip for exposure probability (avoids exploding weights).
        self.expo_thresh = expo_thresh
        # Exponent applied to the exposure probability.
        self.expo_compound = expo_compound
        self.logger = logging.getLogger(__name__)
        # user -> chronologically ordered item list; set by set_user_record().
        self.user_records = None
        # Whether expo_factor is itself a sequence model (items, hist) or a
        # factor model (users, items).
        self.expo_isdeep = expo_isdeep

    def set_user_record(self, user_record: Dict[int, List[int]]):
        # Must be called before score(); fit() calls it automatically.
        self.user_records = user_record

    def fit(self,
            train_df: pd.DataFrame,
            test_df: Optional[pd.DataFrame] = None,
            rating_factor: Optional[nn.Module] = None,
            expo_model: Optional[Recommender] = None,
            past_hist: Optional[Dict[int, Set[int]]] = None,
            lr: float = 0.01,
            batch_size: int = 2048,
            num_neg: int = 1,
            num_epochs: int = 50,
            lambda_: float = 0.001,
            decay: float = 0.0,
            delta: float = 10,
            window: bool = True,
            cuda: Optional[int] = None) -> None:
        """Train the sequence model with sampled negatives.

        train_df must have columns ``uidx``, ``iidx``, ``ts``. When
        ``cuda`` is None training runs on CPU. If ``test_df`` is given,
        an unbiased evaluation runs after every epoch.
        """
        if cuda is None:
            device = torch.device('cpu')
        else:
            device = torch.device(f'cuda:{cuda}')
        model = self.seq_model
        model.to(device)
        if self.expo_factor is not None:
            # Exposure model is frozen (eval mode) — only used for weights.
            self.expo_factor.to(device)
            self.expo_factor.eval()
        optimizer = build_optimizer(lr, model)

        # Clamped sigmoid keeps log() numerically safe below.
        def act_func(x): return torch.sigmoid(torch.clamp(x, min=-8, max=8))
        # Build per-user histories ordered by timestamp.
        hist = train_df.groupby('uidx').apply(
            lambda x: list(zip(x.ts, x.iidx))).to_dict()
        for k in hist.keys():
            hist[k] = [x[1] for x in sorted(hist[k])]
        self.set_user_record(hist)
        seq_data = NegSequenceData(
            hist, self.max_len,
            item_num=self.max_v,
            padding_idx=self.padding_idx,
            num_neg=num_neg,
            window=window,
            past_hist=past_hist)
        train_loader = data.DataLoader(
            seq_data,
            batch_size=batch_size,
            shuffle=True,
            num_workers=3,
            pin_memory=True)
        for epoch in tqdm(range(num_epochs)):
            model.train()
            loss_record = []
            for user, item_i, item_j_list, item_hist in train_loader:
                optimizer.zero_grad()
                model.zero_grad()
                bsz = item_hist.shape[0]
                user = user.to(device).long()
                item_i = item_i.to(device).long()
                item_j_list = item_j_list.to(device).long()
                item_hist = item_hist.to(device).long()
                item_i = item_i.view(-1, 1)  # [B, 1]
                # Positive item in column 0, negatives after it.
                items = torch.cat([item_i, item_j_list],
                                  dim=1)  # [B, 1 + num_neg]
                # Label 1 for column 0 (the positive), 0 elsewhere.
                labels = (torch.arange(1 + num_neg).to(device)
                          < 1).float().repeat(bsz).view(bsz, -1)  # [B, 1 + num_neg]
                users = user.unsqueeze(1).repeat(
                    1, 1 + num_neg)  # [B, 1 + num_neg]
                g_s = model(items, item_hist)
                g_prob = act_func(g_s)
                if self.expo_factor is not None:
                    if self.expo_isdeep:
                        expo_score = self.expo_factor(items, item_hist)
                    else:
                        expo_score = self.expo_factor(users, items)
                    # Inverse-propensity weight, clipped from below.
                    expo_prob = act_func(expo_score) ** self.expo_compound
                    expo_prob = torch.clamp(expo_prob, min=self.expo_thresh)
                    g_loss = -1 * (labels * torch.log(g_prob) +
                                   (1 - labels) * torch.log(1 - g_prob)) / expo_prob
                else:
                    g_loss = -1 * (labels * torch.log(g_prob) +
                                   (1 - labels) * torch.log(1 - g_prob))
                g_loss = g_loss.mean()
                # NOTE(review): L2 term is multiplied by 0, i.e. disabled.
                l2_loss = decay * g_loss * 0  # model.get_l2(user, items)
                target = g_loss + l2_loss
                target.backward()
                optimizer.step()
                loss_record.append(
                    (target.item(), g_loss.item(), l2_loss.item()))
            loss_np = np.array(loss_record)
            #self.logger.debug(
            #    f'target: {np.mean(loss_np[:, 0]):.5f},loss: {np.mean(loss_np[:, 1]):.5f}, l2: {np.mean(loss_np[:, 2]):.5f}')
            if test_df is not None:
                model.eval()
                unbiased_eval(self.max_u, self.max_v, test_df, self,
                              rel_model=None,
                              cut_len=10,
                              expo_model=None,
                              past_hist=past_hist)

    def score(self, u_b: List[int], v_b: List[int]) -> np.ndarray:
        """Score items in ``v_b`` for the user ``u_b[0]``.

        NOTE(review): only the first entry of ``u_b`` is used — callers
        appear to pass one user repeated; confirm at call sites.
        """
        assert(self.user_records is not None)
        # Left-pad the user's recent history to max_len with padding_idx.
        temp_hist = np.zeros(self.max_len, dtype=int) + self.padding_idx
        item_hist = self.user_records[u_b[0]]
        if len(item_hist) == 0:
            # No history -> neutral zero scores for every candidate.
            return np.zeros(len(v_b))
        temp_hist[-len(item_hist):] = item_hist[-self.max_len:]
        temp_hist = temp_hist.reshape(1, -1)
        with torch.no_grad():
            device = self.seq_model.get_device()
            self.seq_model.eval()
            v_b_t = torch.LongTensor(v_b).to(device)  # [num_item]
            v_b_t = v_b_t.view(1, -1)  # [1, num_item]
            temp_hist = torch.from_numpy(temp_hist).to(device)  # [1, max_len]
            scores = self.seq_model(v_b_t, temp_hist).flatten()
            return scores.cpu().numpy()
def unbiased_eval(num_user: int, num_item: int, dat_df: pd.DataFrame,
                  recom: Recommender, rel_model: Optional[Recommender] = None,
                  expo_model: Optional[Recommender] = None,
                  past_hist: Optional[Dict[int, Set[int]]] = None, expo_compound: float = 1.0,
                  cut_len: int = 10, seed: int = 886,
                  epsilon: float = 1.0, num_neg: int = 100) if False else None
def ac_train_v2(f_model: torch.nn.Module,
                g_model: torch.nn.Module,
                beta_model: torch.nn.Module,
                tr_df: pd.DataFrame,
                user_num: int,
                item_num: int,
                val_df: Optional[pd.DataFrame] = None,
                rating_model: Optional[Recommender] = None,
                expo_model: Optional[Recommender] = None,
                past_hist: Optional[Dict[int, Set[int]]] = None,
                num_epochs: int = 50,
                batch_size: int = 2048,
                min_prob: float = 0.1,
                num_neg: int = 1,
                cuda_idx: int = 0,
                min_delta: float = 0.1,
                lr: float = 0.01,
                f_round_ahead: int = 1,
                g_round_ahead: int = 1,
                decay: float = 0.0):
    """Adversarial counterfactual training (v2) for factor models.

    Alternates between minimizing the IPS-weighted loss of ``f_model`` (and
    the propensity ``beta_model``) and maximizing the clamped loss gap for
    the adversary ``g_model``. ``tr_df`` must have ``uidx``/``iidx``
    columns. Requires a CUDA device at ``cuda_idx``. If ``val_df`` is
    given, both models are evaluated with ``unbiased_eval`` every epoch.
    """
    logger = logging.getLogger(__name__)
    with torch.cuda.device(cuda_idx):
        f_recommender = ClassRecommender(user_num, item_num, f_model)
        g_recommender = ClassRecommender(user_num, item_num, g_model)
        u, v = tr_df.uidx.tolist(), tr_df.iidx.tolist()
        # f and beta share the minimizing optimizer; g gets the maximizer.
        minimizer = build_optimizer(lr, f_model, beta_model)
        maximizer = build_optimizer(lr, g_model)
        loss_func = torch.nn.BCELoss(reduction='none')

        # Clamped sigmoid keeps log() numerically safe below.
        def act_func(x): return torch.sigmoid(torch.clamp(x, min=-8, max=8))
        #device_cuda = torch.device(f'cuda:{cuda_idx}')
        f_model.cuda()
        g_model.cuda()
        beta_model.cuda()

        def train_epoch(optimizer, data_loader, flag='g_train'):
            # One pass over the loader (repeated g_round_ahead times),
            # stepping either the minimizer ('f_train') or the maximizer
            # ('g_train') depending on `flag`.
            f_loss_record, g_loss_record = [], []
            # train the g_model for one epoch
            for c_round in range(g_round_ahead):
                for user, item_pos, item_neg_list in data_loader:
                    f_model.zero_grad()
                    g_model.zero_grad()
                    beta_model.zero_grad()
                    optimizer.zero_grad()
                    f_model.train()
                    g_model.train()
                    beta_model.train()
                    user = user.long().cuda()
                    item_pos = item_pos.long().cuda()
                    item_neg_list = item_neg_list.cuda().long()
                    item_neg = item_neg_list.flatten()
                    # Repeat each user num_neg times so users align with
                    # their flattened negatives.
                    user_for_neg = user.reshape(
                        1, -1).repeat(num_neg, 1).t().flatten()
                    user = torch.cat([user, user_for_neg], dim=0).long()
                    items = torch.cat([item_pos, item_neg], dim=0).long()
                    # 1 for positives, 0 for sampled negatives.
                    labels = torch.cat([torch.ones(len(item_pos)).cuda(
                    ), torch.zeros(len(item_neg)).cuda()], dim=0).float()
                    f_s = f_model(user, items)
                    g_s = g_model(user, items)
                    # beta_model produces the propensity logit q_s.
                    q_s = beta_model(user, items, g_s, labels)
                    f_prob = torch.clamp(act_func(f_s), min=0.01, max=1)
                    g_prob = torch.clamp(act_func(g_s), min=0.01, max=1)
                    q_prob = torch.clamp(act_func(q_s), min=min_prob, max=1)
                    # f's BCE loss is inverse-propensity weighted by q_prob.
                    f_loss = -1 * (labels * torch.log(f_prob) +
                                   (1 - labels) * torch.log(1 - f_prob)) / q_prob
                    g_loss = -1 * (labels * torch.log(g_prob) +
                                   (1 - labels) * torch.log(1 - g_prob))
                    if flag == 'g_train':
                        target = (
                            torch.clamp(
                                min_delta + g_loss - f_loss,
                                min=0)).mean()  # g wants to maximize the gap
                        target += decay * g_model.get_l2(user, items)
                        target.backward()
                    elif flag == 'f_train':
                        target = f_loss.mean()
                        target += decay * \
                            f_model.get_l2(user, items) + decay * \
                            beta_model.get_l2(user, items)
                        target.backward()
                    else:
                        raise ValueError('use g_train or f_train')
                    optimizer.step()
                    with torch.no_grad():
                        f_loss = f_loss.mean()
                        g_loss = g_loss.mean()
                        f_loss_record.append(f_loss.item())
                        g_loss_record.append(g_loss.item())
                logger.info(
                    f'{flag} at {c_round} round -- f_loss: {np.mean(f_loss_record)} g_loss: {np.mean(g_loss_record)}')
        # pre-fit the g without adjusting
        g_recommender.fit(tr_df,
                          num_epochs=0,
                          cuda=cuda_idx,
                          decay=decay)
        neg_data = NegSeqData(list(zip(u, v)), item_num,
                              num_neg=num_neg, past_hist=past_hist)
        neg_data.is_training = True
        for epoch in range(num_epochs):
            # Fresh negative samples every epoch.
            neg_data.ng_sample()
            data_loader = data.DataLoader(
                neg_data,
                batch_size=batch_size,
                shuffle=True,
                num_workers=2,
                pin_memory=True)
            logger.info(f'Epoch -- {epoch}')
            minimizer.zero_grad()
            maximizer.zero_grad()
            train_epoch(minimizer, data_loader, 'f_train')
            train_epoch(maximizer, data_loader, 'g_train')
            if val_df is not None:
                logger.info('f_model:')
                logger.info('--unbiased--')
                unbiased_eval(
                    user_num,
                    item_num,
                    val_df,
                    f_recommender,
                    rel_model=rating_model,
                    expo_model=expo_model,
                    past_hist=past_hist)
                logger.info('g_model:')
                logger.info('--unbiased--')
                unbiased_eval(
                    user_num,
                    item_num,
                    val_df,
                    g_recommender,
                    rel_model=rating_model,
                    expo_model=expo_model,
                    past_hist=past_hist)
def ac_train_v3(f_model: torch.nn.Module,
                is_f_seq: bool,
                g_model: torch.nn.Module,
                is_g_seq: bool,
                beta_model: torch.nn.Module,
                tr_df: pd.DataFrame,
                user_num: int,
                item_num: int,
                val_df: Optional[pd.DataFrame] = None,
                rating_model: Optional[Recommender] = None,
                expo_model: Optional[Recommender] = None,
                past_hist: Optional[Dict[int, Set[int]]] = None,
                g_weight: float = 1.0,
                num_epochs: int = 50,
                batch_size: int = 2048,
                min_prob: float = 0.1,
                num_neg: int = 1,
                cuda_idx: int = 0,
                min_delta: float = 0.1,
                lr: float = 0.01,
                decay: float = 0.0,
                expo_compound: float = 1.0,
                epsilon: float = 1.0):
    """Adversarial counterfactual training (v3) with optional sequence models.

    Like ``ac_train_v2`` but either player (``f_model``/``g_model``) may be
    a sequence model (``is_f_seq``/``is_g_seq``); sequence models are called
    as ``model(items, item_hist)``, factor models as ``model(users, items)``.
    Requires a CUDA device at ``cuda_idx``. If ``val_df`` is given, both
    players are evaluated with ``unbiased_eval`` every epoch.
    """
    logger = logging.getLogger(__name__)
    with torch.cuda.device(cuda_idx):
        # Wrap raw models so they expose the Recommender scoring interface.
        if is_f_seq:
            f_recommender = DeepRecommender(user_num, item_num, f_model)
        else:
            f_recommender = ClassRecommender(user_num, item_num, f_model)
        if is_g_seq:
            g_recommender = DeepRecommender(user_num, item_num, g_model)
        else:
            g_recommender = ClassRecommender(user_num, item_num, g_model)
        # f and beta share the minimizing optimizer; g gets the maximizer.
        minimizer = build_optimizer(lr, f_model, beta_model)
        maximizer = build_optimizer(lr, g_model)
        loss_func = torch.nn.BCELoss(reduction='none')

        # Clamped sigmoid keeps log() numerically safe below.
        def act_func(x): return torch.sigmoid(torch.clamp(x, min=-8, max=8))
        #device_cuda = torch.device(f'cuda:{cuda_idx}')
        f_model.cuda()
        g_model.cuda()
        beta_model.cuda()

        def train_epoch(optimizer, data_loader, flag, is_f_seq, is_g_seq, round_repeat=1):
            # One pass (repeated round_repeat times) stepping either the
            # minimizer ('f_train') or the maximizer ('g_train').
            f_loss_record, g_loss_record = [], []
            q_prob_record = []
            # train the g_model for one epoch
            for c_round in range(round_repeat):
                for user, item_i, item_j_list, item_hist in data_loader:
                    f_model.zero_grad()
                    g_model.zero_grad()
                    beta_model.zero_grad()
                    optimizer.zero_grad()
                    f_model.train()
                    g_model.train()
                    beta_model.train()
                    # transfer to gpu
                    bsz = item_hist.shape[0]
                    user = user.cuda().long()  # [B]
                    item_i = item_i.cuda().long()  # [B]
                    item_j_list = item_j_list.cuda().long()  # [B, num_neg]
                    item_hist = item_hist.cuda().long()  # [B, max_len]
                    # reshape
                    item_i = item_i.view(-1, 1)  # [B, 1]
                    # Positive item in column 0, negatives after it.
                    items = torch.cat([item_i, item_j_list],
                                      dim=1)  # [B, 1 + num_neg]
                    # Label 1 for column 0 (the positive), 0 elsewhere.
                    labels = (torch.arange(1 + num_neg).cuda()
                              < 1).float().repeat(bsz).view(bsz, -1)  # [B, 1 + num_neg]
                    users = user.unsqueeze(1).repeat(
                        1, 1 + num_neg)  # [B, 1 + num_neg]
                    # Sequence models consume (items, history); factor
                    # models consume (users, items).
                    f_s = f_model(items, item_hist) if is_f_seq else f_model(
                        users, items)
                    g_s = g_model(items, item_hist) if is_g_seq else g_model(
                        users, items)
                    # beta_model produces the propensity logit q_s.
                    q_s = beta_model(users, items, g_s, labels)
                    f_prob = torch.clamp(act_func(f_s), min=0.01, max=1)
                    g_prob = torch.clamp(act_func(g_s), min=0.01, max=1)
                    q_prob = torch.clamp(act_func(q_s), min=min_prob, max=1)
                    # f's BCE loss is inverse-propensity weighted by q_prob.
                    f_loss = -1 * (labels * torch.log(f_prob) +
                                   (1 - labels) * torch.log(1 - f_prob)) / q_prob
                    g_loss = -1 * (labels * torch.log(g_prob) +
                                   (1 - labels) * torch.log(1 - g_prob))
                    if flag == 'g_train':
                        target = (
                            torch.clamp(
                                min_delta + g_weight * g_loss - f_loss,
                                min=0)).mean()  # g wants to maximize the gap
                        target += decay * g_model.get_l2(user, items)
                        target.backward()
                    elif flag == 'f_train':
                        target = f_loss.mean()
                        target += decay * \
                            f_model.get_l2(user, items) + decay * \
                            beta_model.get_l2(user, items)
                        target.backward()
                    else:
                        raise ValueError('use g_train or f_train')
                    optimizer.step()
                    with torch.no_grad():
                        f_loss = f_loss.mean()
                        g_loss = g_loss.mean()
                        f_loss_record.append(f_loss.item())
                        g_loss_record.append(g_loss.item())
                        q_prob_record.append(q_prob.mean().item())
                logger.info(
                    f'{flag} at {c_round} round -- f_loss: {np.mean(f_loss_record)} g_loss: {np.mean(g_loss_record)}, q_prob: {np.mean(q_prob_record)}')
        # Build per-user histories ordered by timestamp.
        hist = tr_df.groupby('uidx').apply(
            lambda x: list(zip(x.ts, x.iidx))).to_dict()
        for k in hist.keys():
            hist[k] = [x[1] for x in sorted(hist[k])]
        if is_f_seq:
            f_recommender.set_user_record(hist)
        if is_g_seq:
            g_recommender.set_user_record(hist)
        padding_idx = item_num + 1
        # max_len only matters when at least one player is sequential.
        max_len = 1
        if is_f_seq:
            max_len = f_model.max_len
        elif is_g_seq:
            max_len = g_model.max_len
        f_seq_data = NegSequenceData(
            hist,
            max_len,
            item_num=item_num,
            padding_idx=padding_idx,
            num_neg=num_neg,
            window=True,
            past_hist=past_hist,
            allow_empty=not is_f_seq)
        f_train_loader = data.DataLoader(
            f_seq_data,
            batch_size=batch_size,
            shuffle=True,
            num_workers=3,
            pin_memory=True)
        g_seq_data = NegSequenceData(
            hist,
            max_len,
            item_num=item_num,
            padding_idx=padding_idx,
            num_neg=num_neg,
            window=True,
            past_hist=past_hist,
            allow_empty=not is_g_seq)
        g_train_loader = data.DataLoader(
            g_seq_data,
            batch_size=batch_size,
            shuffle=True,
            num_workers=3,
            pin_memory=True)
        for epoch in range(num_epochs):
            logger.info(f'Epoch -- {epoch}')
            minimizer.zero_grad()
            maximizer.zero_grad()
            train_epoch(minimizer, f_train_loader,
                        'f_train', is_f_seq, is_g_seq)
            train_epoch(maximizer, g_train_loader,
                        'g_train', is_f_seq, is_g_seq)
            # NOTE(review): assumes beta_model exposes scalar parameters
            # alpha/beta/label_coef — confirm against its definition.
            logger.info(f'beta_model: {beta_model.alpha.item()}, {beta_model.beta.item()}, {beta_model.label_coef.item()}')
            if val_df is not None:
                logger.info('f_model:')
                logger.info('--unbiased--')
                unbiased_eval(
                    user_num,
                    item_num,
                    val_df,
                    f_recommender,
                    epsilon=epsilon,
                    rel_model=rating_model,
                    expo_model=expo_model,
                    past_hist=past_hist,
                    expo_compound=expo_compound)
                logger.info('g_model:')
                logger.info('--unbiased--')
                unbiased_eval(
                    user_num,
                    item_num,
                    val_df,
                    g_recommender,
                    epsilon=epsilon,
                    rel_model=rating_model,
                    expo_model=expo_model,
                    past_hist=past_hist,
                    expo_compound=expo_compound)
| 42,175
| 38.306617
| 152
|
py
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/acgan/eval.py
|
from typing import List
from scipy import sparse as sp #type: ignore
import numpy as np #type: ignore
from sklearn.utils.extmath import randomized_svd #type: ignore
from tqdm import tqdm #type: ignore
from acgan.recommender import Recommender
| 246
| 23.7
| 62
|
py
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/acgan/__init__.py
| 0
| 0
| 0
|
py
|
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/data/lastfm/lastfm.py
|
"""
from http://files.grouplens.org/datasets/hetrec2011/hetrec2011-lastfm-2k.zip
"""
import os
import re
import sys
import gzip
import json
from datetime import datetime
import pandas as pd
import numpy as np
from acgan.data import time_based_split
from sklearn.preprocessing import LabelEncoder
data_path='.'
names = ['user_id', 'item_id', 'tag', 'ts']
dtype = {'user_id':int, 'item_id':int, 'tag':int, 'ts':float}
ratings = pd.read_csv(os.path.join(data_path, 'user_taggedartists-timestamps.dat'),
sep='\t',
names=names,
dtype=dtype, skiprows=1)
print(f'frame shape: {ratings.shape}')
# first stage filter on item
valid_ratings = ratings
item_view_count = valid_ratings.groupby('item_id').count().user_id.reset_index()
item_view_count = item_view_count[(item_view_count.user_id > 20)]
item_view_count = item_view_count.item_id.to_frame()
valid_ratings = pd.merge(left=valid_ratings, right=item_view_count, on='item_id')
print(f'frame shape: {valid_ratings.shape}')
# second stage filter on user
user_view_count = valid_ratings.groupby('user_id').count().item_id.reset_index()
user_view_count = user_view_count[(user_view_count.item_id > 20) & (user_view_count.item_id < 1000)]
user_view_count = user_view_count.user_id.to_frame()
valid_ratings = pd.merge(left=valid_ratings, right=user_view_count, on='user_id')
print(f'frame shape: {valid_ratings.shape}')
print(valid_ratings.shape)
valid_ratings['uidx'] = LabelEncoder().fit_transform(valid_ratings.user_id)
valid_ratings['iidx'] = LabelEncoder().fit_transform(valid_ratings.item_id)
print(min(valid_ratings.tag))
valid_ratings['rating'] = (valid_ratings.tag > 0).astype(np.float32)
valid_ratings = valid_ratings[['uidx', 'iidx', 'rating', 'ts']]
print(f'max uidx:{valid_ratings.uidx.max()}, max iidx:{valid_ratings.iidx.max()}')
print(valid_ratings)
valid_ratings.to_feather(os.path.join(data_path, 'ratings.feather'))
time_based_split(valid_ratings, data_path, 20)
| 1,932
| 35.471698
| 100
|
py
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/data/ml-1m/ml_1m.py
|
# download data from: http://files.grouplens.org/datasets/movielens/ml-1m.zip
import os
import pandas as pd
import numpy as np
from acgan.data import time_based_split

data_path = '.'
col_names = ['uidx', 'iidx', 'rating', 'ts']
col_types = {'uidx': int, 'iidx': int, 'rating': float, 'ts': float}
ratings = pd.read_csv(os.path.join(data_path, 'ratings.dat'),
                      sep='::',
                      names=col_names,
                      dtype=col_types)
print(ratings.shape)
# MovieLens ids are 1-based; shift to 0-based indices.
ratings['uidx'] = ratings['uidx'] - 1
ratings['iidx'] = ratings['iidx'] - 1
print(ratings.head())
ratings.to_feather(os.path.join(data_path, 'ratings.feather'))
time_based_split(ratings, data_path, 20)
| 596
| 27.428571
| 76
|
py
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System
|
Adversarial-Counterfactual-Learning-and-Evaluation-for-Recommender-System-main/data/books/book_data.py
|
"""
Steps to download the data:
pip install gdown
gdown 'https://drive.google.com/uc?id=1roQnVtWxVE1tbiXyabrotdZyUY7FA82W'
or go to: https://github.com/MengtingWan/goodreads
"""
import os
import re
import sys
import gzip
import json
from datetime import datetime
import pandas as pd
import numpy as np
from acgan.data import time_based_split
from sklearn.preprocessing import LabelEncoder
def load_data(file_name, head=500):
    """Read a gzipped goodreads review dump into a DataFrame.

    Parameters
    ----------
    file_name : str
        Path to a ``.json.gz`` file containing one JSON review per line.
    head : int or None, default 500
        Stop shortly after this many lines have been read
        (``None`` reads the whole file).

    Returns
    -------
    pd.DataFrame
        Columns: ``user_id``, ``item_id``, ``rating``, ``ts``.
    """
    rows = []
    n_read = 0
    with gzip.open(file_name) as fin:
        for raw_line in fin:
            record = json.loads(raw_line)
            n_read += 1
            # Drop the timezone token from 'date_updated', e.g.
            # 'Mon Aug 01 12:56:32 -0700 2011' -> 'Mon Aug 01 12:56:32 2011',
            # so it parses with the locale '%c' format.
            pieces = record['date_updated'].strip().split(' ')
            stamp = ' '.join(pieces[:4] + pieces[-1:])
            ts = datetime.strptime(stamp, '%c').timestamp()
            rows.append([record['user_id'], record['book_id'],
                         float(record['rating']), ts])
            # break if reaches the requested number of lines
            if (head is not None) and (n_read > head):
                break
    return pd.DataFrame(rows, columns=['user_id', 'item_id', 'rating', 'ts'])
data_path = '.'
frame = load_data('goodreads_reviews_history_biography.json.gz', head=None)
print(frame.shape)
# first stage: keep items with more than 20 interactions
item_counts = frame.groupby('item_id').count().user_id.reset_index()
item_counts = item_counts[(item_counts.user_id > 20)]
frame = pd.merge(left=frame, right=item_counts.item_id.to_frame(), on='item_id')
print(frame.shape)
# second stage: keep users with 21..999 interactions
user_counts = frame.groupby('user_id').count().item_id.reset_index()
user_counts = user_counts[(user_counts.item_id > 20) & (user_counts.item_id < 1000)]
frame = pd.merge(left=frame, right=user_counts.user_id.to_frame(), on='user_id')
print(frame.shape)
# Re-index users/items with dense 0-based ids.
frame['uidx'] = LabelEncoder().fit_transform(frame.user_id)
frame['iidx'] = LabelEncoder().fit_transform(frame.item_id)
frame = frame[['uidx', 'iidx', 'rating', 'ts']]
print(f'max uidx:{frame.uidx.max()}, max iidx:{frame.iidx.max()}')
print(frame)
frame.to_feather(os.path.join(data_path, 'ratings.feather'))
time_based_split(frame, data_path, 20)
| 2,414
| 34
| 100
|
py
|
imgclsmob
|
imgclsmob-master/eval_ch.py
|
"""
Script for evaluating trained model on Chainer (validate/test).
"""
import os
import time
import logging
import argparse
from sys import version_info
from chainer import global_config
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook
from common.logger_utils import initialize_logging
from chainer_.utils import prepare_ch_context, prepare_model, Predictor
from chainer_.utils import get_composite_metric, report_accuracy
from chainer_.dataset_utils import get_dataset_metainfo
from chainer_.dataset_utils import get_val_data_source, get_test_data_source
from chainer_.chainercv2.models.model_store import _model_sha1
def add_eval_parser_arguments(parser):
    """
    Create python script parameters (for eval specific subpart).

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    # (flags, keyword-options) pairs; registration order matters for --help.
    arg_specs = [
        (("--model",),
         dict(type=str, required=True,
              help="type of model to use. see model_provider for options")),
        (("--use-pretrained",),
         dict(action="store_true",
              help="enable using pretrained model from github repo")),
        (("--resume",),
         dict(type=str, default="",
              help="resume from previously saved parameters")),
        (("--calc-flops-only",),
         dict(dest="calc_flops_only", action="store_true",
              help="calculate FLOPs without quality estimation")),
        (("--data-subset",),
         dict(type=str, default="val",
              help="data subset. options are val and test")),
        (("--num-gpus",),
         dict(type=int, default=0, help="number of gpus to use")),
        (("-j", "--num-data-workers"),
         dict(dest="num_workers", default=4, type=int,
              help="number of preprocessing workers")),
        (("--batch-size",),
         dict(type=int, default=512,
              help="training batch size per device (CPU/GPU)")),
        (("--save-dir",),
         dict(type=str, default="",
              help="directory of saved models and log-files")),
        (("--logging-file-name",),
         dict(type=str, default="train.log",
              help="filename of training log")),
        (("--log-packages",),
         dict(type=str, default="chainer, chainercv",
              help="list of python packages for logging")),
        (("--log-pip-packages",),
         dict(type=str, default="cupy-cuda110, cupy-cuda112, chainer, chainercv",
              help="list of pip packages for logging")),
        (("--disable-cudnn-autotune",),
         dict(action="store_true",
              help="disable cudnn autotune for segmentation models")),
        (("--show-progress",),
         dict(action="store_true", help="show progress bar")),
        (("--all",),
         dict(action="store_true",
              help="test all pretrained models for partucular dataset")),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
def parse_args():
    """
    Create python script parameters (common part).

    Returns:
    -------
    argparse.Namespace
        Parsed arguments (common + dataset-specific + eval-specific).
    """
    parser = argparse.ArgumentParser(
        description="Evaluate a model for image classification/segmentation (Chainer)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K",
        help="dataset name. options are ImageNet1K, CUB200_2011, CIFAR10, CIFAR100, SVHN, VOC2012, ADE20K, Cityscapes, "
             "COCO")
    parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    # Two-phase parse: --dataset must be known first, because the set of
    # remaining arguments depends on the chosen dataset.
    args, _ = parser.parse_known_args()
    dataset_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    dataset_metainfo.add_dataset_parser_arguments(
        parser=parser,
        work_dir_path=args.work_dir)
    add_eval_parser_arguments(parser)
    args = parser.parse_args()
    return args
def calc_model_accuracy(net,
                        test_data,
                        metric,
                        calc_weight_count=False,
                        calc_flops_only=True,
                        extended_log=False):
    """
    Main test routine.

    Parameters:
    ----------
    net : Chain
        Model.
    test_data : dict
        Data loader (keys: 'iterator', 'ds_len').
    metric : EvalMetric
        Metric object instance.
    calc_weight_count : bool, default False
        Whether to calculate count of weights.
    calc_flops_only : bool, default True
        Whether to skip the accuracy estimation entirely.
    extended_log : bool, default False
        Whether to log more precise accuracy values.

    Returns:
    -------
    list of floats
        Accuracy values (empty when calc_flops_only is True).
    """
    tic = time.time()
    predictor = Predictor(
        model=net,
        transform=None)
    if calc_weight_count:
        weight_count = net.count_params()
        logging.info("Model: {} trainable parameters".format(weight_count))
    if not calc_flops_only:
        in_values, out_values, rest_values = apply_to_iterator(
            func=predictor,
            iterator=test_data["iterator"],
            hook=ProgressHook(test_data["ds_len"]))
        assert (len(rest_values) == 1)
        assert (len(out_values) == 1)
        assert (len(in_values) == 1)
        # Stream (label, prediction) pairs through the metric one at a time.
        # (A dead `if True:` guard with an unreachable batch-update `else`
        # branch was removed here.)
        labels = iter(rest_values[0])
        preds = iter(out_values[0])
        inputs = iter(in_values[0])
        for label, pred, inputi in zip(labels, preds, inputs):
            metric.update(label, pred)
            # Drop references promptly to keep memory bounded.
            del label
            del pred
            del inputi
        accuracy_msg = report_accuracy(
            metric=metric,
            extended_log=extended_log)
        logging.info("Test: {}".format(accuracy_msg))
        logging.info("Time cost: {:.4f} sec".format(
            time.time() - tic))
        acc_values = metric.get()[1]
        acc_values = acc_values if type(acc_values) == list else [acc_values]
    else:
        acc_values = []
    return acc_values
def test_model(args):
    """
    Main test routine.

    Parameters:
    ----------
    args : argparse.Namespace
        Main script arguments.

    Returns:
    -------
    float or None
        Main accuracy value, or None when only FLOPs were calculated.
    """
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    # Segmentation evaluation requires batch size 1 and cudnn autotune off.
    assert (ds_metainfo.ml_type != "imgseg") or (args.batch_size == 1)
    assert (ds_metainfo.ml_type != "imgseg") or args.disable_cudnn_autotune
    global_config.train = False
    use_gpus = prepare_ch_context(args.num_gpus)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_gpus=use_gpus,
        net_extra_kwargs=ds_metainfo.test_net_extra_kwargs,
        num_classes=(args.num_classes if ds_metainfo.ml_type != "hpe" else None),
        in_channels=args.in_channels)
    assert (hasattr(net, "classes") or (ds_metainfo.ml_type == "hpe"))
    assert (hasattr(net, "in_size"))
    # Choose the data source factory according to the requested subset.
    get_test_data_source_class = get_val_data_source if args.data_subset == "val" else get_test_data_source
    test_data = get_test_data_source_class(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    if args.data_subset == "val":
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.val_metric_names,
            metric_extra_kwargs=ds_metainfo.val_metric_extra_kwargs)
    else:
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.test_metric_names,
            metric_extra_kwargs=ds_metainfo.test_metric_extra_kwargs)
    # Either a pretrained model or an explicit checkpoint must be supplied.
    assert (args.use_pretrained or args.resume.strip())
    acc_values = calc_model_accuracy(
        net=net,
        test_data=test_data,
        metric=test_metric,
        calc_weight_count=True,
        calc_flops_only=args.calc_flops_only,
        extended_log=True)
    return acc_values[ds_metainfo.saver_acc_ind] if len(acc_values) > 0 else None
def main():
    """
    Main body of script: parse args, set up logging, then evaluate one
    model (or, with --all, every registered pretrained model).
    """
    args = parse_args()
    if args.disable_cudnn_autotune:
        # NOTE(review): this sets an MXNet environment variable in a Chainer
        # script — presumably copied from the MXNet variant; confirm it has
        # any effect here.
        os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    if args.all:
        args.use_pretrained = True
        # Iterate over every registered pretrained model and compare the
        # freshly computed accuracy with the recorded error value.
        for model_name, model_metainfo in (_model_sha1.items() if version_info[0] >= 3 else _model_sha1.iteritems()):
            error, checksum, repo_release_tag = model_metainfo
            args.model = model_name
            logging.info("==============")
            logging.info("Checking model: {}".format(model_name))
            acc_value = test_model(args=args)
            if acc_value is not None:
                # Recorded errors are stored as integers in 1e-4 units.
                exp_value = int(error) * 1e-4
                if abs(acc_value - exp_value) > 2e-4:
                    logging.info("----> Wrong value detected (expected value: {})!".format(exp_value))
    else:
        test_model(args=args)
if __name__ == "__main__":
main()
| 9,650
| 29.638095
| 120
|
py
|
imgclsmob
|
imgclsmob-master/eval_ke.py
|
"""
Script for evaluating trained model on Keras (validate/test).
"""
import argparse
import time
import logging
import keras
from common.logger_utils import initialize_logging
from keras_.utils import prepare_ke_context, prepare_model, get_data_rec, get_data_generator, backend_agnostic_compile
def parse_args():
    """
    Parse python script parameters.

    Returns:
    -------
    argparse.Namespace
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description="Evaluate a model for image classification (Keras)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flags, keyword-options) pairs; registration order matters for --help.
    arg_specs = [
        (("--rec-train",),
         dict(type=str, default="../imgclsmob_data/imagenet_rec/train.rec",
              help="the training data")),
        (("--rec-train-idx",),
         dict(type=str, default="../imgclsmob_data/imagenet_rec/train.idx",
              help="the index of training data")),
        (("--rec-val",),
         dict(type=str, default="../imgclsmob_data/imagenet_rec/val.rec",
              help="the validation data")),
        (("--rec-val-idx",),
         dict(type=str, default="../imgclsmob_data/imagenet_rec/val.idx",
              help="the index of validation data")),
        (("--model",),
         dict(type=str, required=True,
              help="type of model to use. see model_provider for options")),
        (("--use-pretrained",),
         dict(action="store_true",
              help="enable using pretrained model from github repo")),
        (("--dtype",),
         dict(type=str, default="float32", help="data type for training")),
        (("--resume",),
         dict(type=str, default="",
              help="resume from previously saved parameters if not None")),
        (("--input-size",),
         dict(type=int, default=224, help="size of the input for model")),
        (("--resize-inv-factor",),
         dict(type=float, default=0.875,
              help="inverted ratio for input image crop")),
        (("--num-gpus",),
         dict(type=int, default=0, help="number of gpus to use")),
        (("-j", "--num-data-workers"),
         dict(dest="num_workers", default=4, type=int,
              help="number of preprocessing workers")),
        (("--batch-size",),
         dict(type=int, default=512,
              help="training batch size per device (CPU/GPU)")),
        (("--save-dir",),
         dict(type=str, default="",
              help="directory of saved models and log-files")),
        (("--logging-file-name",),
         dict(type=str, default="train.log",
              help="filename of training log")),
        (("--log-packages",),
         dict(type=str, default="keras, mxnet, tensorflow, tensorflow-gpu",
              help="list of python packages for logging")),
        (("--log-pip-packages",),
         dict(type=str, default="keras, keras-mxnet, mxnet, mxnet-cu110",
              help="list of pip packages for logging")),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
def test(net,
         val_gen,
         val_size,
         batch_size,
         num_gpus,
         calc_weight_count=False,
         extended_log=False):
    """
    Main test routine.

    Parameters:
    ----------
    net : Model
        Model.
    val_gen : generator
        Data loader.
    val_size : int
        Size of validation subset.
    batch_size : int
        Batch size.
    num_gpus : int
        Number of used GPUs.
    calc_weight_count : bool, default False
        Whether to calculate count of weights.
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    """
    keras.backend.set_learning_phase(0)
    # Compile is required before evaluate_generator; the SGD settings look
    # like placeholders since no training happens here — evaluation only.
    backend_agnostic_compile(
        model=net,
        loss="categorical_crossentropy",
        optimizer=keras.optimizers.SGD(
            lr=0.01,
            momentum=0.0,
            decay=0.0,
            nesterov=False),
        metrics=[keras.metrics.categorical_accuracy, keras.metrics.top_k_categorical_accuracy],
        num_gpus=num_gpus)
    # net.summary()
    tic = time.time()
    score = net.evaluate_generator(
        generator=val_gen,
        steps=(val_size // batch_size),
        verbose=True)
    # score = [loss, top-1 accuracy, top-k accuracy] per the metrics above.
    err_top1_val = 1.0 - score[1]
    err_top5_val = 1.0 - score[2]
    if calc_weight_count:
        weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
        logging.info("Model: {} trainable parameters".format(weight_count))
    if extended_log:
        logging.info("Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})".format(
            top1=err_top1_val, top5=err_top5_val))
    else:
        logging.info("Test: err-top1={top1:.4f}\terr-top5={top5:.4f}".format(
            top1=err_top1_val, top5=err_top5_val))
    logging.info("Time cost: {:.4f} sec".format(
        time.time() - tic))
def main():
    """
    Main body of script: parse arguments, build the model and data pipeline,
    then run the evaluation routine on the validation subset.
    """
    args = parse_args()
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    batch_size = prepare_ke_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip())
    # Fall back to ImageNet-style defaults when the model does not expose them.
    num_classes = net.classes if hasattr(net, "classes") else 1000
    input_image_size = net.in_size if hasattr(net, "in_size") else (args.input_size, args.input_size)
    train_data, val_data = get_data_rec(
        rec_train=args.rec_train,
        rec_train_idx=args.rec_train_idx,
        rec_val=args.rec_val,
        rec_val_idx=args.rec_val_idx,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor,
        only_val=True)
    val_gen = get_data_generator(
        data_iterator=val_data,
        num_classes=num_classes)
    # NOTE(review): hard-coded ImageNet-1K validation set size — confirm for other datasets.
    val_size = 50000
    # Evaluating an untrained network is meaningless: require weights from somewhere.
    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_gen=val_gen,
        val_size=val_size,
        batch_size=batch_size,
        num_gpus=args.num_gpus,
        calc_weight_count=True,
        extended_log=True)


if __name__ == "__main__":
    main()
| 6,665
| 27.365957
| 118
|
py
|
imgclsmob
|
imgclsmob-master/load_model.py
|
"""
Script for downloading model weights.
"""
import argparse
import numpy as np
def parse_args():
    """
    Parse command-line parameters for the model-downloading script.

    Returns:
    -------
    argparse.Namespace
        Parsed arguments (only ``model``).
    """
    arg_parser = argparse.ArgumentParser(
        description="Download model",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument("--model", type=str, required=True, help="model name")
    return arg_parser.parse_args()
def main():
    """
    Main body of script: download pretrained weights for the requested model
    across all supported frameworks (Gluon, PyTorch, Chainer, TensorFlow 2).
    """
    args = parse_args()
    # Imports are local so each heavy framework is only loaded when reached.
    from gluon.utils import prepare_model as prepare_model_gl
    prepare_model_gl(
        model_name=args.model,
        use_pretrained=True,
        pretrained_model_file_path="",
        dtype=np.float32)
    from pytorch.utils import prepare_model as prepare_model_pt
    prepare_model_pt(
        model_name=args.model,
        use_pretrained=True,
        pretrained_model_file_path="",
        use_cuda=False)
    from chainer_.utils import prepare_model as prepare_model_ch
    prepare_model_ch(
        model_name=args.model,
        use_pretrained=True,
        pretrained_model_file_path="")
    from tensorflow2.utils import prepare_model as prepare_model_tf2
    prepare_model_tf2(
        model_name=args.model,
        use_pretrained=True,
        pretrained_model_file_path="",
        use_cuda=False)


if __name__ == '__main__':
    main()
| 1,326
| 23.574074
| 92
|
py
|
imgclsmob
|
imgclsmob-master/eval_gl.py
|
"""
Script for evaluating trained model on MXNet/Gluon (validate/test).
"""
import os
import time
import logging
import argparse
from sys import version_info
from common.logger_utils import initialize_logging
from gluon.utils import prepare_mx_context, prepare_model
from gluon.utils import calc_net_weight_count, validate
from gluon.utils import validate_asr
from gluon.utils import get_composite_metric
from gluon.utils import report_accuracy
from gluon.dataset_utils import get_dataset_metainfo
from gluon.dataset_utils import get_batch_fn
from gluon.dataset_utils import get_val_data_source, get_test_data_source
from gluon.model_stats import measure_model
from gluon.gluoncv2.models.model_store import _model_sha1
def add_eval_parser_arguments(parser):
    """
    Create python script parameters (for eval specific subpart).

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance to which the evaluation options are added.
    """
    parser.add_argument("--model", type=str, required=True,
                        help="type of model to use. see model_provider for options")
    parser.add_argument("--use-pretrained", action="store_true",
                        help="enable using pretrained model from github repo")
    parser.add_argument("--dtype", type=str, default="float32",
                        help="base data type for tensors")
    parser.add_argument("--resume", type=str, default="",
                        help="resume from previously saved parameters")
    parser.add_argument("--calc-flops", dest="calc_flops", action="store_true",
                        help="calculate FLOPs")
    parser.add_argument("--calc-flops-only", dest="calc_flops_only", action="store_true",
                        help="calculate FLOPs without quality estimation")
    parser.add_argument("--data-subset", type=str, default="val",
                        help="data subset. options are val and test")
    parser.add_argument("--num-gpus", type=int, default=0,
                        help="number of gpus to use")
    parser.add_argument("-j", "--num-data-workers", dest="num_workers", default=4, type=int,
                        help="number of preprocessing workers")
    parser.add_argument("--batch-size", type=int, default=512,
                        help="training batch size per device (CPU/GPU)")
    parser.add_argument("--save-dir", type=str, default="",
                        help="directory of saved models and log-files")
    parser.add_argument("--logging-file-name", type=str, default="train.log",
                        help="filename of training log")
    parser.add_argument("--log-packages", type=str, default="mxnet, numpy",
                        help="list of python packages for logging")
    parser.add_argument("--log-pip-packages", type=str, default="mxnet-cu110, mxnet-cu112",
                        help="list of pip packages for logging")
    parser.add_argument("--disable-cudnn-autotune", action="store_true",
                        help="disable cudnn autotune for segmentation models")
    parser.add_argument("--not-show-progress", action="store_true",
                        help="do not show progress bar")
    # Fixed typo in help text: "partucular" -> "particular".
    parser.add_argument("--all", action="store_true",
                        help="test all pretrained models for particular dataset")
def parse_args():
    """
    Create python script parameters (common part).

    Returns:
    -------
    argparse.Namespace
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Evaluate a model for image classification/segmentation (Gluon)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K_rec",
        help="dataset name. options are ImageNet1K, ImageNet1K_rec, CUB200_2011, CIFAR10, CIFAR100, SVHN, VOC2012, "
             "ADE20K, Cityscapes, COCO, LibriSpeech")
    parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    # Two-stage parsing: the dataset choice determines which dataset-specific
    # options get registered before the final parse.
    args, _ = parser.parse_known_args()
    dataset_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    dataset_metainfo.add_dataset_parser_arguments(
        parser=parser,
        work_dir_path=args.work_dir)
    add_eval_parser_arguments(parser)
    args = parser.parse_args()
    return args
def calc_model_accuracy(net,
                        test_data,
                        batch_fn,
                        data_source_needs_reset,
                        metric,
                        dtype,
                        ctx,
                        input_image_size,
                        in_channels,
                        calc_weight_count=False,
                        calc_flops=False,
                        calc_flops_only=True,
                        extended_log=False,
                        ml_type="cls"):
    """
    Main test routine: validate the model, then optionally report parameter
    count and FLOPs statistics.

    Parameters:
    ----------
    net : HybridBlock
        Model.
    test_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator.
    batch_fn : func
        Function for splitting data after extraction from data loader.
    data_source_needs_reset : bool
        Whether to reset data (if test_data is ImageRecordIter).
    metric : EvalMetric
        Metric object instance.
    dtype : str
        Base data type for tensors.
    ctx : Context
        MXNet context.
    input_image_size : tuple of 2 ints
        Spatial size of the expected input image.
    in_channels : int
        Number of input channels.
    calc_weight_count : bool, default False
        Whether to calculate count of weights.
    calc_flops : bool, default False
        Whether to calculate FLOPs.
    calc_flops_only : bool, default True
        Whether to only calculate FLOPs without testing.
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    ml_type : str, default 'cls'
        Machine learning type.

    Returns:
    -------
    list of floats
        Accuracy values (empty when only FLOPs are calculated).
    """
    if not calc_flops_only:
        # ASR models use a dedicated validation loop.
        validate_fn = validate_asr if ml_type == "asr" else validate
        tic = time.time()
        validate_fn(
            metric=metric,
            net=net,
            val_data=test_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        accuracy_msg = report_accuracy(
            metric=metric,
            extended_log=extended_log)
        logging.info("Test: {}".format(accuracy_msg))
        logging.info("Time cost: {:.4f} sec".format(
            time.time() - tic))
        acc_values = metric.get()[1]
        # Normalize a scalar metric value to a one-element list.
        # (Fixed: use isinstance instead of `type(acc_values) == list`.)
        if not isinstance(acc_values, list):
            acc_values = [acc_values]
    else:
        acc_values = []
    if calc_weight_count:
        weight_count = calc_net_weight_count(net)
        if not calc_flops:
            # When FLOPs are measured, the count is cross-checked there instead.
            logging.info("Model: {} trainable parameters".format(weight_count))
    if calc_flops:
        # ASR models take a (features, length) input pair instead of an image tensor.
        in_shapes = [(1, 640 * 25 * 5), (1,)] if ml_type == "asr" else \
            [(1, in_channels, input_image_size[0], input_image_size[1])]
        num_flops, num_macs, num_params = measure_model(
            model=net,
            in_shapes=in_shapes,
            ctx=ctx[0])
        assert (not calc_weight_count) or (weight_count == num_params)
        stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \
                   " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)"
        logging.info(stat_msg.format(
            params=num_params, params_m=num_params / 1e6,
            flops=num_flops, flops_m=num_flops / 1e6,
            flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6,
            macs=num_macs, macs_m=num_macs / 1e6))
    return acc_values
def test_model(args):
    """
    Main test routine: build dataset/model/metric from args and evaluate.

    Parameters:
    ----------
    args : argparse.Namespace
        Main script arguments.

    Returns:
    -------
    float or None
        Main accuracy value, or None when no metric was evaluated.
    """
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    # Segmentation testing requires batch size 1 and disabled cudnn autotune.
    assert (ds_metainfo.ml_type != "imgseg") or (args.data_subset != "test") or (args.batch_size == 1)
    assert (ds_metainfo.ml_type != "imgseg") or args.disable_cudnn_autotune
    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    # Hybridization is skipped when FLOPs are measured (measurement needs
    # the imperative graph).
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        net_extra_kwargs=ds_metainfo.test_net_extra_kwargs,
        load_ignore_extra=ds_metainfo.load_ignore_extra,
        classes=(args.num_classes if ds_metainfo.ml_type != "hpe" else None),
        in_channels=args.in_channels,
        do_hybridize=(ds_metainfo.allow_hybridize and (not args.calc_flops)),
        ctx=ctx)
    assert (hasattr(net, "in_size"))
    input_image_size = net.in_size
    get_test_data_source_class = get_val_data_source if args.data_subset == "val" else get_test_data_source
    test_data = get_test_data_source_class(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    batch_fn = get_batch_fn(ds_metainfo=ds_metainfo)
    if args.data_subset == "val":
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.val_metric_names,
            metric_extra_kwargs=ds_metainfo.val_metric_extra_kwargs)
    else:
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.test_metric_names,
            metric_extra_kwargs=ds_metainfo.test_metric_extra_kwargs)
    if not args.not_show_progress:
        from tqdm import tqdm
        test_data = tqdm(test_data)
    # Weights must come from somewhere unless only FLOPs are requested.
    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    acc_values = calc_model_accuracy(
        net=net,
        test_data=test_data,
        batch_fn=batch_fn,
        data_source_needs_reset=ds_metainfo.use_imgrec,
        metric=test_metric,
        dtype=args.dtype,
        ctx=ctx,
        input_image_size=input_image_size,
        in_channels=args.in_channels,
        # calc_weight_count=(not log_file_exist),
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True,
        ml_type=ds_metainfo.ml_type)
    return acc_values[ds_metainfo.saver_acc_ind] if len(acc_values) > 0 else None
def main():
    """
    Main body of script: evaluate one model, or — with --all — sweep every
    registered pretrained model and flag accuracy mismatches.
    """
    args = parse_args()
    if args.disable_cudnn_autotune:
        os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    if args.all:
        args.use_pretrained = True
        # Python 2/3 compatible dict iteration over the model registry.
        for model_name, model_metainfo in (_model_sha1.items() if version_info[0] >= 3 else _model_sha1.iteritems()):
            error, checksum, repo_release_tag = model_metainfo
            args.model = model_name
            logging.info("==============")
            logging.info("Checking model: {}".format(model_name))
            acc_value = test_model(args=args)
            if acc_value is not None:
                # Stored error is in units of 1e-4; tolerate 2e-4 absolute drift.
                exp_value = int(error) * 1e-4
                if abs(acc_value - exp_value) > 2e-4:
                    logging.info("----> Wrong value detected (expected value: {})!".format(exp_value))
    else:
        test_model(args=args)


if __name__ == "__main__":
    main()
| 11,941
| 31.53951
| 117
|
py
|
imgclsmob
|
imgclsmob-master/sotabench.py
|
# Benchmark every registered pretrained model on ImageNet via torchbench/sotabench.
from torchbench.image_classification import ImageNet
from pytorch.pytorchcv.models.model_store import _model_sha1
from pytorch.pytorchcv.model_provider import get_model as ptcv_get_model
import torchvision.transforms as transforms
import torch
import math
from sys import version_info
# import os

# Python 2/3 compatible iteration over the model registry.
for model_name, model_metainfo in (_model_sha1.items() if version_info[0] >= 3 else _model_sha1.iteritems()):
    net = ptcv_get_model(model_name, pretrained=True)
    error, checksum, repo_release_tag, caption, paper, ds, img_size, scale, batch, rem = model_metainfo
    # Skip non-ImageNet entries, entries without a known input size,
    # and entries whose remark ends with '*' (explicitly excluded).
    if (ds != "in1k") or (img_size == 0) or ((len(rem) > 0) and (rem[-1] == "*")):
        continue
    paper_model_name = caption
    paper_arxiv_id = paper
    input_image_size = img_size
    resize_inv_factor = scale
    batch_size = batch
    model_description = "pytorch" + (rem if rem == "" else ", " + rem)
    # Sanity-check the registry input size against the model's own metadata.
    assert (not hasattr(net, "in_size")) or (input_image_size == net.in_size[0])
    ImageNet.benchmark(
        model=net,
        model_description=model_description,
        paper_model_name=paper_model_name,
        paper_arxiv_id=paper_arxiv_id,
        input_transform=transforms.Compose([
            transforms.Resize(int(math.ceil(float(input_image_size) / resize_inv_factor))),
            transforms.CenterCrop(input_image_size),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225]),
        ]),
        batch_size=batch_size,
        num_gpu=1,
        # data_root=os.path.join("..", "imgclsmob_data", "imagenet")
    )
    # Release cached GPU memory before loading the next model.
    torch.cuda.empty_cache()
| 1,645
| 39.146341
| 109
|
py
|
imgclsmob
|
imgclsmob-master/train_tf2.py
|
"""
Script for training model on TensorFlow 2.0.
"""
import os
import logging
import argparse
import numpy as np
import random
import tensorflow as tf
from common.logger_utils import initialize_logging
from tensorflow2.tf2cv.model_provider import get_model
from tensorflow2.dataset_utils import get_dataset_metainfo, get_train_data_source, get_val_data_source
def add_train_cls_parser_arguments(parser):
    """
    Register training/classification specific command line options on *parser*.

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    # Model selection and checkpointing.
    parser.add_argument("--model", type=str, required=True,
                        help="type of model to use. see model_provider for options")
    parser.add_argument("--use-pretrained", action="store_true",
                        help="enable using pretrained model from github repo")
    parser.add_argument("--resume", type=str, default="",
                        help="resume from previously saved parameters if not None")
    parser.add_argument("--resume-state", type=str, default="",
                        help="resume from previously saved optimizer state if not None")
    # Hardware and data loading.
    parser.add_argument("--num-gpus", type=int, default=0,
                        help="number of gpus to use")
    parser.add_argument("-j", "--num-data-workers", dest="num_workers", default=4, type=int,
                        help="number of preprocessing workers")
    parser.add_argument("--batch-size", type=int, default=512,
                        help="training batch size per device (CPU/GPU)")
    # Training schedule.
    parser.add_argument("--num-epochs", type=int, default=120,
                        help="number of training epochs.")
    parser.add_argument("--start-epoch", type=int, default=1,
                        help="starting epoch for resuming, default is 1 for new training")
    parser.add_argument("--attempt", type=int, default=1,
                        help="current attempt number for training")
    # Optimizer and learning-rate policy.
    parser.add_argument("--optimizer-name", type=str, default="nag",
                        help="optimizer name")
    parser.add_argument("--lr", type=float, default=0.1,
                        help="learning rate")
    parser.add_argument("--lr-mode", type=str, default="cosine",
                        help="learning rate scheduler mode. options are step, poly and cosine")
    parser.add_argument("--lr-decay", type=float, default=0.1,
                        help="decay rate of learning rate")
    parser.add_argument("--lr-decay-period", type=int, default=0,
                        help="interval for periodic learning rate decays. default is 0 to disable")
    parser.add_argument("--lr-decay-epoch", type=str, default="40,60",
                        help="epoches at which learning rate decays")
    parser.add_argument("--target-lr", type=float, default=1e-8,
                        help="ending learning rate")
    parser.add_argument("--momentum", type=float, default=0.9,
                        help="momentum value for optimizer")
    parser.add_argument("--wd", type=float, default=0.0001,
                        help="weight decay rate")
    # Logging and persistence.
    parser.add_argument("--log-interval", type=int, default=50,
                        help="number of batches to wait before logging")
    parser.add_argument("--save-interval", type=int, default=4,
                        help="saving parameters epoch interval, best model will always be saved")
    parser.add_argument("--save-dir", type=str, default="",
                        help="directory of saved models and log-files")
    parser.add_argument("--logging-file-name", type=str, default="train.log",
                        help="filename of training log")
    parser.add_argument("--seed", type=int, default=-1,
                        help="Random seed to be fixed")
    parser.add_argument("--log-packages", type=str, default="tensorflow, tensorflow-gpu",
                        help="list of python packages for logging")
    parser.add_argument("--log-pip-packages", type=str, default="tensorflow, tensorflow-gpu",
                        help="list of pip packages for logging")
def parse_args():
    """
    Parse python script parameters (common part).

    Returns:
    -------
    argparse.Namespace
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Train a model for image classification/segmentation (TensorFlow 2.0)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K",
        help="dataset name. options are ImageNet1K, CUB200_2011, CIFAR10, CIFAR100, SVHN")
    parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    # Two-stage parsing: the dataset choice determines which dataset-specific
    # options get registered before the final parse.
    args, _ = parser.parse_known_args()
    dataset_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    dataset_metainfo.add_dataset_parser_arguments(
        parser=parser,
        work_dir_path=args.work_dir)
    add_train_cls_parser_arguments(parser)
    args = parser.parse_args()
    return args
def init_rand(seed):
    """
    Seed the stdlib and NumPy RNGs; draw a random seed when non-positive.

    Parameters:
    ----------
    seed : int
        Requested seed; a value <= 0 means "pick one at random".

    Returns:
    -------
    int
        The seed actually used.
    """
    chosen = seed if seed > 0 else np.random.randint(10000)
    random.seed(chosen)
    np.random.seed(chosen)
    return chosen
def main():
    """
    Main body of script: set up logging and RNG seeds, build the model and
    data pipelines, and run a simple custom TF2 training loop.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    data_format = "channels_last"
    tf.keras.backend.set_image_data_format(data_format)
    model = args.model
    net = get_model(model, data_format=data_format)
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
    optimizer = tf.keras.optimizers.Adam()
    # Streaming metrics, reset at the end of every epoch.
    train_loss = tf.keras.metrics.Mean(name="train_loss")
    train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name="train_accuracy")
    test_loss = tf.keras.metrics.Mean(name="test_loss")
    test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name="test_accuracy")

    @tf.function
    def train_step(images, labels):
        # One optimization step: forward pass under the tape, then backprop.
        with tf.GradientTape() as tape:
            predictions = net(images)
            loss = loss_object(labels, predictions)
        gradients = tape.gradient(loss, net.trainable_variables)
        optimizer.apply_gradients(zip(gradients, net.trainable_variables))
        train_loss(loss)
        train_accuracy(labels, predictions)

    @tf.function
    def test_step(images, labels):
        # Forward-only evaluation step.
        predictions = net(images)
        t_loss = loss_object(labels, predictions)
        test_loss(t_loss)
        test_accuracy(labels, predictions)

    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    assert (ds_metainfo.ml_type != "imgseg") or (args.batch_size == 1)
    # assert (ds_metainfo.ml_type != "imgseg") or args.disable_cudnn_autotune
    batch_size = args.batch_size
    train_data, train_img_count = get_train_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        data_format=data_format)
    val_data, val_img_count = get_val_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        data_format=data_format)
    num_epochs = args.num_epochs
    for epoch in range(num_epochs):
        for images, labels in train_data:
            train_step(images, labels)
            # break
        for test_images, test_labels in val_data:
            test_step(test_images, test_labels)
            # break
        template = "Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}"
        logging.info(template.format(
            epoch + 1,
            train_loss.result(),
            train_accuracy.result() * 100,
            test_loss.result(),
            test_accuracy.result() * 100))
        # Clear streaming-metric state before the next epoch.
        train_loss.reset_states()
        train_accuracy.reset_states()
        test_loss.reset_states()
        test_accuracy.reset_states()


if __name__ == "__main__":
    main()
| 8,479
| 28.041096
| 102
|
py
|
imgclsmob
|
imgclsmob-master/eval_pt.py
|
"""
Script for evaluating trained model on PyTorch (validate/test).
"""
import os
import time
import logging
import argparse
from sys import version_info
from common.logger_utils import initialize_logging
from pytorch.utils import prepare_pt_context, prepare_model
from pytorch.utils import calc_net_weight_count, validate
from pytorch.utils import get_composite_metric
from pytorch.utils import report_accuracy
from pytorch.dataset_utils import get_dataset_metainfo
from pytorch.dataset_utils import get_val_data_source, get_test_data_source
from pytorch.model_stats import measure_model
from pytorch.pytorchcv.models.model_store import _model_sha1
def add_eval_cls_parser_arguments(parser):
    """
    Create python script parameters (for eval specific subpart).

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance to which the evaluation options are added.
    """
    parser.add_argument("--model", type=str, required=True,
                        help="type of model to use. see model_provider for options")
    parser.add_argument("--use-pretrained", action="store_true",
                        help="enable using pretrained model from github repo")
    parser.add_argument("--resume", type=str, default="",
                        help="resume from previously saved parameters")
    parser.add_argument("--calc-flops", dest="calc_flops", action="store_true",
                        help="calculate FLOPs")
    parser.add_argument("--calc-flops-only", dest="calc_flops_only", action="store_true",
                        help="calculate FLOPs without quality estimation")
    parser.add_argument("--remove-module", action="store_true",
                        help="enable if stored model has module")
    parser.add_argument("--data-subset", type=str, default="val",
                        help="data subset. options are val and test")
    parser.add_argument("--num-gpus", type=int, default=0,
                        help="number of gpus to use")
    parser.add_argument("-j", "--num-data-workers", dest="num_workers", default=4, type=int,
                        help="number of preprocessing workers")
    parser.add_argument("--batch-size", type=int, default=512,
                        help="training batch size per device (CPU/GPU)")
    parser.add_argument("--save-dir", type=str, default="",
                        help="directory of saved models and log-files")
    parser.add_argument("--logging-file-name", type=str, default="train.log",
                        help="filename of training log")
    parser.add_argument("--log-packages", type=str, default="torch, torchvision",
                        help="list of python packages for logging")
    parser.add_argument("--log-pip-packages", type=str, default="",
                        help="list of pip packages for logging")
    parser.add_argument("--disable-cudnn-autotune", action="store_true",
                        help="disable cudnn autotune for segmentation models")
    parser.add_argument("--show-progress", action="store_true",
                        help="show progress bar")
    # Fixed typo in help text: "partucular" -> "particular".
    parser.add_argument("--all", action="store_true",
                        help="test all pretrained models for particular dataset")
def parse_args():
    """
    Parse python script parameters (common part).

    Returns:
    -------
    argparse.Namespace
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Evaluate a model for image classification/segmentation (PyTorch)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K",
        help="dataset name. options are ImageNet1K, CUB200_2011, CIFAR10, CIFAR100, SVHN, VOC2012, ADE20K, Cityscapes, "
             "COCO, LibriSpeech, MCV")
    parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    # Two-stage parsing: the dataset choice determines which dataset-specific
    # options get registered before the final parse.
    args, _ = parser.parse_known_args()
    dataset_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    dataset_metainfo.add_dataset_parser_arguments(
        parser=parser,
        work_dir_path=args.work_dir)
    add_eval_cls_parser_arguments(parser)
    args = parser.parse_args()
    return args
def prepare_dataset_metainfo(args):
    """
    Get dataset metainfo by name of dataset and validate script constraints.

    Parameters:
    ----------
    args : argparse.Namespace
        Main script arguments.

    Returns:
    -------
    DatasetMetaInfo
        Dataset metainfo.
    """
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    # Segmentation evaluation requires batch size 1 and disabled cudnn autotune.
    assert (ds_metainfo.ml_type != "imgseg") or (args.batch_size == 1)
    assert (ds_metainfo.ml_type != "imgseg") or args.disable_cudnn_autotune
    return ds_metainfo
def prepare_data_source(ds_metainfo,
                        data_subset,
                        batch_size,
                        num_workers):
    """
    Prepare a data loader for the requested subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    data_subset : str
        Data subset, one of 'val' or 'test'.
    batch_size : int
        Batch size.
    num_workers : int
        Number of background workers.

    Returns:
    -------
    DataLoader
        Data source.
    """
    assert (data_subset in ("val", "test"))
    factory = get_val_data_source if data_subset == "val" else get_test_data_source
    return factory(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=num_workers)
def prepare_metric(ds_metainfo,
                   data_subset):
    """
    Prepare the composite metric for the requested subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    data_subset : str
        Data subset, one of 'val' or 'test'.

    Returns:
    -------
    CompositeEvalMetric
        Metric object instance.
    """
    assert (data_subset in ("val", "test"))
    if data_subset == "val":
        names = ds_metainfo.val_metric_names
        extra_kwargs = ds_metainfo.val_metric_extra_kwargs
    else:
        names = ds_metainfo.test_metric_names
        extra_kwargs = ds_metainfo.test_metric_extra_kwargs
    return get_composite_metric(
        metric_names=names,
        metric_extra_kwargs=extra_kwargs)
def update_input_image_size(net,
                            input_size):
    """
    Update input image size for model.

    The model's own `in_size` attribute (unwrapping a DataParallel-style
    `.module` wrapper first) takes precedence over the supplied value.

    Parameters:
    ----------
    net : Module
        Model.
    input_size : int or tuple of 2 ints or None
        Preliminary value for input image size.

    Returns:
    -------
    tuple of 2 ints
        Spatial size of the expected input image (or `input_size` unchanged
        when the model carries no size and a tuple/None was supplied).
    """
    real_net = net.module if hasattr(net, "module") else net
    if hasattr(real_net, "in_size"):
        return real_net.in_size
    # Fixed: use isinstance instead of `type(input_size) == int`, so int
    # subclasses are also expanded to a square (H, W) tuple.
    return (input_size, input_size) if isinstance(input_size, int) else input_size
def calc_model_accuracy(net,
                        test_data,
                        metric,
                        use_cuda,
                        input_image_size,
                        in_channels,
                        calc_weight_count=False,
                        calc_flops=False,
                        calc_flops_only=True,
                        extended_log=False,
                        ml_type="cls"):
    """
    Estimating particular model accuracy, optionally reporting parameter
    count and FLOPs statistics.

    Parameters:
    ----------
    net : Module
        Model.
    test_data : DataLoader
        Data loader.
    metric : EvalMetric
        Metric object instance.
    use_cuda : bool
        Whether to use CUDA.
    input_image_size : tuple of 2 ints
        Spatial size of the expected input image.
    in_channels : int
        Number of input channels.
    calc_weight_count : bool, default False
        Whether to calculate count of weights.
    calc_flops : bool, default False
        Whether to calculate FLOPs.
    calc_flops_only : bool, default True
        Whether to only calculate FLOPs without testing.
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    ml_type : str, default 'cls'
        Machine learning type.

    Returns:
    -------
    list of floats
        Accuracy values (empty when only FLOPs are calculated).
    """
    if not calc_flops_only:
        tic = time.time()
        validate(
            metric=metric,
            net=net,
            val_data=test_data,
            use_cuda=use_cuda)
        accuracy_msg = report_accuracy(
            metric=metric,
            extended_log=extended_log)
        logging.info("Test: {}".format(accuracy_msg))
        logging.info("Time cost: {:.4f} sec".format(
            time.time() - tic))
        acc_values = metric.get()[1]
        # Normalize a scalar metric value to a one-element list.
        # (Fixed: use isinstance instead of `type(acc_values) == list`.)
        if not isinstance(acc_values, list):
            acc_values = [acc_values]
    else:
        acc_values = []
    if calc_weight_count:
        weight_count = calc_net_weight_count(net)
        if not calc_flops:
            # When FLOPs are measured, the count is cross-checked there instead.
            logging.info("Model: {} trainable parameters".format(weight_count))
    if calc_flops:
        # ASR models take a (features, length) input pair instead of an image tensor.
        in_shapes = [(1, 640 * 25 * 5), (1,)] if ml_type == "asr" else \
            [(1, in_channels, input_image_size[0], input_image_size[1])]
        num_flops, num_macs, num_params = measure_model(
            model=net,
            in_shapes=in_shapes)
        assert (not calc_weight_count) or (weight_count == num_params)
        stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \
                   " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)"
        logging.info(stat_msg.format(
            params=num_params, params_m=num_params / 1e6,
            flops=num_flops, flops_m=num_flops / 1e6,
            flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6,
            macs=num_macs, macs_m=num_macs / 1e6))
    return acc_values
def test_model(args):
    """
    Main test routine: build dataset/model/metric from args and evaluate.

    Parameters:
    ----------
    args : argparse.Namespace
        Main script arguments.

    Returns:
    -------
    float or None
        Main accuracy value, or None when no metric was evaluated.
    """
    ds_metainfo = prepare_dataset_metainfo(args=args)
    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    data_source = prepare_data_source(
        ds_metainfo=ds_metainfo,
        data_subset=args.data_subset,
        batch_size=batch_size,
        num_workers=args.num_workers)
    metric = prepare_metric(
        ds_metainfo=ds_metainfo,
        data_subset=args.data_subset)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        num_classes=(args.num_classes if ds_metainfo.ml_type != "hpe" else None),
        in_channels=args.in_channels,
        net_extra_kwargs=ds_metainfo.test_net_extra_kwargs,
        load_ignore_extra=ds_metainfo.load_ignore_extra,
        remove_module=args.remove_module)
    input_image_size = update_input_image_size(
        net=net,
        input_size=(args.input_size if hasattr(args, "input_size") else None))
    if args.show_progress:
        from tqdm import tqdm
        data_source = tqdm(data_source)
    # Weights must come from somewhere unless only FLOPs are requested.
    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    acc_values = calc_model_accuracy(
        net=net,
        test_data=data_source,
        metric=metric,
        use_cuda=use_cuda,
        input_image_size=input_image_size,
        in_channels=args.in_channels,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True,
        ml_type=ds_metainfo.ml_type)
    return acc_values[ds_metainfo.saver_acc_ind] if len(acc_values) > 0 else None
def main():
    """
    Main body of script: evaluate one model, or — with --all — sweep every
    registered pretrained ImageNet model and flag accuracy mismatches.
    """
    args = parse_args()
    if args.disable_cudnn_autotune:
        # NOTE(review): env var name references MXNet although this is the
        # PyTorch script — confirm this is intentional.
        os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    if args.all:
        args.use_pretrained = True
        # Map model-store dataset codes to dataset names used by the metainfo registry.
        dataset_name_map = {
            "in1k": "ImageNet1K",
            "cub": "CUB200_2011",
            "cf10": "CIFAR10",
            "cf100": "CIFAR100",
            "svhn": "SVHN",
            "voc": "VOC",
            "ade20k": "ADE20K",
            "cs": "Cityscapes",
            "cocoseg": "CocoSeg",
            "cocohpe": "CocoHpe",
            "hp": "HPatches",
            "ls": "LibriSpeech",
            "mcv": "MCV",
        }
        # Python 2/3 compatible dict iteration over the model registry.
        for model_name, model_metainfo in (_model_sha1.items() if version_info[0] >= 3 else _model_sha1.iteritems()):
            error, checksum, repo_release_tag, caption, paper, ds, img_size, scale, batch, rem = model_metainfo
            # Skip non-ImageNet entries, unknown input sizes, and '*'-marked remarks.
            if (ds != "in1k") or (img_size == 0) or ((len(rem) > 0) and (rem[-1] == "*")):
                continue
            args.dataset = dataset_name_map[ds]
            args.model = model_name
            args.input_size = img_size
            args.resize_inv_factor = scale
            args.batch_size = batch
            logging.info("==============")
            logging.info("Checking model: {}".format(model_name))
            acc_value = test_model(args=args)
            if acc_value is not None:
                # Stored error is in units of 1e-4; tolerate 2e-4 absolute drift.
                exp_value = int(error) * 1e-4
                if abs(acc_value - exp_value) > 2e-4:
                    logging.info("----> Wrong value detected (expected value: {})!".format(exp_value))
    else:
        test_model(args=args)


if __name__ == "__main__":
    main()
| 13,989
| 29.347072
| 120
|
py
|
imgclsmob
|
imgclsmob-master/eval_gl_det.py
|
"""
Script for evaluating trained model on MXNet/Gluon (validate/test).
"""
import os
import time
import logging
import argparse
from sys import version_info
from common.logger_utils import initialize_logging
from gluon.utils import prepare_mx_context, prepare_model
from gluon.utils import calc_net_weight_count, validate
from gluon.utils import get_composite_metric
from gluon.utils import report_accuracy
from gluon.dataset_utils import get_dataset_metainfo
from gluon.dataset_utils import get_batch_fn
from gluon.dataset_utils import get_val_data_source, get_test_data_source
from gluon.model_stats import measure_model
from gluon.gluoncv2.models.model_store import _model_sha1
def add_eval_parser_arguments(parser):
    """
    Create python script parameters (for eval specific subpart).

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance to which the evaluation options are added.
    """
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="type of model to use. see model_provider for options")
    parser.add_argument(
        "--use-pretrained",
        action="store_true",
        help="enable using pretrained model from github repo")
    parser.add_argument(
        "--dtype",
        type=str,
        default="float32",
        help="base data type for tensors")
    parser.add_argument(
        "--resume",
        type=str,
        default="",
        help="resume from previously saved parameters")
    parser.add_argument(
        "--calc-flops",
        dest="calc_flops",
        action="store_true",
        help="calculate FLOPs")
    parser.add_argument(
        "--calc-flops-only",
        dest="calc_flops_only",
        action="store_true",
        help="calculate FLOPs without quality estimation")
    parser.add_argument(
        "--data-subset",
        type=str,
        default="val",
        help="data subset. options are val and test")
    parser.add_argument(
        "--num-gpus",
        type=int,
        default=0,
        help="number of gpus to use")
    parser.add_argument(
        "-j",
        "--num-data-workers",
        dest="num_workers",
        default=4,
        type=int,
        help="number of preprocessing workers")
    parser.add_argument(
        "--batch-size",
        type=int,
        default=512,
        help="training batch size per device (CPU/GPU)")
    parser.add_argument(
        "--save-dir",
        type=str,
        default="",
        help="directory of saved models and log-files")
    parser.add_argument(
        "--logging-file-name",
        type=str,
        default="train.log",
        help="filename of training log")
    parser.add_argument(
        "--log-packages",
        type=str,
        default="mxnet, numpy",
        help="list of python packages for logging")
    parser.add_argument(
        "--log-pip-packages",
        type=str,
        # Fixed: the default previously contained a Cyrillic letter instead of
        # a comma separator ("mxnet-cu102б mxnet-cu110").
        default="mxnet-cu102, mxnet-cu110",
        help="list of pip packages for logging")
    parser.add_argument(
        "--disable-cudnn-autotune",
        action="store_true",
        help="disable cudnn autotune for segmentation models")
    parser.add_argument(
        "--show-progress",
        action="store_true",
        help="show progress bar")
    parser.add_argument(
        "--all",
        action="store_true",
        help="test all pretrained models for particular dataset")
def parse_args():
    """
    Build and parse the common command-line parameters of the script.

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    cli = argparse.ArgumentParser(
        description="Evaluate a model for image classification/segmentation (Gluon)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cli.add_argument(
        "--dataset", type=str, default="ImageNet1K_rec",
        help="dataset name. options are ImageNet1K, ImageNet1K_rec, CUB200_2011, CIFAR10, CIFAR100, SVHN, VOC2012, "
             "ADE20K, Cityscapes, COCO")
    cli.add_argument(
        "--work-dir", type=str, default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    # First pass: only --dataset/--work-dir are needed to decide which
    # dataset-specific options must be registered before the final parse.
    known, _ = cli.parse_known_args()
    ds_info = get_dataset_metainfo(dataset_name=known.dataset)
    ds_info.add_dataset_parser_arguments(
        parser=cli,
        work_dir_path=known.work_dir)
    add_eval_parser_arguments(cli)
    return cli.parse_args()
def calc_model_accuracy(net,
                        test_data,
                        batch_fn,
                        data_source_needs_reset,
                        metric,
                        dtype,
                        ctx,
                        input_image_size,
                        in_channels,
                        calc_weight_count=False,
                        calc_flops=False,
                        calc_flops_only=True,
                        extended_log=False):
    """
    Main test routine.

    Parameters:
    ----------
    net : HybridBlock
        Model.
    test_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator.
    batch_fn : func
        Function for splitting data after extraction from data loader.
    data_source_needs_reset : bool
        Whether to reset data (if test_data is ImageRecordIter).
    metric : EvalMetric
        Metric object instance.
    dtype : str
        Base data type for tensors.
    ctx : Context
        MXNet context.
    input_image_size : tuple of 2 ints
        Spatial size of the expected input image.
    in_channels : int
        Number of input channels.
    calc_weight_count : bool, default False
        Whether to calculate count of weights.
    calc_flops : bool, default False
        Whether to calculate FLOPs.
    calc_flops_only : bool, default True
        Whether to only calculate FLOPs without testing.
    extended_log : bool, default False
        Whether to log more precise accuracy values.

    Returns:
    -------
    list of floats
        Accuracy values (empty when only FLOPs were requested).
    """
    if not calc_flops_only:
        tic = time.time()
        validate(
            metric=metric,
            net=net,
            val_data=test_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        accuracy_msg = report_accuracy(
            metric=metric,
            extended_log=extended_log)
        logging.info("Test: {}".format(accuracy_msg))
        logging.info("Time cost: {:.4f} sec".format(
            time.time() - tic))
        acc_values = metric.get()[1]
        # Normalize to a list: composite metrics return a list, single metrics
        # a scalar. isinstance is the idiomatic type check (was `type(...) ==`).
        acc_values = acc_values if isinstance(acc_values, list) else [acc_values]
    else:
        acc_values = []
    if calc_weight_count:
        weight_count = calc_net_weight_count(net)
        if not calc_flops:
            logging.info("Model: {} trainable parameters".format(weight_count))
    if calc_flops:
        num_flops, num_macs, num_params = measure_model(net, in_channels, input_image_size, ctx[0])
        # Cross-check the two independent parameter counts when both computed.
        assert (not calc_weight_count) or (weight_count == num_params)
        stat_msg = "Params: {params} ({params_m:.2f}M), FLOPs: {flops} ({flops_m:.2f}M)," \
                   " FLOPs/2: {flops2} ({flops2_m:.2f}M), MACs: {macs} ({macs_m:.2f}M)"
        logging.info(stat_msg.format(
            params=num_params, params_m=num_params / 1e6,
            flops=num_flops, flops_m=num_flops / 1e6,
            flops2=num_flops / 2, flops2_m=num_flops / 2 / 1e6,
            macs=num_macs, macs_m=num_macs / 1e6))
    return acc_values
def test_model(args):
    """
    Main test routine: build the model/dataset from `args` and evaluate it.

    Parameters:
    ----------
    args : ArgumentParser
        Main script arguments.

    Returns:
    -------
    float
        Main accuracy value (the metric selected by `ds_metainfo.saver_acc_ind`),
        or None when no accuracy was computed (FLOPs-only run).
    """
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    # Segmentation evaluation is only supported with batch size 1 and with
    # cuDNN autotune disabled.
    assert (ds_metainfo.ml_type != "imgseg") or (args.batch_size == 1)
    assert (ds_metainfo.ml_type != "imgseg") or args.disable_cudnn_autotune
    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        net_extra_kwargs=ds_metainfo.test_net_extra_kwargs,
        load_ignore_extra=ds_metainfo.load_ignore_extra,
        classes=(args.classes if ds_metainfo.ml_type != "hpe" else None),
        in_channels=args.in_channels,
        # Hybridization is skipped when FLOPs are measured.
        do_hybridize=(ds_metainfo.allow_hybridize and (not args.calc_flops)),
        ctx=ctx)
    assert (hasattr(net, "in_size"))
    input_image_size = net.in_size
    # Pick the validation or test split loader according to --data-subset.
    get_test_data_source_class = get_val_data_source if args.data_subset == "val" else get_test_data_source
    test_data = get_test_data_source_class(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    batch_fn = get_batch_fn(ds_metainfo=ds_metainfo)
    if args.data_subset == "val":
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.val_metric_names,
            metric_extra_kwargs=ds_metainfo.val_metric_extra_kwargs)
    else:
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.test_metric_names,
            metric_extra_kwargs=ds_metainfo.test_metric_extra_kwargs)
    if args.show_progress:
        from tqdm import tqdm
        test_data = tqdm(test_data)
    # Without pretrained/resumed weights only a FLOPs-only run makes sense.
    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    acc_values = calc_model_accuracy(
        net=net,
        test_data=test_data,
        batch_fn=batch_fn,
        data_source_needs_reset=ds_metainfo.use_imgrec,
        metric=test_metric,
        dtype=args.dtype,
        ctx=ctx,
        input_image_size=input_image_size,
        in_channels=args.in_channels,
        # calc_weight_count=(not log_file_exist),
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True)
    return acc_values[ds_metainfo.saver_acc_ind] if len(acc_values) > 0 else None
def main():
    """
    Main body of script: parse arguments, set up logging, then evaluate either
    a single model or (with --all) every pretrained model from the model zoo.
    """
    args = parse_args()
    if args.disable_cudnn_autotune:
        # Must be set before MXNet initializes cuDNN.
        os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    if args.all:
        args.use_pretrained = True
        # `.iteritems()` branch keeps Python 2 compatibility.
        for model_name, model_metainfo in (_model_sha1.items() if version_info[0] >= 3 else _model_sha1.iteritems()):
            # NOTE(review): only 3 fields are unpacked here, while a sibling
            # evaluation script unpacks 10 from a table of the same name --
            # confirm this `_model_sha1` really stores 3-tuples.
            error, checksum, repo_release_tag = model_metainfo
            args.model = model_name
            logging.info("==============")
            logging.info("Checking model: {}".format(model_name))
            acc_value = test_model(args=args)
            if acc_value is not None:
                # `error` is stored as an integer in 1e-4 units.
                exp_value = int(error) * 1e-4
                if abs(acc_value - exp_value) > 2e-4:
                    logging.info("----> Wrong value detected (expected value: {})!".format(exp_value))
    else:
        test_model(args=args)
# Standard script entry point.
if __name__ == "__main__":
    main()
| 11,409
| 31.140845
| 117
|
py
|
imgclsmob
|
imgclsmob-master/train_ke.py
|
"""
Script for training model on Keras.
"""
import argparse
import time
import logging
import os
import numpy as np
import random
import keras
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
import mxnet as mx
from common.logger_utils import initialize_logging
from keras_.utils import prepare_ke_context, prepare_model, get_data_rec, get_data_generator, backend_agnostic_compile
def parse_args():
    """
    Parse python script parameters.

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    cli = argparse.ArgumentParser(
        description="Train a model for image classification (Keras)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Dataset location (ImageNet RecordIO files).
    cli.add_argument("--rec-train", type=str,
                     default="../imgclsmob_data/imagenet_rec/train.rec",
                     help="the training data")
    cli.add_argument("--rec-train-idx", type=str,
                     default="../imgclsmob_data/imagenet_rec/train.idx",
                     help="the index of training data")
    cli.add_argument("--rec-val", type=str,
                     default="../imgclsmob_data/imagenet_rec/val.rec",
                     help="the validation data")
    cli.add_argument("--rec-val-idx", type=str,
                     default="../imgclsmob_data/imagenet_rec/val.idx",
                     help="the index of validation data")
    # Model selection and initialization.
    cli.add_argument("--model", type=str, required=True,
                     help="type of model to use. see model_provider for options")
    cli.add_argument("--use-pretrained", action="store_true",
                     help="enable using pretrained model from github repo")
    cli.add_argument("--dtype", type=str, default="float32",
                     help="data type for training")
    cli.add_argument("--resume", type=str, default="",
                     help="resume from previously saved parameters if not None")
    cli.add_argument("--resume-state", type=str, default="",
                     help="resume from previously saved optimizer state if not None")
    cli.add_argument("--input-size", type=int, default=224,
                     help="size of the input for model")
    cli.add_argument("--resize-inv-factor", type=float, default=0.875,
                     help="inverted ratio for input image crop")
    # Hardware / data loading.
    cli.add_argument("--num-gpus", type=int, default=0,
                     help="number of gpus to use")
    cli.add_argument("-j", "--num-data-workers", dest="num_workers",
                     type=int, default=4,
                     help="number of preprocessing workers")
    cli.add_argument("--batch-size", type=int, default=512,
                     help="training batch size per device (CPU/GPU)")
    # Training schedule.
    cli.add_argument("--num-epochs", type=int, default=120,
                     help="number of training epochs")
    cli.add_argument("--start-epoch", type=int, default=1,
                     help="starting epoch for resuming, default is 1 for new training")
    cli.add_argument("--attempt", type=int, default=1,
                     help="current number of training")
    # Optimizer hyper-parameters.
    cli.add_argument("--optimizer-name", type=str, default="nag",
                     help="optimizer name")
    cli.add_argument("--lr", type=float, default=0.1,
                     help="learning rate")
    cli.add_argument("--momentum", type=float, default=0.9,
                     help="momentum value for optimizer")
    cli.add_argument("--wd", type=float, default=0.0001,
                     help="weight decay rate")
    # Logging / checkpointing.
    cli.add_argument("--log-interval", type=int, default=50,
                     help="number of batches to wait before logging")
    cli.add_argument("--save-interval", type=int, default=4,
                     help="saving parameters epoch interval, best model will always be saved")
    cli.add_argument("--save-dir", type=str, default="",
                     help="directory of saved models and log-files")
    cli.add_argument("--logging-file-name", type=str, default="train.log",
                     help="filename of training log")
    cli.add_argument("--seed", type=int, default=-1,
                     help="Random seed to be fixed")
    cli.add_argument("--log-packages", type=str, default="keras",
                     help="list of python packages for logging")
    cli.add_argument("--log-pip-packages", type=str,
                     default="keras, keras-mxnet, keras-applications, keras-preprocessing",
                     help="list of pip packages for logging")
    return cli.parse_args()
def init_rand(seed):
    """
    Seed all random generators used by the script (stdlib, NumPy, MXNet).

    Parameters:
    ----------
    seed : int
        Requested seed; a non-positive value means "draw one at random".

    Returns:
    -------
    int
        The seed that was actually applied.
    """
    actual_seed = np.random.randint(10000) if seed <= 0 else seed
    random.seed(actual_seed)
    np.random.seed(actual_seed)
    mx.random.seed(actual_seed)
    return actual_seed
def prepare_trainer(net,
                    optimizer_name,
                    momentum,
                    lr,
                    num_gpus,
                    state_file_path=None):
    """
    Compile the network with the requested optimizer and optionally restore a
    previously saved model/optimizer state.

    Parameters:
    ----------
    net : Model
        Keras model to compile.
    optimizer_name : str
        Optimizer name; only 'sgd' and 'nag' are supported.
    momentum : float
        Momentum value for the optimizer.
    lr : float
        Learning rate.
    num_gpus : int
        Number of GPUs to use.
    state_file_path : str or None, default None
        Path of a saved model to resume from (ignored if empty/nonexistent).

    Returns:
    -------
    Model
        Compiled (and possibly restored) model.

    Raises:
    ------
    ValueError
        If `optimizer_name` is not supported.
    """
    optimizer_name = optimizer_name.lower()
    if (optimizer_name == "sgd") or (optimizer_name == "nag"):
        # 'nag' is plain SGD with Nesterov momentum enabled.
        optimizer = keras.optimizers.SGD(
            lr=lr,
            momentum=momentum,
            nesterov=(optimizer_name == "nag"))
    else:
        # Fixed typo in the error message ("Usupported" -> "Unsupported").
        raise ValueError("Unsupported optimizer: {}".format(optimizer_name))
    backend_agnostic_compile(
        model=net,
        loss="categorical_crossentropy",
        optimizer=optimizer,
        metrics=[keras.metrics.categorical_accuracy, keras.metrics.top_k_categorical_accuracy],
        num_gpus=num_gpus)
    if (state_file_path is not None) and state_file_path and os.path.exists(state_file_path):
        # Restoring a full saved model replaces the freshly compiled one.
        net = load_model(filepath=state_file_path)
    return net
def train_net(net,
              train_gen,
              val_gen,
              train_num_examples,
              val_num_examples,
              num_epochs,
              checkpoint_filepath,
              start_epoch1):
    """
    Fit the compiled Keras model on a generator, checkpointing the best model.

    Parameters:
    ----------
    net : Model
        Compiled Keras model.
    train_gen : generator
        Training batch generator.
    val_gen : generator
        Validation batch generator.
    train_num_examples : int
        Number of training samples.
    val_num_examples : int
        Number of validation samples.
    num_epochs : int
        Number of training epochs.
    checkpoint_filepath : str
        Where to save the best checkpoint.
    start_epoch1 : int
        1-based starting epoch (for resuming).
    """
    checkpointer = ModelCheckpoint(
        filepath=checkpoint_filepath,
        verbose=1,
        save_best_only=True)
    tic = time.time()
    # NOTE(review): `samples_per_epoch` is a legacy Keras 1.x argument; modern
    # Keras expects `steps_per_epoch` (in batches) and deprecates
    # `fit_generator` in favor of `fit` -- confirm the pinned Keras version
    # accepts this call.
    net.fit_generator(
        generator=train_gen,
        samples_per_epoch=train_num_examples,
        epochs=num_epochs,
        verbose=True,
        callbacks=[checkpointer],
        validation_data=val_gen,
        validation_steps=val_num_examples,
        class_weight=None,
        max_queue_size=10,
        workers=1,
        use_multiprocessing=False,
        shuffle=True,
        # Keras counts epochs from 0 internally.
        initial_epoch=(start_epoch1 - 1))
    logging.info("Time cost: {:.4f} sec".format(
        time.time() - tic))
def main():
    """
    Main body of script: parse arguments, seed RNGs, build model and data
    pipelines, then run training.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    batch_size = prepare_ke_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip())
    # Fall back to ImageNet-1K defaults when the model does not expose metadata.
    num_classes = net.classes if hasattr(net, "classes") else 1000
    input_image_size = net.in_size if hasattr(net, "in_size") else (args.input_size, args.input_size)
    train_data, val_data = get_data_rec(
        rec_train=args.rec_train,
        rec_train_idx=args.rec_train_idx,
        rec_val=args.rec_val,
        rec_val_idx=args.rec_val_idx,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor)
    train_gen = get_data_generator(
        data_iterator=train_data,
        num_classes=num_classes)
    val_gen = get_data_generator(
        data_iterator=val_data,
        num_classes=num_classes)
    net = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        momentum=args.momentum,
        lr=args.lr,
        num_gpus=args.num_gpus,
        state_file_path=args.resume_state)
    # Hard-coded sample counts; presumably the ImageNet-1K train/val record
    # sizes used by this project -- confirm against the dataset build.
    train_net(
        net=net,
        train_gen=train_gen,
        val_gen=val_gen,
        train_num_examples=1281167,
        val_num_examples=50048,
        num_epochs=args.num_epochs,
        checkpoint_filepath=os.path.join(args.save_dir, "imagenet_{}.h5".format(args.model)),
        start_epoch1=args.start_epoch)
# Standard script entry point.
if __name__ == "__main__":
    main()
| 8,801
| 26.85443
| 118
|
py
|
imgclsmob
|
imgclsmob-master/eval_tf2.py
|
"""
Script for evaluating trained model on TensorFlow 2.0 (validate/test).
"""
import os
import time
import logging
import argparse
from sys import version_info
import tensorflow as tf
from common.logger_utils import initialize_logging
from tensorflow2.utils import prepare_model
from tensorflow2.tf2cv.models.model_store import _model_sha1
from tensorflow2.dataset_utils import get_dataset_metainfo, get_val_data_source, get_test_data_source
from tensorflow2.utils import get_composite_metric
from tensorflow2.utils import report_accuracy
def add_eval_parser_arguments(parser):
    """
    Create python script parameters (for eval specific subpart).

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance to which the evaluation options are added.
    """
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="type of model to use. see model_provider for options")
    parser.add_argument(
        "--use-pretrained",
        action="store_true",
        help="enable using pretrained model from github repo")
    parser.add_argument(
        "--resume",
        type=str,
        default="",
        help="resume from previously saved parameters")
    parser.add_argument(
        "--calc-flops-only",
        dest="calc_flops_only",
        action="store_true",
        help="calculate FLOPs without quality estimation")
    parser.add_argument(
        "--data-subset",
        type=str,
        default="val",
        help="data subset. options are val and test")
    parser.add_argument(
        "--num-gpus",
        type=int,
        default=0,
        help="number of gpus to use")
    parser.add_argument(
        "-j",
        "--num-data-workers",
        dest="num_workers",
        default=4,
        type=int,
        help="number of preprocessing workers")
    parser.add_argument(
        "--batch-size",
        type=int,
        default=512,
        help="training batch size per device (CPU/GPU)")
    parser.add_argument(
        "--save-dir",
        type=str,
        default="",
        help="directory of saved models and log-files")
    parser.add_argument(
        "--logging-file-name",
        type=str,
        default="train.log",
        help="filename of training log")
    parser.add_argument(
        "--log-packages",
        type=str,
        default="tensorflow, tensorflow-gpu",
        help="list of python packages for logging")
    parser.add_argument(
        "--log-pip-packages",
        type=str,
        default="tensorflow, tensorflow-gpu",
        help="list of pip packages for logging")
    parser.add_argument(
        "--disable-cudnn-autotune",
        action="store_true",
        help="disable cudnn autotune for segmentation models")
    parser.add_argument(
        "--show-progress",
        action="store_true",
        help="show progress bar")
    parser.add_argument(
        "--all",
        action="store_true",
        # Fixed typo in help text ("partucular" -> "particular").
        help="test all pretrained models for particular dataset")
def parse_args():
    """
    Build and parse the common command-line parameters of the script.

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    cli = argparse.ArgumentParser(
        description="Evaluate a model for image classification/segmentation (TensorFlow 2.0)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cli.add_argument(
        "--dataset", type=str, default="ImageNet1K",
        help="dataset name. options are ImageNet1K, ImageNet1K_rec, CUB200_2011, CIFAR10, CIFAR100, SVHN, VOC2012, "
             "ADE20K, Cityscapes, COCO")
    cli.add_argument(
        "--work-dir", type=str, default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    # Pre-parse just --dataset/--work-dir so the dataset-specific options can
    # be registered before the final, full parse.
    known, _ = cli.parse_known_args()
    ds_info = get_dataset_metainfo(dataset_name=known.dataset)
    ds_info.add_dataset_parser_arguments(
        parser=cli,
        work_dir_path=known.work_dir)
    add_eval_parser_arguments(cli)
    return cli.parse_args()
def test_model(args,
               use_cuda,
               data_format):
    """
    Main test routine: build the model/dataset from `args` and evaluate it.

    Parameters:
    ----------
    args : ArgumentParser
        Main script arguments.
    use_cuda : bool
        Whether to use CUDA.
    data_format : str
        The ordering of the dimensions in tensors.

    Returns:
    -------
    list of floats
        Accuracy values (empty when only FLOPs were requested).
        Note: the original docstring claimed a single float was returned;
        the code has always returned a list.
    """
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    # Segmentation evaluation is only supported with batch size 1 and with
    # cuDNN autotune disabled.
    assert (ds_metainfo.ml_type != "imgseg") or (args.batch_size == 1)
    assert (ds_metainfo.ml_type != "imgseg") or args.disable_cudnn_autotune
    batch_size = args.batch_size
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        net_extra_kwargs=ds_metainfo.test_net_extra_kwargs,
        load_ignore_extra=ds_metainfo.load_ignore_extra,
        batch_size=batch_size,
        use_cuda=use_cuda)
    assert (hasattr(net, "in_size"))
    if not args.calc_flops_only:
        tic = time.time()
        # Pick the validation or test split loader according to --data-subset.
        get_test_data_source_class = get_val_data_source if args.data_subset == "val" else get_test_data_source
        test_data, total_img_count = get_test_data_source_class(
            ds_metainfo=ds_metainfo,
            batch_size=args.batch_size,
            data_format=data_format)
        if args.data_subset == "val":
            test_metric = get_composite_metric(
                metric_names=ds_metainfo.val_metric_names,
                metric_extra_kwargs=ds_metainfo.val_metric_extra_kwargs)
        else:
            test_metric = get_composite_metric(
                metric_names=ds_metainfo.test_metric_names,
                metric_extra_kwargs=ds_metainfo.test_metric_extra_kwargs)
        if args.show_progress:
            from tqdm import tqdm
            test_data = tqdm(test_data)
        processed_img_count = 0
        for test_images, test_labels in test_data:
            predictions = net(test_images)
            test_metric.update(test_labels, predictions)
            processed_img_count += len(test_images)
            # The data source may repeat; stop after one full pass.
            if processed_img_count >= total_img_count:
                break
        accuracy_msg = report_accuracy(
            metric=test_metric,
            extended_log=True)
        logging.info("Test: {}".format(accuracy_msg))
        logging.info("Time cost: {:.4f} sec".format(
            time.time() - tic))
        acc_values = test_metric.get()[1]
        # Normalize to a list: composite metrics return a list, single metrics
        # a scalar. isinstance is the idiomatic type check (was `type(...) ==`).
        acc_values = acc_values if isinstance(acc_values, list) else [acc_values]
    else:
        acc_values = []
    return acc_values
def main():
    """
    Main body of script: configure TensorFlow, set up logging, then evaluate
    either a single model or (with --all) every pretrained model.
    """
    args = parse_args()
    if args.disable_cudnn_autotune:
        # Must be set before TensorFlow initializes cuDNN.
        os.environ["TF_CUDNN_USE_AUTOTUNE"] = "0"
        # os.environ["TF_CUDNN_DETERMINISTIC"] = "1"
        # os.environ["TF_DETERMINISTIC_OPS"] = "1"
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        for gpu in gpus:
            # Grow GPU memory on demand instead of grabbing it all up front.
            tf.config.experimental.set_memory_growth(gpu, True)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    data_format = "channels_last"
    tf.keras.backend.set_image_data_format(data_format)
    use_cuda = (args.num_gpus > 0)
    if args.all:
        args.use_pretrained = True
        # Maps the short dataset tags stored in `_model_sha1` to dataset names.
        dataset_name_map = {
            "in1k": "ImageNet1K",
            "cub": "CUB200_2011",
            "cf10": "CIFAR10",
            "cf100": "CIFAR100",
            "svhn": "SVHN",
            "voc": "VOC",
            "ade20k": "ADE20K",
            "cs": "Cityscapes",
            "cocoseg": "CocoSeg",
            "cocohpe": "CocoHpe",
            "hp": "HPatches",
            "ls": "LibriSpeech",
            "mcv": "MCV",
        }
        # `.iteritems()` branch keeps Python 2 compatibility.
        for model_name, model_metainfo in (_model_sha1.items() if version_info[0] >= 3 else _model_sha1.iteritems()):
            error, checksum, repo_release_tag, ds, scale = model_metainfo
            args.dataset = dataset_name_map[ds]
            args.model = model_name
            args.resize_inv_factor = scale
            logging.info("==============")
            logging.info("Checking model: {}".format(model_name))
            acc_value = test_model(
                args=args,
                use_cuda=use_cuda,
                data_format=data_format)
            # NOTE(review): `test_model` returns a list of accuracy values, so
            # `acc_value is not None` is always true and `abs(acc_value - ...)`
            # below would raise TypeError on a non-empty list -- this looks
            # like a latent bug; confirm against the gluon variant, which
            # returns a single float.
            if acc_value is not None:
                # `error` is stored as an integer in 1e-4 units.
                exp_value = int(error) * 1e-4
                if abs(acc_value - exp_value) > 2e-4:
                    logging.info("----> Wrong value detected (expected value: {})!".format(exp_value))
            # Free graph/session state between models to bound memory use.
            tf.keras.backend.clear_session()
    else:
        test_model(
            args=args,
            use_cuda=use_cuda,
            data_format=data_format)
# Standard script entry point.
if __name__ == "__main__":
    main()
| 9,076
| 29.979522
| 117
|
py
|
imgclsmob
|
imgclsmob-master/prep_model.py
|
"""
Script for preparing the model for publication.
"""
import os
import argparse
import subprocess
import shutil
import re
import hashlib
import zipfile
import pandas as pd
def parse_args():
    """
    Parse python script parameters.

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    cli = argparse.ArgumentParser(
        description="Prepare model",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cli.add_argument("--model", type=str, required=True,
                     help="model name")
    cli.add_argument("--resume", type=str, default="",
                     help="model weights (Gluon) file path")
    cli.add_argument("--input-size", type=int, default=224,
                     help="size of the input for model")
    return cli.parse_args()
def calc_sha1(file_name):
    """
    Calculate sha1 hash of the file content.

    Parameters:
    ----------
    file_name : str
        Path to the file.

    Returns:
    -------
    str
        sha1 hex digest.
    """
    sha1 = hashlib.sha1()
    with open(file_name, "rb") as f:
        # Read in 1 MiB chunks so arbitrarily large files never load whole
        # into memory; iter() with a b"" sentinel stops at EOF.
        for data in iter(lambda: f.read(1048576), b""):
            sha1.update(data)
    return sha1.hexdigest()
def post_process(dst_dir_path,
                 model_name,
                 model_file_path,
                 log_file_path,
                 dst_model_file_ext,
                 log_line_num):
    """
    Post-process weight/log files: extract the top-5 error from the log,
    rename the weight file to `<model>-<err5>-<sha1[:8]>.<ext>`, zip it and
    keep the log next to it.

    Parameters:
    ----------
    dst_dir_path : str
        Destination dir path.
    model_name : str
        Model name.
    model_file_path : str
        Model file path.
    log_file_path : str
        Log file path.
    dst_model_file_ext : str
        Destination model file extension.
    log_line_num : int
        Log file last line number for analysis (negative index from the end).

    Returns:
    -------
    top5_err : str
        top5 error value (fractional digits only, e.g. "0512" from "7.0512").
    sha1_value : str
        sha1 hex digest.
    """
    with open(log_file_path, "r") as f:
        log_file_tail = f.read().splitlines()[log_line_num]
    err5_str = re.findall(r", err-top5=\d+\.\d+", log_file_tail)
    if len(err5_str) != 0:
        # Keep only the fractional part of the error as the file-name token.
        top5_err = re.findall(r"\d+\.\d+", err5_str[0])[0].split(".")[1]
    else:
        # Fall back to the line above when the expected line has no err-top5.
        with open(log_file_path, "r") as f:
            log_file_tail = f.read().splitlines()[log_line_num - 1]
        err5_str = re.findall(r", err-top5=\d+\.\d+", log_file_tail)
        top5_err = re.findall(r"\d+\.\d+", err5_str[0])[0].split(".")[1]
    sha1_value = calc_sha1(model_file_path)
    dst_model_file_name = "{}-{}-{}.{}".format(model_name, top5_err, sha1_value[:8], dst_model_file_ext)
    dst_model_file_path = os.path.join(dst_dir_path, dst_model_file_name)
    os.rename(model_file_path, dst_model_file_path)
    os.rename(log_file_path, dst_model_file_path + ".log")
    with zipfile.ZipFile(dst_model_file_path + ".zip", "w", zipfile.ZIP_DEFLATED) as zf:
        zf.write(filename=dst_model_file_path, arcname=dst_model_file_name)
    # Only the zipped copy (and the log) are kept.
    os.remove(dst_model_file_path)
    return top5_err, sha1_value
def process_fwk(prep_info_dict,
                dst_framework,
                dst_dir_path,
                model_name,
                model_file_path,
                log_file_path,
                input_size):
    """
    Process weights on a specific framework: convert from Gluon if needed,
    run the framework's evaluation script, then package weights/logs and
    record the result in `prep_info_dict`.

    Parameters:
    ----------
    prep_info_dict : dict
        Dictionary with preparation meta-info (mutated in place).
    dst_framework : str
        Destination framework ('gluon', 'pytorch', 'chainer' or 'tf2').
    dst_dir_path : str
        Destination dir path.
    model_name : str
        Model name.
    model_file_path : str
        Model file path.
    log_file_path : str
        Log file path.
    input_size : int
        Size of the input for model.
    """
    # Per-framework knobs: weight-file extension, eval script, and which log
    # line (from the end) carries the accuracy summary.
    if dst_framework == "gluon":
        dst_model_file_ext = "params"
        eval_script = "eval_gl"
        num_gpus = 1
        calc_flops = "--calc-flops"
        log_line_num = -3
    elif dst_framework == "pytorch":
        dst_model_file_ext = "pth"
        eval_script = "eval_pt"
        num_gpus = 1
        calc_flops = "--calc-flops"
        log_line_num = -3
    elif dst_framework == "chainer":
        dst_model_file_ext = "npz"
        eval_script = "eval_ch"
        num_gpus = 1
        calc_flops = ""
        log_line_num = -2
    elif dst_framework == "tf2":
        dst_model_file_ext = "tf2.h5"
        eval_script = "eval_tf2"
        num_gpus = 1
        calc_flops = ""
        log_line_num = -2
    else:
        raise ValueError("Unknown framework: {}".format(dst_framework))
    # A leftover "<name>-<err>-<sha>.<ext>.log" file marks an already-finished
    # run; in that case only the sha1 is recomputed from the archive.
    post_proc_log_files = [f for f in os.listdir(dst_dir_path) if f.endswith(".{}.log".format(dst_model_file_ext))]
    assert (len(post_proc_log_files) in [0, 1])
    if len(post_proc_log_files) == 0:
        dst_raw_log_file_path = os.path.join(dst_dir_path, "train.log")
        shutil.copy2(log_file_path, dst_raw_log_file_path)
        dst_raw_model_file_path = os.path.join(dst_dir_path, "{}.{}".format(model_name, dst_model_file_ext))
        if dst_framework == "gluon":
            # Source weights are already Gluon; no conversion needed.
            shutil.copy2(model_file_path, dst_raw_model_file_path)
        else:
            command = "python3 convert_models.py --src-fwk=gluon --dst-fwk={dst_framework} --src-model={model_name}" \
                      " --dst-model={model_name} --src-params={model_file_path}" \
                      " --dst-params={dst_raw_model_file_path} --save-dir={dst_dir_path}"
            subprocess.call([command.format(
                dst_framework=dst_framework,
                model_name=model_name,
                model_file_path=model_file_path,
                dst_raw_model_file_path=dst_raw_model_file_path,
                dst_dir_path=dst_dir_path)], shell=True)
        # Evaluate the (converted) weights; the eval script appends to train.log.
        command = "python3 {eval_script}.py --model={model_name} --resume={dst_raw_model_file_path}" \
                  " --save-dir={dst_dir_path} --num-gpus={num_gpus} --batch-size=100 -j=4 --input-size={input_size} " \
                  "{calc_flops}"
        subprocess.call([command.format(
            eval_script=eval_script,
            model_name=model_name,
            dst_raw_model_file_path=dst_raw_model_file_path,
            dst_dir_path=dst_dir_path,
            num_gpus=num_gpus,
            input_size=input_size,
            calc_flops=calc_flops)], shell=True)
        if dst_framework == "gluon":
            # Propagate the updated log back to the original location.
            shutil.copy2(dst_raw_log_file_path, log_file_path)
        top5_err, sha1_value = post_process(
            dst_dir_path=dst_dir_path,
            model_name=model_name,
            model_file_path=dst_raw_model_file_path,
            log_file_path=dst_raw_log_file_path,
            dst_model_file_ext=dst_model_file_ext,
            log_line_num=log_line_num)
    else:
        # Resume path: parse "<name>-<top5>-<sha8>" from the existing log name
        # and recompute the full sha1 from the zipped weight file.
        model_name1, top5_err, sha1_short = post_proc_log_files[0].split(".")[0].split("-")
        assert (model_name1 == model_name)
        dst_model_file_name = "{}-{}-{}.{}".format(model_name, top5_err, sha1_short, dst_model_file_ext)
        dst_model_file_path = os.path.join(dst_dir_path, dst_model_file_name)
        dst_zip_model_file_path = dst_model_file_path + ".zip"
        assert os.path.exists(dst_zip_model_file_path)
        with zipfile.ZipFile(dst_zip_model_file_path, "r") as zf:
            zf.extract(dst_model_file_name, dst_dir_path)
        sha1_value = calc_sha1(dst_model_file_path)
        os.remove(dst_model_file_path)
    prep_info_dict["Type"].append(dst_framework)
    prep_info_dict["Top5"].append(top5_err)
    prep_info_dict["Sha1"].append(sha1_value)
def main():
    """
    Main body of script: validate input paths, then convert/evaluate/package
    the model for every destination framework and write a summary CSV.
    """
    args = parse_args()
    model_name = args.model
    model_file_path = os.path.expanduser(args.resume)
    if not os.path.exists(model_file_path):
        raise Exception("Model file doesn't exist: {}".format(model_file_path))
    root_dir_path = os.path.dirname(model_file_path)
    # The training log is expected next to the weight file.
    log_file_path = os.path.join(root_dir_path, "train.log")
    if not os.path.exists(log_file_path):
        raise Exception("Log file doesn't exist: {}".format(log_file_path))
    dst_dir_path = os.path.join(root_dir_path, "_result")
    if not os.path.exists(dst_dir_path):
        os.mkdir(dst_dir_path)
    # Columns of the summary table, appended to by process_fwk().
    prep_info_dict = {
        "Type": [],
        "Top5": [],
        "Sha1": [],
    }
    input_size = args.input_size
    dst_frameworks = ["gluon", "pytorch", "chainer", "tf2"]
    # dst_frameworks = ["tf2"]
    for dst_framework in dst_frameworks:
        process_fwk(
            prep_info_dict=prep_info_dict,
            dst_framework=dst_framework,
            dst_dir_path=dst_dir_path,
            model_name=model_name,
            model_file_path=model_file_path,
            log_file_path=log_file_path,
            input_size=input_size)
    prep_info_df = pd.DataFrame(prep_info_dict)
    # Tab-separated summary of all produced artifacts.
    prep_info_df.to_csv(
        os.path.join(root_dir_path, "prep_info.csv"),
        sep="\t",
        index=False)
# Standard script entry point.
if __name__ == '__main__':
    main()
| 9,068
| 30.380623
| 119
|
py
|
imgclsmob
|
imgclsmob-master/train_tf.py
|
"""
Script for training model on TensorFlow.
"""
import argparse
import numpy as np
import random
from tensorpack.input_source import QueueInput
from tensorpack.utils import logger
from tensorpack.utils.gpu import get_num_gpu
from tensorpack import ModelSaver, ScheduledHyperParamSetter, EstimatedTimeLeft, ClassificationError, InferenceRunner,\
DataParallelInferenceRunner, TrainConfig, SyncMultiGPUTrainerParameterServer, launch_train_with_config
from common.logger_utils import initialize_logging
from tensorflow_.utils_tp import prepare_tf_context, prepare_model, get_data
def parse_args():
    """
    Parse python script parameters.

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Train a model for image classification (TensorFlow/TensorPack)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag tuple, keyword options) pairs, registered in the original
    # declaration order so the generated --help output is unchanged.
    arg_specs = [
        (("--data-dir",),
         dict(type=str, default="../imgclsmob_data/imagenet",
              help="training and validation pictures to use")),
        (("--data-format",),
         dict(type=str, default="channels_last",
              help="ordering of the dimensions in tensors. options are channels_last and channels_first")),
        (("--model",),
         dict(type=str, required=True,
              help="type of model to use. see model_provider for options")),
        (("--use-pretrained",),
         dict(action="store_true",
              help="enable using pretrained model from github repo")),
        (("--resume",),
         dict(type=str, default="",
              help="resume from previously saved parameters if not None")),
        (("--input-size",),
         dict(type=int, default=224,
              help="size of the input for model")),
        (("--resize-inv-factor",),
         dict(type=float, default=0.875,
              help="inverted ratio for input image crop")),
        (("--num-gpus",),
         dict(type=int, default=0,
              help="number of gpus to use")),
        (("-j", "--num-data-workers"),
         dict(dest="num_workers", default=4, type=int,
              help="number of preprocessing workers")),
        (("--batch-size",),
         dict(type=int, default=512,
              help="training batch size per device (CPU/GPU)")),
        (("--num-epochs",),
         dict(type=int, default=120,
              help="number of training epochs")),
        (("--start-epoch",),
         dict(type=int, default=1,
              help="starting epoch for resuming, default is 1 for new training")),
        (("--attempt",),
         dict(type=int, default=1,
              help="current number of training")),
        (("--optimizer-name",),
         dict(type=str, default="nag",
              help="optimizer name")),
        (("--lr",),
         dict(type=float, default=0.1,
              help="learning rate")),
        (("--momentum",),
         dict(type=float, default=0.9,
              help="momentum value for optimizer")),
        (("--wd",),
         dict(type=float, default=0.0001,
              help="weight decay rate")),
        (("--log-interval",),
         dict(type=int, default=50,
              help="number of batches to wait before logging")),
        (("--save-interval",),
         dict(type=int, default=4,
              help="saving parameters epoch interval, best model will always be saved")),
        (("--save-dir",),
         dict(type=str, default="",
              help="directory of saved models and log-files")),
        (("--logging-file-name",),
         dict(type=str, default="train.log",
              help="filename of training log")),
        (("--seed",),
         dict(type=int, default=-1,
              help="Random seed to be fixed")),
        (("--log-packages",),
         dict(type=str, default="tensorflow-gpu",
              help="list of python packages for logging")),
        (("--log-pip-packages",),
         dict(type=str, default="tensorflow-gpu, tensorpack",
              help="list of pip packages for logging")),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
def init_rand(seed):
    """Seed Python's and NumPy's global RNGs and return the seed used.

    A non-positive ``seed`` is replaced by a randomly drawn value in
    [0, 10000), so a fresh run can still be reproduced from the logged seed.
    """
    actual_seed = np.random.randint(10000) if seed <= 0 else seed
    random.seed(actual_seed)
    np.random.seed(actual_seed)
    return actual_seed
def train_net(net,
              session_init,
              batch_size,
              num_epochs,
              train_dataflow,
              val_dataflow):
    """Configure and launch multi-tower training with TensorPack.

    Parameters
    ----------
    net : ModelDesc
        TensorPack model description to train.
    session_init : SessionInit or None
        Initializer for the TF session (e.g. restored weights).
    batch_size : int
        Global batch size; split evenly across towers below.
    num_epochs : int
        Number of epochs to run.
    train_dataflow / val_dataflow : DataFlow
        Training and validation input pipelines.
    """
    # One tower per visible GPU; fall back to a single (CPU) tower.
    num_towers = max(get_num_gpu(), 1)
    batch_per_tower = batch_size // num_towers
    logger.info("Running on {} towers. Batch size per tower: {}".format(num_towers, batch_per_tower))

    # 1281167 is presumably the ImageNet-1k train-set size -- TODO confirm
    # against the dataflow actually used.
    num_training_samples = 1281167
    step_size = num_training_samples // batch_size
    max_iter = (num_epochs - 1) * step_size
    callbacks = [
        ModelSaver(),
        # NOTE(review): the schedule decays linearly from a hard-coded 0.5 to 0
        # over max_iter steps; the CLI --lr value is not used here -- verify
        # this is intentional.
        ScheduledHyperParamSetter(
            "learning_rate",
            [(0, 0.5), (max_iter, 0)],
            interp="linear",
            step_based=True),
        EstimatedTimeLeft()]
    # Top-1 / top-5 validation error metrics.
    infs = [ClassificationError("wrong-top1", "val-error-top1"),
            ClassificationError("wrong-top5", "val-error-top5")]
    if num_towers == 1:
        # single-GPU inference with queue prefetch
        callbacks.append(InferenceRunner(
            input=QueueInput(val_dataflow),
            infs=infs))
    else:
        # multi-GPU inference (with mandatory queue prefetch)
        callbacks.append(DataParallelInferenceRunner(
            input=val_dataflow,
            infs=infs,
            gpus=list(range(num_towers))))
    config = TrainConfig(
        dataflow=train_dataflow,
        model=net,
        callbacks=callbacks,
        session_init=session_init,
        steps_per_epoch=step_size,
        max_epoch=num_epochs)
    # Parameter-server style synchronous data parallelism across towers.
    launch_train_with_config(
        config=config,
        trainer=SyncMultiGPUTrainerParameterServer(num_towers))
def main():
    """
    Main body of script: parse arguments, set up logging, build the data
    pipelines and the model, then launch training.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    logger.set_logger_dir(args.save_dir)

    # Effective global batch size after accounting for the GPU count.
    global_batch_size = prepare_tf_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net, inputs_desc = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        data_format=args.data_format)

    # Build the training (True) and validation (False) dataflows.
    dataflows = {}
    for is_train_phase in (True, False):
        dataflows[is_train_phase] = get_data(
            is_train=is_train_phase,
            batch_size=global_batch_size,
            data_dir_path=args.data_dir,
            input_image_size=net.image_size,
            resize_inv_factor=args.resize_inv_factor)

    train_net(
        net=net,
        session_init=inputs_desc,
        batch_size=global_batch_size,
        num_epochs=args.num_epochs,
        train_dataflow=dataflows[True],
        val_dataflow=dataflows[False])


if __name__ == "__main__":
    main()
| 7,591
| 27.328358
| 119
|
py
|
imgclsmob
|
imgclsmob-master/convert_models.py
|
"""
Script for converting models between frameworks (MXNet, Gluon, PyTroch, Chainer, Keras, TensorFlow).
"""
import argparse
import logging
import re
import numpy as np
from common.logger_utils import initialize_logging
def parse_args():
    """Parse command-line parameters for the model-conversion script.

    Returns
    -------
    argparse.Namespace
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Convert models (Gluon/PyTorch/Chainer/MXNet/Keras/TF/TF2)",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # (flag, keyword options) pairs in the original declaration order so the
    # generated --help output is unchanged.
    arg_specs = [
        ("--src-fwk", dict(type=str, required=True,
                           help="source model framework name")),
        ("--dst-fwk", dict(type=str, required=True,
                           help="destination model framework name")),
        ("--src-model", dict(type=str, required=True,
                             help="source model name")),
        ("--dst-model", dict(type=str, required=True,
                             help="destination model name")),
        ("--src-params", dict(type=str, default="",
                              help="source model parameter file path")),
        ("--dst-params", dict(type=str, default="",
                              help="destination model parameter file path")),
        ("--load-ignore-extra", dict(action="store_true",
                                     help="ignore extra layers in the source PyTroch model")),
        ("--remove-module", dict(action="store_true",
                                 help="enable if stored PyTorch model has module")),
        ("--src-num-classes", dict(type=int, default=1000,
                                   help="number of classes for source model")),
        ("--src-in-channels", dict(type=int, default=3,
                                   help="number of input channels for source model")),
        ("--dst-num-classes", dict(type=int, default=1000,
                                   help="number of classes for destination model")),
        ("--dst-in-channels", dict(type=int, default=3,
                                   help="number of input channels for destination model")),
        ("--model-type", dict(type=str, default="image",
                              help="model type (image or audio)")),
        ("--save-dir", dict(type=str, default="",
                            help="directory of saved models and log-files")),
        ("--logging-file-name", dict(type=str, default="train.log",
                                     help="filename of training log")),
    ]
    for flag, options in arg_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()
def prepare_src_model(src_fwk,
                      src_model,
                      src_params_file_path,
                      dst_fwk,
                      ctx,
                      use_cuda,
                      load_ignore_extra=False,
                      remove_module=False,
                      num_classes=None,
                      in_channels=None):
    """Load the source model/weights for conversion and pre-filter its keys.

    Parameters
    ----------
    src_fwk : str
        Source framework ('gluon', 'pytorch', 'mxnet', 'tensorflow', or
        'tf2' when converting to 'tfl').
    src_model : str
        Source model name; several model families get special key filtering.
    src_params_file_path : str
        Path to the stored source parameters/checkpoint.
    dst_fwk : str
        Destination framework; affects which source keys are split off
        (e.g. BN running stats for Chainer).
    ctx : Context
        MXNet context (Gluon branch).
    use_cuda : bool
        Whether CUDA is available (PyTorch branch).
    load_ignore_extra : bool
        Ignore extra layers when loading the PyTorch checkpoint.
    remove_module : bool
        Strip a DataParallel 'module.' prefix from PyTorch keys.
    num_classes, in_channels : int or None
        Classifier size / input channels; non-positive num_classes means default.

    Returns
    -------
    tuple
        (src_params, src_param_keys, ext_src_param_keys, ext_src_param_keys2,
        src_net); the ext_* lists hold keys handled separately (BN stats,
        masks/indices, audio preprocessing buffers) and may be None.
    """
    ext_src_param_keys = None
    ext_src_param_keys2 = None
    src_net = None
    if src_fwk == "gluon":
        from gluon.utils import prepare_model as prepare_model_gl
        src_net = prepare_model_gl(
            model_name=src_model,
            use_pretrained=False,
            pretrained_model_file_path=src_params_file_path,
            dtype=np.float32,
            tune_layers="",
            classes=(num_classes if num_classes > 0 else None),
            in_channels=in_channels,
            ctx=ctx)
        src_params = src_net._collect_params_with_prefix()
        src_param_keys = list(src_params.keys())
        # Drop conv biases for these ResNet variants (their convs are bias-free
        # on the destination side).
        if src_model in ["oth_resnet50_v1", "oth_resnet101_v1", "oth_resnet152_v1", "oth_resnet50_v1b",
                         "oth_resnet101_v1b", "oth_resnet152_v1b"]:
            src_param_keys = [key for key in src_param_keys if
                              not (key.startswith("features.") and key.endswith(".bias"))]
        # NOTE(review): this block is an exact duplicate of the one above and
        # has no additional effect -- candidate for removal.
        if src_model in ["oth_resnet50_v1", "oth_resnet101_v1", "oth_resnet152_v1", "oth_resnet50_v1b",
                         "oth_resnet101_v1b", "oth_resnet152_v1b"]:
            src_param_keys = [key for key in src_param_keys if
                              not (key.startswith("features.") and key.endswith(".bias"))]
        if src_model.startswith("wrn20_10_1bit") or src_model.startswith("wrn20_10_32bit"):
            src_param_keys = [key for key in src_param_keys if
                              not (key.startswith("features.") and
                                   (key.endswith(".bn.gamma") or key.endswith(".bn.beta")))]
        if dst_fwk == "chainer":
            # Chainer stores BN running stats outside namedparams(); split them off.
            src_param_keys_ = src_param_keys.copy()
            src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".running_mean")) and
                              (not key.endswith(".running_var"))]
            ext_src_param_keys = [key for key in src_param_keys_ if (key.endswith(".running_mean")) or
                                  (key.endswith(".running_var"))]
            # Model-specific auxiliary buffers handled out-of-band.
            if src_model in ["condensenet74_c4_g4", "condensenet74_c8_g8"]:
                src_param_keys_ = src_param_keys.copy()
                src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".index"))]
                ext_src_param_keys2 = [key for key in src_param_keys_ if (key.endswith(".index"))]
            elif src_model.startswith("xdensenet"):
                src_param_keys_ = src_param_keys.copy()
                src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".mask"))]
                ext_src_param_keys2 = [key for key in src_param_keys_ if (key.endswith(".mask"))]
            elif src_model.startswith("jasper") or src_model.startswith("quartznet"):
                src_param_keys_ = src_param_keys.copy()
                src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".window")) and
                                  (not key.endswith(".fb"))]
                ext_src_param_keys2 = [key for key in src_param_keys_ if (key.endswith(".window")) or
                                       (key.endswith(".fb"))]
    elif src_fwk == "pytorch":
        from pytorch.utils import prepare_model as prepare_model_pt
        src_net = prepare_model_pt(
            model_name=src_model,
            use_pretrained=False,
            pretrained_model_file_path=src_params_file_path,
            use_cuda=use_cuda,
            use_data_parallel=False,
            load_ignore_extra=load_ignore_extra,
            num_classes=(num_classes if num_classes > 0 else None),
            in_channels=in_channels,
            remove_module=remove_module)
        src_params = src_net.state_dict()
        src_param_keys = list(src_params.keys())
        if dst_fwk != "pytorch":
            # BN bookkeeping buffers exist only in PyTorch.
            src_param_keys = [key for key in src_param_keys if not key.endswith("num_batches_tracked")]
        if src_model in ["oth_shufflenetv2_wd2"]:
            src_param_keys = [key for key in src_param_keys if not key.startswith("network.0.")]
        if src_model.startswith("oth_dla"):
            # Reorder DLA '.project' keys: for each level pick the longest-named
            # projection as a pattern and put its matches first -- presumably to
            # align with the destination key order; verify against dst model.
            src1 = list(filter(re.compile("\.project").search, src_param_keys))
            src1n = [key for key in src_param_keys if key not in src1]
            src2 = []
            for i in range(2, 6):
                src1_i = list(filter(re.compile("level{}".format(i)).search, src1))
                if len(src1_i) == 0:
                    continue
                max_len = max([len(k) for k in src1_i])
                pattern_i = [k for k in src1_i if len(k) == max_len][0][:-21]
                src2_i = list(filter(re.compile(pattern_i).search, src1))
                src2 += src2_i
            src_param_keys = src2 + src1n
    elif src_fwk == "mxnet":
        import mxnet as mx
        # Symbolic MXNet checkpoint: merge arg and aux params into one dict.
        src_sym, src_arg_params, src_aux_params = mx.model.load_checkpoint(
            prefix=src_params_file_path,
            epoch=0)
        src_params = {}
        src_params.update(src_arg_params)
        src_params.update(src_aux_params)
        src_param_keys = list(src_params.keys())
    elif src_fwk == "tensorflow":
        # import tensorflow as tf
        # from tensorflow_.utils import prepare_model as prepare_model_tf
        # src_net = prepare_model_tf(
        #     model_name=src_model,
        #     classes=num_classes,
        #     use_pretrained=False,
        #     pretrained_model_file_path=src_params_file_path)
        # src_param_keys = [v.name for v in tf.global_variables()]
        # src_params = {v.name: v for v in tf.global_variables()}
        # TF1 weights are read from an .npz archive rather than a live graph.
        src_net = None
        src_params = dict(np.load(src_params_file_path))
        src_param_keys = list(src_params.keys())
    elif (src_fwk == "tf2") and (dst_fwk == "tfl"):
        import tensorflow as tf
        from tensorflow2.utils import prepare_model as prepare_model_tf2
        gpus = tf.config.experimental.list_physical_devices("GPU")
        if gpus:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
        src_net = prepare_model_tf2(
            model_name=src_model,
            use_pretrained=True,
            pretrained_model_file_path="")
        batch_size = 1
        input_shape = ((batch_size, 3, src_net.in_size[0], src_net.in_size[1]) if
                       src_net.data_format == "channels_first" else
                       (batch_size, src_net.in_size[0], src_net.in_size[1], 3))
        # Dummy forward pass so the Keras model builds its weights.
        src_net(tf.random.normal(input_shape))
        src_params = None
        src_param_keys = None
    else:
        raise ValueError("Unsupported src fwk: {}".format(src_fwk))
    return src_params, src_param_keys, ext_src_param_keys, ext_src_param_keys2, src_net
def prepare_dst_model(dst_fwk,
                      dst_model,
                      src_fwk,
                      ctx,
                      use_cuda,
                      num_classes=None,
                      in_channels=None,
                      model_type="image"):
    """Instantiate the destination (target) model for weight conversion.

    Parameters
    ----------
    dst_fwk : str
        Destination framework ('gluon', 'pytorch', 'chainer', 'keras',
        'tensorflow', 'tf2', or 'tfl').
    dst_model : str
        Destination model name (as understood by that framework's provider).
    src_fwk : str
        Source framework name; used to decide whether PyTorch-only BN
        bookkeeping keys must be filtered out.
    ctx : Context
        MXNet context (Gluon branch only).
    use_cuda : bool
        Whether CUDA is available (PyTorch branch only).
    num_classes : int or None
        Number of classes; non-positive values fall back to the framework default.
    in_channels : int or None
        Number of input channels.
    model_type : str
        'image' or 'audio'; controls the dummy-input shape used to build
        the TF2 model's weights.

    Returns
    -------
    tuple
        (dst_params, dst_param_keys, dst_net); all three are None for 'tfl'.
    """
    if dst_fwk == "gluon":
        from gluon.utils import prepare_model as prepare_model_gl
        dst_net = prepare_model_gl(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="",
            dtype=np.float32,
            tune_layers="",
            classes=(num_classes if num_classes > 0 else None),
            in_channels=in_channels,
            ctx=ctx)
        dst_params = dst_net._collect_params_with_prefix()
        dst_param_keys = list(dst_params.keys())
    elif dst_fwk == "pytorch":
        from pytorch.utils import prepare_model as prepare_model_pt
        dst_net = prepare_model_pt(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="",
            use_cuda=use_cuda,
            use_data_parallel=False,
            num_classes=(num_classes if num_classes > 0 else None),
            in_channels=in_channels)
        dst_params = dst_net.state_dict()
        dst_param_keys = list(dst_params.keys())
        if src_fwk != "pytorch":
            # BN 'num_batches_tracked' buffers exist only in PyTorch; skip them
            # when the source framework cannot supply matching tensors.
            dst_param_keys = [key for key in dst_param_keys if not key.endswith("num_batches_tracked")]
    elif dst_fwk == "chainer":
        from chainer_.utils import prepare_model as prepare_model_ch
        dst_net = prepare_model_ch(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        dst_params = {i[0]: i[1] for i in dst_net.namedparams()}
        dst_param_keys = list(dst_params.keys())
    elif dst_fwk == "keras":
        from keras_.utils import prepare_model as prepare_model_ke
        dst_net = prepare_model_ke(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        dst_param_keys = [v.name for v in dst_net.weights]
        # Map each named weight to its owning layer. (A previous version also
        # appended the weight to a list via setdefault, which was immediately
        # overwritten by this assignment -- that dead statement is removed.)
        dst_params = {}
        for layer in dst_net.layers:
            if layer.name:
                for weight in layer.weights:
                    if weight.name:
                        dst_params[weight.name] = (layer, weight)
    elif dst_fwk == "tensorflow":
        import tensorflow as tf
        from tensorflow_.utils import prepare_model as prepare_model_tf
        dst_net = prepare_model_tf(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        dst_param_keys = [v.name for v in tf.global_variables()]
        dst_params = {v.name: v for v in tf.global_variables()}
    elif dst_fwk == "tf2":
        import tensorflow as tf
        from tensorflow2.utils import prepare_model as prepare_model_tf2
        gpus = tf.config.experimental.list_physical_devices("GPU")
        if gpus:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
        dst_net = prepare_model_tf2(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        batch_size = 1
        if model_type == "image":
            input_shape = ((batch_size, 3, dst_net.in_size[0], dst_net.in_size[1]) if
                           dst_net.data_format == "channels_first" else
                           (batch_size, dst_net.in_size[0], dst_net.in_size[1], 3))
            # Dummy forward pass so the Keras model builds its weights.
            dst_net(tf.random.normal(input_shape))
        else:
            # Audio model: feed a flat waveform plus its length.
            seq_len = 100 * 640
            input_shape = (batch_size, seq_len)
            # np.long was removed in NumPy 1.24 (it raised AttributeError);
            # np.int64 is the equivalent explicit dtype.
            x_len = tf.convert_to_tensor(np.array([seq_len], dtype=np.int64))
            dst_net(tf.random.normal(input_shape), x_len)
        dst_param_keys = [v.name for v in dst_net.weights]
        dst_params = {v.name: v for v in dst_net.weights}
    elif dst_fwk == "tfl":
        # TFLite conversion works directly from the TF2 source model;
        # nothing to build here.
        dst_net = None
        dst_params = None
        dst_param_keys = None
    else:
        raise ValueError("Unsupported dst fwk: {}".format(dst_fwk))
    return dst_params, dst_param_keys, dst_net
def convert_mx2gl(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  src_model,
                  ctx):
    """Copy weights from a symbolic MXNet checkpoint into a Gluon model.

    The general strategy: rename the flat MXNet parameter keys into the Gluon
    hierarchical naming scheme, sort both key lists with the same natural
    (digit-aware) ordering so they line up positionally, rename the source keys
    back, then copy tensors pairwise. Some model families (crunet, igcv3_w1,
    preresnet269b) need bespoke renaming rules. The converted parameters are
    saved to ``dst_params_file_path``; nothing is returned.
    """
    if src_model in ["crunet56", "crunet116"]:
        # --- CRU-Net: rename MXNet keys into Gluon form, natural-sort both
        # sides, then undo the renaming so the source dict can be indexed. ---
        src_param_keys.sort()
        src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                            x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        src_param_keys = [re.sub("^conv", "features.", key) for key in src_param_keys]
        src_param_keys = [re.sub("^fc6", "output.1.", key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-a', '.body.conv1.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c3x3-b', '.body.conv2A.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-b', '.body.conv2B.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-c', '.body.conv3.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_x__x_1x1_bases\[dim3\]_weight$', '_x__1.body.conv1.convT.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__x_3x3_bases\[dim21\]_weight$', '_x__1.body.conv2.convT.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(1\)_1x1_bases\[dim3\]_weight$', '_x__1.body.conv1.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(1\)_3x3_bases\[dim21\]_weight$', '_x__1.body.conv2.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(2\)_1x1_bases\[dim3\]_weight$', '_x__7.body.conv1.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(2\)_3x3_bases\[dim21\]_weight$', '_x__7.body.conv2.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(3\)_1x1_bases\[dim3\]_weight$', '_x__14.body.conv1.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(3\)_3x3_bases\[dim21\]_weight$', '_x__14.body.conv2.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-w\(s\/2\)', '.input_convZ.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-w_weight$', '.input_convZ.conv.weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-w\(s\/1\)', '.input_conv.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-w\(s\/key\)', '.identity_conv.', key) for key in src_param_keys]
        src_param_keys = [re.sub('__conv_weight$', '.conv.weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('__bn__bn_beta$', '.bn.beta', key) for key in src_param_keys]
        src_param_keys = [re.sub('__bn__bn_gamma$', '.bn.gamma', key) for key in src_param_keys]
        src_param_keys = [re.sub('__bn__bn_moving_mean$', '.bn.running_mean', key) for key in src_param_keys]
        src_param_keys = [re.sub('__bn__bn_moving_var$', '.bn.running_var', key) for key in src_param_keys]
        src_param_keys = [re.sub('1_x_1__relu-sp__bn_', '1_x_1.conv.bnA.', key) for key in src_param_keys]
        # Natural sort (digits compared numerically) so both key lists align.
        src_param_keys.sort()
        src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                            x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        dst_param_keys.sort()
        dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                            x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        # Invert every renaming above so the keys index src_params again.
        src_param_keys = [re.sub("^features\.", "conv", key) for key in src_param_keys]
        src_param_keys = [re.sub('^output\.1\.', 'fc6', key) for key in src_param_keys]
        src_param_keys = [re.sub('_x__1\.body\.conv1\.convT\.weight$', '_x__x_1x1_bases[dim3]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__1\.body\.conv2\.convT\.weight$', '_x__x_3x3_bases[dim21]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__1\.body\.conv1\.convQ\.weight$', '_x__(1)_1x1_bases[dim3]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__1\.body\.conv2\.convQ\.weight$', '_x__(1)_3x3_bases[dim21]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__7\.body\.conv1\.convQ\.weight$', '_x__(2)_1x1_bases[dim3]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__7\.body\.conv2\.convQ\.weight$', '_x__(2)_3x3_bases[dim21]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__14\.body\.conv1\.convQ\.weight$', '_x__(3)_1x1_bases[dim3]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__14\.body\.conv2\.convQ\.weight$', '_x__(3)_3x3_bases[dim21]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('\.body\.conv1\.', '_c1x1-a', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.body\.conv2A\.', '_c3x3-b', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.body\.conv2B\.', '_c1x1-b', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.body\.conv3\.', '_c1x1-c', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.input_convZ\.conv\.weight$', '_c1x1-w_weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.input_convZ\.', '_c1x1-w(s/2)', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.input_conv\.', '_c1x1-w(s/1)', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.identity_conv\.', '_c1x1-w(s/key)', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.conv\.weight$', '__conv_weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.bn\.beta$', '__bn__bn_beta', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.bn\.gamma$', '__bn__bn_gamma', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.bn\.running_mean$', '__bn__bn_moving_mean', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.bn\.running_var$', '__bn__bn_moving_var', key) for key in src_param_keys]
        src_param_keys = [re.sub('1_x_1\.conv\.bnA\.', '1_x_1__relu-sp__bn_', key) for key in src_param_keys]
        # Pairwise copy with limited skipping: advance over up to 10 unmatched
        # destination 'weight' entries per source key (extra dst-only tensors).
        dst_i = 0
        for src_i, src_key in enumerate(src_param_keys):
            dst_key = dst_param_keys[dst_i]
            for tt in range(10):
                if (dst_key.split('.')[-1].split('_')[-1] == src_key.split('_')[-1]) and\
                    (dst_params[dst_key].shape == src_params[src_key].shape):
                    break
                assert (dst_key.split('.')[-1].split('_')[-1] == "weight")
                dst_i += 1
                dst_key = dst_param_keys[dst_i]
            dst_i += 1
            assert (dst_key.split('.')[-1].split('_')[-1] == src_key.split('_')[-1])
            assert (dst_params[dst_key].shape == src_params[src_key].shape), \
                "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                    src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape)
            dst_params[dst_key]._load_init(src_params[src_key], ctx)
        # Initialize any destination parameters that received no source tensor.
        for param in dst_net.collect_params().values():
            if param._data is not None:
                continue
            print("param={}".format(param))
            param.initialize(ctx=ctx)
        dst_net.save_parameters(dst_params_file_path)
        return
    elif src_model in ["igcv3_w1"]:
        # --- IGCV3: same rename / sort / rename-back strategy, falling
        # through to the generic pairwise copy at the bottom. ---
        src_param_keys = [key.replace("seq-", "features.") for key in src_param_keys]
        src_param_keys = [key.replace("fc_", "output.1.") for key in src_param_keys]
        src_param_keys = [key.replace('-batchnorm_beta', '.bn.beta') for key in src_param_keys]
        src_param_keys = [key.replace('-batchnorm_gamma', '.bn.gamma') for key in src_param_keys]
        src_param_keys = [key.replace('-batchnorm_moving_mean', '.bn.running_mean') for key in src_param_keys]
        src_param_keys = [key.replace('-batchnorm_moving_var', '.bn.running_var') for key in src_param_keys]
        src_param_keys = [key.replace('-conv2d_weight', '.conv.weight') for key in src_param_keys]
        src_param_keys = [key.replace('first-3x3-conv', 'features.A') for key in src_param_keys]
        src_param_keys = [key.replace('last-1x1-conv', 'features.B') for key in src_param_keys]
        src_param_keys = [key.replace('-exp', '.conv1') for key in src_param_keys]
        src_param_keys = [key.replace('-depthwise', '.conv2') for key in src_param_keys]
        src_param_keys = [key.replace('-linear', '.conv3') for key in src_param_keys]
        src_param_keys = [key.replace("-block", ".block") for key in src_param_keys]
        # Temporarily alias first/last feature stages so they sort in order.
        dst_param_keys = [key.replace('features.0.', 'features.A.') for key in dst_param_keys]
        dst_param_keys = [key.replace('features.6.', 'features.B.') for key in dst_param_keys]
        src_param_keys.sort()
        src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                            x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        dst_param_keys.sort()
        dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                            x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        src_param_keys = [key.replace('.bn.beta', '-batchnorm_beta') for key in src_param_keys]
        src_param_keys = [key.replace('.bn.gamma', '-batchnorm_gamma') for key in src_param_keys]
        src_param_keys = [key.replace('.bn.running_mean', '-batchnorm_moving_mean') for key in src_param_keys]
        src_param_keys = [key.replace('.bn.running_var', '-batchnorm_moving_var') for key in src_param_keys]
        src_param_keys = [key.replace('.conv.weight', '-conv2d_weight') for key in src_param_keys]
        src_param_keys = [key.replace('features.A', 'first-3x3-conv') for key in src_param_keys]
        src_param_keys = [key.replace('features.B', 'last-1x1-conv') for key in src_param_keys]
        src_param_keys = [key.replace('.conv1', '-exp') for key in src_param_keys]
        src_param_keys = [key.replace('.conv2', '-depthwise', ) for key in src_param_keys]
        src_param_keys = [key.replace('.conv3', '-linear') for key in src_param_keys]
        src_param_keys = [key.replace("features.", "seq-") for key in src_param_keys]
        src_param_keys = [key.replace("output.1.", "fc_") for key in src_param_keys]
        src_param_keys = [key.replace(".block", "-block") for key in src_param_keys]
        dst_param_keys = [key.replace('features.A.', 'features.0.') for key in dst_param_keys]
        dst_param_keys = [key.replace('features.B.', 'features.6.') for key in dst_param_keys]
    elif src_model in ["preresnet269b"]:
        # --- PreResNet-269b: force-reinit the first unit's BN and drop its
        # keys from the destination list (no source counterpart). ---
        dst_net.features[1][0].body.conv1a.bn.initialize(ctx=ctx, verbose=True, force_reinit=True)
        dst1 = list(filter(re.compile("^features.1.0.body.conv1.bn.").search, dst_param_keys))
        dst_param_keys = [key for key in dst_param_keys if key not in dst1]
        src_param_keys.sort()
        src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                            x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        src_param_keys = [re.sub('^classifier_', "output.", key) for key in src_param_keys]
        src_param_keys = [re.sub('^res', "features.", key) for key in src_param_keys]
        # '_aweight' suffix is a sort-order trick so conv weights land after BN keys.
        src_param_keys = [re.sub('_conv1_weight$', '_conv1_aweight', key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv2_weight$', '_conv2_aweight', key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv3_weight$', '_conv3_aweight', key) for key in src_param_keys]
        src_param_keys.sort()
        src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                            x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        dst_param_keys.sort()
        dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                            x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        src_param_keys = [re.sub("^output\.", "classifier_", key) for key in src_param_keys]
        src_param_keys = [re.sub("^features\.", "res", key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv1_aweight$', '_conv1_weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv2_aweight$', '_conv2_weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv3_aweight$', '_conv3_weight', key) for key in src_param_keys]
    # Generic pairwise copy for the non-returning branches: keys must line up
    # one-to-one after the sorting above.
    for src_i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        assert (dst_key.split('.')[-1].split('_')[-1] == src_key.split('_')[-1]), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape)
        assert (dst_params[dst_key].shape == src_params[src_key].shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape)
        dst_params[dst_key]._load_init(src_params[src_key], ctx)
    # Initialize any destination parameters that received no source tensor.
    for param in dst_net.collect_params().values():
        if param._data is not None:
            continue
        print("param={}".format(param))
        param.initialize(ctx=ctx)
    dst_net.save_parameters(dst_params_file_path)
def convert_gl2ch(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  ext_src_param_keys,
                  ext_src_param_keys2,
                  src_model):
    """
    Copy Gluon (MXNet) parameters into a Chainer network and save it in NPZ format.

    Regular parameters are matched positionally after both key lists are
    natural-sorted; the model-specific key renamings below exist only to make the
    two sorted orders line up and are undone immediately after sorting.
    BatchNorm running statistics and extra integer buffers (CondenseNet `index`,
    X-DenseNet `mask`) are not in `dst_params` and are written onto `dst_net`'s
    links directly via attribute walks.

    Parameters
    ----------
    dst_net : Chainer network
        Destination network; saved with `chainer.serializers.save_npz` at the end.
    dst_params_file_path : str
        Output NPZ file path.
    dst_params : dict
        Destination parameters (name -> parameter object with an `array` attribute).
    dst_param_keys : list(str)
        Destination parameter names ('/'-separated paths).
    src_params : dict
        Source Gluon parameters (name -> parameter).
    src_param_keys : list(str)
        Source parameter names ('.'-separated paths).
    ext_src_param_keys : list(str)
        Source keys of BatchNorm running statistics (`running_mean`/`running_var`).
    ext_src_param_keys2 : list(str)
        Source keys of extra integer buffers (index/mask tensors).
    src_model : str
        Source model name; selects model-specific key handling.
    """
    if src_model.startswith("diares") or src_model.startswith("diapreres"):
        # DIA-ResNet family: drop the shared attention keys from the source list,
        # so the remaining keys pair up positionally with the destination keys.
        src1 = list(filter(re.compile("^features\.[0-9]*\.\d*[1-9]\d*\.attention").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1n
        assert (len(src_param_keys) == len(dst_param_keys))
    # --- Temporary destination-key renamings so that natural sorting orders both
    # lists identically (e.g. 'stageN'/'z' prefixes push late blocks to the end).
    if src_model.startswith("quartznet") or src_model.startswith("jasper"):
        dst_param_keys = [key.replace("features/final_block/", "features/zfinal_block/") for key in dst_param_keys]
        dst_param_keys = [key.replace("/W", "/weight") for key in dst_param_keys]
    dst_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/features/body/", "/features/zbody/") for key in dst_param_keys]
    dst_param_keys = [key.replace("features/final_postactiv/", "features/stageN/final_postactiv/") for key in dst_param_keys]
    dst_param_keys = [key.replace("features/final_block/", "features/stageN/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/final_block/", "/zfinal_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("features/final_conv/", "features/stageN/final_conv/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in dst_param_keys]
    if not src_model.startswith("ibppose_coco"):
        dst_param_keys = [key.replace("/hg/", "/stage1_hg/") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/unit", "/a_unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/reg_block/", "/z_reg_block/") for key in dst_param_keys]
    # Natural sort: a plain sort first as a tie-break, then a sort in which runs
    # of digits compare numerically (zero-padded to width 10).
    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    # --- Undo every temporary renaming (reverse substitutions in the same order).
    if src_model.startswith("quartznet") or src_model.startswith("jasper"):
        dst_param_keys = [key.replace("features/zfinal_block/", "features/final_block/") for key in dst_param_keys]
        dst_param_keys = [key.replace("/weight", "/W") for key in dst_param_keys]
    dst_param_keys = [key.replace("/zfinal_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/final_postactiv/", "/final_postactiv/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/features/zbody/", "/features/body/") for key in dst_param_keys]
    dst_param_keys = [key.replace("features/stageN/final_conv/", "features/final_conv/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in dst_param_keys]
    if not src_model.startswith("ibppose_coco"):
        dst_param_keys = [key.replace("/stage1_hg/", "/hg/") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/a_unit", "/unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/z_reg_block/", "/reg_block/") for key in dst_param_keys]
    # --- Copy BatchNorm running statistics onto the destination links directly.
    # ext3_src_param_keys maps a BN module path (key minus its last component) to
    # an index; ext3_dst_param_keys holds the matching attribute path in dst_net
    # (split on '/', first and last components dropped).
    if src_model.startswith("wrn20_10_1bit") or src_model.startswith("wrn20_10_32bit"):
        # For these models the BN locations are enumerated by rewriting the conv
        # weight keys into BN beta keys (plus one trailing post-activation BN).
        ext2_src_param_keys = [key.replace('.conv.weight', '.bn.beta') for key in src_param_keys if
                               key.endswith(".conv.weight")]
        ext2_src_param_keys.append("features.4.bn.beta")
        ext2_dst_param_keys = [key.replace("/conv/W", "/bn/beta") for key in dst_param_keys if key.endswith("/conv/W")]
        ext2_dst_param_keys.append("/features/post_activ/bn/beta")
        ext3_src_param_keys = {".".join(v.split(".")[:-1]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-1], ext2_dst_param_keys))
    else:
        ext2_src_param_keys = [key for key in src_param_keys if key.endswith(".beta")]
        ext2_dst_param_keys = [key for key in dst_param_keys if key.endswith("/beta")]
        ext3_src_param_keys = {".".join(v.split(".")[:-1]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-1], ext2_dst_param_keys))
    for i, src_key in enumerate(ext_src_param_keys):
        src_key1 = src_key.split(".")[-1]  # 'running_mean' or 'running_var'
        src_key2 = ".".join(src_key.split(".")[:-1])  # owning BN module path
        dst_ind = ext3_src_param_keys[src_key2]
        dst_path = ext3_dst_param_keys[dst_ind]
        # Walk the attribute path down to the destination BN link.
        obj = dst_net
        for j, sub_path in enumerate(dst_path):
            obj = getattr(obj, sub_path)
        if src_key1 == 'running_mean':
            assert (obj.avg_mean.shape == src_params[src_key].shape), \
                "src_key={}, dst_path={}, src_shape={}, obj.avg_mean.shape={}".format(
                    src_key, dst_path, src_params[src_key].shape, obj.avg_mean.shape)
            obj.avg_mean = src_params[src_key]._data[0].asnumpy()
        elif src_key1 == 'running_var':
            assert (obj.avg_var.shape == src_params[src_key].shape)
            obj.avg_var = src_params[src_key]._data[0].asnumpy()
    # --- Copy extra integer buffers (learned index/mask tensors).
    if src_model in ["condensenet74_c4_g4", "condensenet74_c8_g8"]:
        # The classifier's index buffer is handled explicitly, the remaining
        # index buffers are located through the conv1 weight keys.
        assert (dst_net.output.fc.index.shape == src_params["output.1.index"].shape)
        dst_net.output.fc.index = src_params["output.1.index"]._data[0].asnumpy().astype(np.int32)
        ext_src_param_keys2.remove("output.1.index")
        ext2_src_param_keys = [key for key in src_param_keys if key.endswith(".conv1.conv.weight")]
        ext2_dst_param_keys = [key for key in dst_param_keys if key.endswith("/conv1/conv/W")]
        ext3_src_param_keys = {".".join(v.split(".")[:-2]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-2], ext2_dst_param_keys))
        for i, src_key in enumerate(ext_src_param_keys2):
            src_key2 = ".".join(src_key.split(".")[:-1])
            dst_ind = ext3_src_param_keys[src_key2]
            dst_path = ext3_dst_param_keys[dst_ind]
            obj = dst_net
            for j, sub_path in enumerate(dst_path):
                obj = getattr(obj, sub_path)
            assert (obj.index.shape == src_params[src_key].shape), \
                "src_key={}, dst_path={}, src_shape={}, obj.index.shape={}".format(
                    src_key, dst_path, src_params[src_key].shape, obj.index.shape)
            obj.index = src_params[src_key]._data[0].asnumpy().astype(np.int32)
    elif src_model.startswith("xdensenet"):
        # X-DenseNet: copy the binary mask buffers located via conv1/conv2 weights.
        ext2_src_param_keys = [key for key in src_param_keys if key.endswith(".conv1.conv.weight")] +\
                              [key for key in src_param_keys if key.endswith(".conv2.conv.weight")]
        ext2_dst_param_keys = [key for key in dst_param_keys if key.endswith("/conv1/conv/W")] +\
                              [key for key in dst_param_keys if key.endswith("/conv2/conv/W")]
        ext3_src_param_keys = {".".join(v.split(".")[:-1]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-1], ext2_dst_param_keys))
        for i, src_key in enumerate(ext_src_param_keys2):
            src_key2 = ".".join(src_key.split(".")[:-1])
            dst_ind = ext3_src_param_keys[src_key2]
            dst_path = ext3_dst_param_keys[dst_ind]
            obj = dst_net
            for j, sub_path in enumerate(dst_path):
                obj = getattr(obj, sub_path)
            assert (obj.mask.shape == src_params[src_key].shape), \
                "src_key={}, dst_path={}, src_shape={}, obj.index.shape={}".format(
                    src_key, dst_path, src_params[src_key].shape, obj.mask.shape)
            obj.mask = src_params[src_key]._data[0].asnumpy()
    # --- Positional copy of the regular parameters (with shape checks).
    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        assert (dst_params[dst_key].array.shape == src_params[src_key].shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].array.shape)
        dst_params[dst_key].array = src_params[src_key]._data[0].asnumpy()
        # print("src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
        #     src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].array.shape))
    from chainer.serializers import save_npz
    save_npz(
        file=dst_params_file_path,
        obj=dst_net)
def convert_gl2gl(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  finetune,
                  src_model,
                  ctx):
    """
    Copy parameters from one Gluon (MXNet) model into another and save the result.

    The two key lists are assumed to correspond positionally after the
    model-specific reordering below; parameters are then copied pairwise.

    Parameters
    ----------
    dst_net : HybridBlock
        Destination network; its parameters are saved to `dst_params_file_path`.
    dst_params_file_path : str
        Output parameter file path.
    dst_params : dict
        Destination parameters (name -> parameter).
    dst_param_keys : list(str)
        Destination parameter names in copy order.
    src_params : dict
        Source parameters (name -> parameter).
    src_param_keys : list(str)
        Source parameter names in copy order.
    finetune : bool
        If True, parameters with mismatched shapes are skipped (with a warning)
        instead of raising.
    src_model : str
        Source model name; selects the model-specific key reordering.
    ctx : Context
        MXNet context used for parameter initialization.

    Raises
    ------
    ValueError
        If a shape mismatch is found and `finetune` is False.
    """
    def _move_to_back(keys, pattern):
        # Stable partition: keys NOT matching `pattern` first, matching keys last.
        matched = list(filter(re.compile(pattern).search, keys))
        return [key for key in keys if key not in matched] + matched

    def _move_to_front(keys, pattern):
        # Stable partition: keys matching `pattern` first, the rest after.
        matched = list(filter(re.compile(pattern).search, keys))
        return matched + [key for key in keys if key not in matched]

    # Model-specific reordering so that source and destination key sequences align.
    if src_model.startswith("oth_danet_resnet"):
        # Push the head sub-blocks to the back, one group at a time, in this order.
        for pattern in ("^head.sa.gamma", "^head.conv51", "^head.conv6", "^head.conv5c",
                        "^head.sc", "^head.conv52", "^head.conv7", "^head.conv8"):
            src_param_keys = _move_to_back(src_param_keys, pattern)
    elif src_model.startswith("oth_icnet_resnet50_citys"):
        src_param_keys = _move_to_front(src_param_keys, "^conv_sub1")
        src_param_keys = _move_to_back(src_param_keys, "^head")
    elif src_model.startswith("oth_fastscnn_citys"):
        src_param_keys = _move_to_back(src_param_keys, "^feature_fusion")
        for pattern in ("^fusion", "^fusion.low_pw_conv.bn", "^fusion.high_conv.bn"):
            dst_param_keys = _move_to_back(dst_param_keys, pattern)

    for src_key, dst_key in zip(src_param_keys, dst_param_keys):
        if dst_params[dst_key].shape != src_params[src_key].shape:
            logging.warning(
                "dst_param.shape != src_param.shape, src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                    src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape))
            if finetune:
                continue
            # Previously a bare `raise ValueError` with no context; include the
            # offending pair so conversion failures are diagnosable from the trace.
            raise ValueError(
                "Parameter shape mismatch: src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                    src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape))
        if dst_key.split(".")[-1] != src_key.split(".")[-1]:
            # Differing suffixes (e.g. 'weight' vs 'gamma') usually indicate a
            # positional pairing error; warn but proceed as before.
            logging.warning(
                "dst_key.suff != src_key.suff, src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                    src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape))
        dst_params[dst_key]._load_init(src_params[src_key]._data[0], ctx)
    dst_net.save_parameters(dst_params_file_path)
def convert_gl2ke(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    """
    Copy Gluon (MXNet) parameters into a Keras model and save its weights.

    Keys are matched positionally after natural-sorting both lists. Grouped
    convolutions ('convgroup' in the destination key) map a single source
    parameter to several destination variables; the source tensor is split
    along axis 0 into one chunk per group.

    Parameters
    ----------
    dst_net : Keras model
        Destination model; `dst_net.save_weights` is called at the end.
    dst_params_file_path : str
        Output weight file path.
    dst_params : dict
        Destination parameters: name -> (layer, weight) pair.
    dst_param_keys : list(str)
        Destination parameter names.
    src_params : dict
        Source Gluon parameters (name -> parameter).
    src_param_keys : list(str)
        Source parameter names.
    """
    import mxnet as mx
    # Temporary key renamings so natural sorting orders both lists identically;
    # they are reverted right after sorting.
    dst_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/final_block/", "/stageN/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in dst_param_keys]
    # Natural sort: plain sort as tie-break, then digit runs compared numerically.
    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in dst_param_keys]
    # Collapse the per-group destination keys of grouped convolutions
    # ('...convgroupN/...') to one representative key each, keeping first-seen order.
    dst_param_keys_orig = dst_param_keys.copy()
    dst_param_keys = [s[:(s.find("convgroup") + 9)] + "/" + s.split("/")[-1] if s.find("convgroup") >= 0 else s
                      for s in dst_param_keys]
    dst_param_keys_uniq, dst_param_keys_index = np.unique(dst_param_keys, return_index=True)
    dst_param_keys = list(dst_param_keys_uniq[dst_param_keys_index.argsort()])
    # dst_param_keys = list(np.unique(dst_param_keys))
    assert (len(src_param_keys) == len(dst_param_keys))

    def process_width(src_key, dst_key, src_weight):
        # Transpose the Gluon array into the destination layer's layout
        # (channels_last for conv/depthwise kernels, transposed for Dense),
        # check shapes, then attach the value to the Keras weight.
        dst_layer = dst_params[dst_key][0]
        dst_weight = dst_params[dst_key][1]
        if (dst_layer.__class__.__name__ in ["Conv2D"]) and dst_key.endswith("kernel1") and\
                (dst_layer.data_format == "channels_last"):
            src_weight = np.transpose(src_weight, (2, 3, 1, 0))
        if (dst_layer.__class__.__name__ in ["DepthwiseConv2D"]) and dst_key.endswith("kernel1") and\
                (dst_layer.data_format == "channels_last"):
            src_weight = np.transpose(src_weight, (2, 3, 0, 1))
        if (dst_layer.__class__.__name__ in ["Dense"]) and dst_key.endswith("kernel1"):
            src_weight = np.transpose(src_weight, (1, 0))
        assert (dst_weight._keras_shape == src_weight.shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_weight.shape, dst_weight._keras_shape)
        # print("src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
        #     src_key, dst_key, src_weight.shape, dst_weight._keras_shape))
        # NOTE(review): `bind` looks like the keras-mxnet backend's weight
        # assignment mechanism — confirm against the backend in use.
        dst_weight.bind(mx.nd.array(src_weight))

    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        if dst_key.find("convgroup") >= 0:
            # Grouped conv: expand the representative key back to the per-group
            # keys and split the source tensor along axis 0, one chunk per group.
            dst_key_stem = dst_key[:(dst_key.find("convgroup") + 9)]
            dst_keys = [s for s in dst_param_keys_orig if s.startswith(dst_key_stem)]
            if src_key.endswith("weight"):
                dst_keys = [s for s in dst_keys if s.endswith("kernel1")]
            elif src_key.endswith("bias"):
                dst_keys = [s for s in dst_keys if s.endswith("bias1")]
            groups = len(dst_keys)
            src_weight0 = src_params[src_key]._data[0]
            src_weight0_list = mx.nd.split(src_weight0, axis=0, num_outputs=groups)
            for gi in range(groups):
                src_weight_gi = src_weight0_list[gi].asnumpy()
                dst_key_gi = dst_keys[gi]
                process_width(src_key, dst_key_gi, src_weight_gi)
        else:
            src_weight = src_params[src_key]._data[0].asnumpy()
            process_width(src_key, dst_key, src_weight)
    dst_net.save_weights(dst_params_file_path)
def convert_gl2tf(dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    """
    Copy Gluon (MXNet) parameters into TensorFlow (v1) variables and save them.

    Keys are matched positionally after natural-sorting both lists; grouped
    convolutions ('convgroup' keys) map one source parameter to several
    destination variables, filled by splitting the source tensor along axis 0.

    Parameters
    ----------
    dst_params_file_path : str
        Output parameter file path.
    dst_params : dict
        Destination TF variables (name -> tf.Variable).
    dst_param_keys : list(str)
        Destination variable names.
    src_params : dict
        Source Gluon parameters (name -> parameter).
    src_param_keys : list(str)
        Source parameter names.
    """
    import mxnet as mx

    def natural_key(name):
        # Natural-sort key: runs of digits compare numerically (width-10 padded).
        return ["{:10}".format(int(tok)) if tok.isdigit() else tok
                for tok in re.findall(r"[^0-9]|[0-9]+", name)]

    # Temporarily rename destination keys so natural sorting orders both lists
    # identically; the same pairs are applied in reverse after sorting.
    rename_pairs = (
        ("/kernel:", "/weight:"),
        ("/dw_kernel:", "/weight_dw:"),
        ("/post_activ/", "/stageN/post_activ/"),
        ("/final_block/", "/stageN/final_block/"),
        ("/stem1_unit/", "/stage0/stem1_unit/"),
        ("/stem2_unit/", "/stage0/stem2_unit/"),
    )
    for old, new in rename_pairs:
        dst_param_keys = [key.replace(old, new) for key in dst_param_keys]

    src_param_keys.sort()
    src_param_keys.sort(key=natural_key)
    dst_param_keys.sort()
    dst_param_keys.sort(key=natural_key)

    for old, new in rename_pairs:
        dst_param_keys = [key.replace(new, old) for key in dst_param_keys]

    # Grouped convolutions produce several TF variables per Gluon parameter:
    # collapse '...convgroupN/...' names to one representative key each,
    # preserving first-seen order.
    dst_param_keys_orig = dst_param_keys.copy()
    collapsed = []
    for name in dst_param_keys:
        pos = name.find("convgroup")
        if pos >= 0:
            collapsed.append(name[:(pos + 9)] + "/" + name.split("/")[-1])
        else:
            collapsed.append(name)
    uniq_names, first_seen = np.unique(collapsed, return_index=True)
    dst_param_keys = list(uniq_names[first_seen.argsort()])
    assert (len(src_param_keys) == len(dst_param_keys))

    import tensorflow as tf
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        def copy_weight(src_key, dst_key, weight):
            # Re-layout the Gluon array into the TF variable's convention,
            # verify the shape, then assign it in the session.
            if len(weight.shape) == 4:
                if dst_key.split("/")[-1][:-2] == "dw_kernel":
                    weight = np.transpose(weight, axes=(2, 3, 0, 1))
                else:
                    weight = np.transpose(weight, axes=(2, 3, 1, 0))
            elif len(weight.shape) == 2:
                weight = np.transpose(weight, axes=(1, 0))
            assert (tuple(dst_params[dst_key].get_shape().as_list()) == weight.shape)
            sess.run(dst_params[dst_key].assign(weight))

        for src_key, dst_key in zip(src_param_keys, dst_param_keys):
            if dst_key.find("convgroup") >= 0:
                # Expand the representative key back to per-group keys and
                # split the source tensor along axis 0, one chunk per group.
                stem = dst_key[:(dst_key.find("convgroup") + 9)]
                group_keys = [s for s in dst_param_keys_orig if s.startswith(stem)]
                if src_key.endswith("weight"):
                    group_keys = [s for s in group_keys if s.endswith("kernel:0")]
                elif src_key.endswith("bias"):
                    group_keys = [s for s in group_keys if s.endswith("bias:0")]
                groups = len(group_keys)
                whole = src_params[src_key]._data[0]
                chunks = mx.nd.split(whole, axis=0, num_outputs=groups)
                for gi in range(groups):
                    copy_weight(src_key, group_keys[gi], chunks[gi].asnumpy())
            else:
                copy_weight(src_key, dst_key, src_params[src_key]._data[0].asnumpy())

        from tensorflow_.utils import save_model_params
        save_model_params(
            sess=sess,
            file_path=dst_params_file_path)
def convert_gl2tf2(dst_net,
                   dst_params_file_path,
                   dst_params,
                   dst_param_keys,
                   src_params,
                   src_param_keys,
                   src_model):
    """
    Copy Gluon (MXNet) parameters into a TensorFlow 2 model and save its weights.

    Keys are matched positionally after natural-sorting both lists; the
    model-specific renamings below only reshape the sort order and are undone
    afterwards. Grouped convolutions ('convgroup' keys) map one source parameter
    to several destination variables, filled by splitting along axis 0.

    Parameters
    ----------
    dst_net : TF2 model
        Destination model; `dst_net.save_weights` is called at the end.
    dst_params_file_path : str
        Output weight file path.
    dst_params : dict
        Destination variables (name -> variable supporting `.assign`).
    dst_param_keys : list(str)
        Destination variable names.
    src_params : dict
        Source Gluon parameters (name -> parameter).
    src_param_keys : list(str)
        Source parameter names.
    src_model : str
        Source model name; selects the model-specific key renamings.
    """
    # --- Source-side temporary rename (HRNet transition blocks), sort, undo.
    if src_model.startswith("hrnet"):
        src_param_keys = [key.replace(".transition.", ".atransition.") for key in src_param_keys]
    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    if src_model.startswith("hrnet"):
        src_param_keys = [key.replace(".atransition.", ".transition.") for key in src_param_keys]
    # --- Destination-side temporary renamings so natural sorting matches the
    # source order; all are reverted after sorting.
    dst_param_keys = [key.replace("/kernel:", "/weight:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/depthwise_kernel:", "/weight_depthwise:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in dst_param_keys]
    if (not src_model.startswith("pspnet_")) and (not src_model.startswith("deeplabv3_")) and\
            (not src_model.startswith("simplepose_")) and (not src_model.startswith("alphapose_")) and\
            (not src_model.startswith("lwopenpose")) and (not src_model.startswith("quartznet")) and\
            (not src_model.startswith("jasper")):
        # For most models the final block sorts with the last stage; the listed
        # model families keep their final block name untouched here.
        dst_param_keys = [key.replace("/final_block/", "/stageN/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/final_block/", "/zfinal_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in dst_param_keys]
    if src_model.startswith("hrnet"):
        dst_param_keys = [key.replace("/transition/", "/atransition/") for key in dst_param_keys]
    if src_model.startswith("hardnet"):
        # dst_param_keys = [key.replace('/dw_conv/', '/z_dw_conv/') for key in dst_param_keys]
        dst_param_keys = [key.replace("features/down", "features/z_down") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/unit", "/a_unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/reg_block/", "/z_reg_block/") for key in dst_param_keys]
    # if src_model.startswith("danet"):
    #     dst_param_keys = [key.replace("da_net/head/", "z_da_net/head/") for key in dst_param_keys]
    # Natural sort: plain sort as tie-break, then digit runs compared numerically.
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    # --- Undo the destination-side renamings (reverse substitutions, same order).
    dst_param_keys = [key.replace("/weight:", "/kernel:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/weight_depthwise:", "/depthwise_kernel:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/zfinal_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in dst_param_keys]
    if (not src_model.startswith("pspnet_")) and (not src_model.startswith("deeplabv3_")) and\
            (not src_model.startswith("simplepose_")) and (not src_model.startswith("alphapose_")) and\
            (not src_model.startswith("lwopenpose")) and (not src_model.startswith("quartznet")) and\
            (not src_model.startswith("jasper")):
        dst_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in dst_param_keys]
    if src_model.startswith("hrnet"):
        dst_param_keys = [key.replace("/atransition/", "/transition/") for key in dst_param_keys]
    if src_model.startswith("hardnet"):
        # dst_param_keys = [key.replace('/z_dw_conv/', '/dw_conv/') for key in dst_param_keys]
        dst_param_keys = [key.replace("features/z_down", "features/down") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/a_unit", "/unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/z_reg_block/", "/reg_block/") for key in dst_param_keys]
    # if src_model.startswith("danet"):
    #     dst_param_keys = [key.replace("z_da_net/head/", "da_net/head/") for key in dst_param_keys]
    # Collapse per-group destination keys of grouped convolutions
    # ('...convgroupN/...') to one representative key each, keeping first-seen order.
    dst_param_keys_orig = dst_param_keys.copy()
    dst_param_keys = [s[:(s.find("convgroup") + 9)] + "/" + s.split("/")[-1] if s.find("convgroup") >= 0 else s
                      for s in dst_param_keys]
    dst_param_keys_uniq, dst_param_keys_index = np.unique(dst_param_keys, return_index=True)
    dst_param_keys = list(dst_param_keys_uniq[dst_param_keys_index.argsort()])
    assert (len(src_param_keys) == len(dst_param_keys))

    def process_width(src_key, dst_key, src_weight):
        # Transpose the Gluon array into the TF variable's layout, verify the
        # shape, then assign. 4-D: conv kernels (depthwise gets its own order);
        # 2-D: dense weights; 3-D: 1-D conv kernels, except the quartznet/jasper
        # 'fb' tensor which is kept as-is; a 3-D depthwise kernel additionally
        # gains a trailing unit axis to match the 4-D destination variable.
        if len(src_weight.shape) == 4:
            if dst_key.split("/")[-1][:-2] == "depthwise_kernel":
                src_weight = np.transpose(src_weight, axes=(2, 3, 0, 1))
            else:
                src_weight = np.transpose(src_weight, axes=(2, 3, 1, 0))
        elif len(src_weight.shape) == 2:
            src_weight = np.transpose(src_weight, axes=(1, 0))
        elif len(src_weight.shape) == 3:
            if not ((src_model.startswith("jasper") or src_model.startswith("quartznet")) and
                    dst_key.split("/")[-1][:-2] == "fb"):
                src_weight = np.transpose(src_weight, axes=(2, 1, 0))
            if dst_key.split("/")[-1][:-2] == "depthwise_kernel":
                assert(len(dst_params[dst_key].shape) == 4)
                src_weight = np.expand_dims(src_weight, -1)
        dst_weight = dst_params[dst_key]
        assert (tuple(dst_weight.shape) == src_weight.shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_weight.shape, tuple(dst_weight.shape))
        dst_weight.assign(src_weight)

    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        # print("src_key={},\tsrc_key2={},\tdst_key={}".format(src_key, src_params[src_key].name, dst_key))
        if dst_key.find("convgroup") >= 0:
            # Grouped conv: expand the representative key back to per-group keys
            # and split the source tensor along axis 0, one chunk per group.
            import mxnet as mx
            dst_key_stem = dst_key[:(dst_key.find("convgroup") + 9)]
            dst_keys = [s for s in dst_param_keys_orig if s.startswith(dst_key_stem)]
            if src_key.endswith("weight"):
                dst_keys = [s for s in dst_keys if s.endswith("kernel:0")]
            elif src_key.endswith("bias"):
                dst_keys = [s for s in dst_keys if s.endswith("bias:0")]
            groups = len(dst_keys)
            src_weight0 = src_params[src_key]._data[0]
            src_weight0_list = mx.nd.split(src_weight0, axis=0, num_outputs=groups)
            for gi in range(groups):
                src_weight_gi = src_weight0_list[gi].asnumpy()
                dst_key_gi = dst_keys[gi]
                process_width(src_key, dst_key_gi, src_weight_gi)
        else:
            src_weight = src_params[src_key]._data[0].asnumpy()
            process_width(src_key, dst_key, src_weight)
    dst_net.save_weights(dst_params_file_path)
def convert_pt2pt(dst_params_file_path,
dst_params,
dst_param_keys,
src_params,
src_param_keys,
src_model,
dst_model):
import torch
if src_model.startswith("oth_quartznet") or src_model.startswith("oth_jasper"):
src1 = list(filter(re.compile("\.res\.").search, src_param_keys))
src1n = [key for key in src_param_keys if key not in src1]
src_param_keys = src1n + src1
dst1 = list(filter(re.compile("\.identity_block\.").search, dst_param_keys))
dst1n = [key for key in dst_param_keys if key not in dst1]
dst_param_keys = dst1n + dst1
elif src_model.startswith("oth_dicenet"):
src1 = list(filter(re.compile("\.conv_height\.").search, src_param_keys))
src1n = [key for key in src_param_keys if key not in src1]
src2 = list(filter(re.compile("\.conv_width\.").search, src1n))
src2n = [key for key in src1n if key not in src2]
src3 = list(filter(re.compile("\.linear_comb_layer\.").search, src2n))
src3n = [key for key in src2n if key not in src3]
src4 = list(filter(re.compile("\.proj_layer\.").search, src3n))
src4n = [key for key in src3n if key not in src4]
src_param_keys = src4n + src1 + src2 + src3 + src4
dst1 = list(filter(re.compile("\.h_conv\.").search, dst_param_keys))
dst1n = [key for key in dst_param_keys if key not in dst1]
dst2 = list(filter(re.compile("\.w_conv\.").search, dst1n))
dst2n = [key for key in dst1n if key not in dst2]
dst3 = list(filter(re.compile("\.att\.").search, dst2n))
dst3n = [key for key in dst2n if key not in dst3]
dst4 = list(filter(re.compile("\.proj_conv\.").search, dst3n))
dst4n = [key for key in dst3n if key not in dst4]
dst_param_keys = dst4n + dst1 + dst2 + dst3 + dst4
elif src_model.startswith("oth_proxyless"):
src1 = src_param_keys[5]
del src_param_keys[5]
src_param_keys.insert(0, src1)
src2 = src_param_keys[-3]
del src_param_keys[-3]
src_param_keys.insert(-7, src2)
elif src_model.startswith("oth_scnet"):
pass
src1 = list(filter(re.compile(".k1.").search, src_param_keys))
src1n = [key for key in src_param_keys if key not in src1]
src2 = list(filter(re.compile(".scconv.").search, src1n))
src2n = [key for key in src1n if key not in src2]
src_param_keys = src2n + src1 + src2
dst1 = list(filter(re.compile(".conv2a.").search, dst_param_keys))
dst1n = [key for key in dst_param_keys if key not in dst1]
dst2 = list(filter(re.compile(".conv2b.").search, dst1n))
dst2n = [key for key in dst1n if key not in dst2]
dst_param_keys = dst2n + dst1 + dst2
elif src_model == "oth_bisenet":
src1 = list(filter(re.compile("^cp.conv_avg").search, src_param_keys))
src1n = [key for key in src_param_keys if key not in src1]
src2 = list(filter(re.compile("^cp.arm32").search, src1n))
src2n = [key for key in src1n if key not in src2]
src3 = list(filter(re.compile("^cp.conv_head32").search, src2n))
src3n = [key for key in src2n if key not in src3]
src4 = list(filter(re.compile("^cp.arm16").search, src3n))
src4n = [key for key in src3n if key not in src4]
src5 = list(filter(re.compile("^cp.conv_head16").search, src4n))
src5n = [key for key in src4n if key not in src5]
src6 = list(filter(re.compile("^ffm").search, src5n))
src6n = [key for key in src5n if key not in src6]
src_param_keys = src6n + src1 + src2 + src3 + src4 + src5 + src6
dst1 = list(filter(re.compile("^pool").search, dst_param_keys))
dst1n = [key for key in dst_param_keys if key not in dst1]
dst_param_keys = dst1n + dst1
elif src_model.startswith("oth_dla"):
src1 = list(filter(re.compile("\.project").search, src_param_keys))
src1n = [key for key in src_param_keys if key not in src1]
src_param_keys = src1 + src1n
dst1 = list(filter(re.compile("\.project_conv").search, dst_param_keys))
dst1n = [key for key in dst_param_keys if key not in dst1]
dst_param_keys = dst1 + dst1n
elif dst_model == "ntsnet":
src1 = list(filter(re.compile("^proposal_net").search, src_param_keys))
src1n = [key for key in src_param_keys if key not in src1]
src_param_keys = src1 + src1n
dst1 = list(filter(re.compile("^navigator_unit\.branch\d+\.down").search, dst_param_keys))
dst1n = [key for key in dst_param_keys if key not in dst1]
dst2 = list(filter(re.compile("^navigator_unit\.branch\d+\.tidy").search, dst1n))
dst2n = [key for key in dst1n if key not in dst2]
dst_param_keys = dst1 + dst2 + dst2n
elif dst_model == "fishnet150":
src1 = list(filter(re.compile("^(conv|fish\.fish\.[0-2])").search, src_param_keys))
src1n = [key for key in src_param_keys if key not in src1]
src2 = list(filter(re.compile("^fish\.fish\.6\.1").search, src1n))
src2n = [key for key in src1n if key not in src2]
src3 = list(filter(re.compile("^fish\.fish\.5\.1").search, src2n))
src3n = [key for key in src2n if key not in src3]
src4 = list(filter(re.compile("^fish\.fish\.4\.1").search, src3n))
src4n = [key for key in src3n if key not in src4]
src5 = list(filter(re.compile("^fish\.fish\.3\.[0-1]").search, src4n))
src5n = [key for key in src4n if key not in src5]
src6 = list(filter(re.compile("^fish\.fish\.3\.3").search, src5n))
src6n = [key for key in src5n if key not in src6]
src7 = list(filter(re.compile("^fish\.fish\.[3-6]").search, src6n))
src7n = [key for key in src6n if key not in src7]
src8 = list(filter(re.compile("^fish\.fish\.9\.1").search, src7n))
src8n = [key for key in src7n if key not in src8]
src9 = list(filter(re.compile("^fish\.fish\.8\.1").search, src8n))
src9n = [key for key in src8n if key not in src9]
src10 = list(filter(re.compile("^fish\.fish\.7\.1").search, src9n))
src10n = [key for key in src9n if key not in src10]
src_param_keys = src1 + src2 + src3 + src4 + src5 + src6 + src7 + src8 + src9 + src10 + src10n
elif dst_model == "bam_resnet50":
src_bams = list(filter(re.compile("^bam").search, src_param_keys))
src_param_keys = [key for key in src_param_keys if key not in src_bams]
src_param_keys = src_param_keys + src_bams
dst_bams = list(filter(re.compile("^features.stage[0-9].unit1.bam.").search, dst_param_keys))
dst_param_keys = [key for key in dst_param_keys if key not in dst_bams]
dst_param_keys = dst_param_keys + dst_bams
elif dst_model.startswith("sinet"):
src1 = list(filter(re.compile("\.vertical.weight").search, src_param_keys))
src1n = [key for key in src_param_keys if key not in src1]
src_param_keys = src1n + src1
src2 = list(filter(re.compile("\.horizontal.weight").search, src_param_keys))
src2n = [key for key in src_param_keys if key not in src2]
src_param_keys = src2n + src2
src3 = list(filter(re.compile("\.B_v\.").search, src_param_keys))
src3n = [key for key in src_param_keys if key not in src3]
src_param_keys = src3n + src3
src4 = list(filter(re.compile("\.B_h\.").search, src_param_keys))
src4n = [key for key in src_param_keys if key not in src4]
src_param_keys = src4n + src4
src5 = list(filter(re.compile("bn_4\.").search, src_param_keys))
src5n = [key for key in src_param_keys if key not in src5]
src_param_keys = src5n + src5
src6 = list(filter(re.compile("bn_3\.").search, src_param_keys))
src6n = [key for key in src_param_keys if key not in src6]
src_param_keys = src6n + src6
dst1 = list(filter(re.compile("\.v_conv.conv\.").search, dst_param_keys))
dst1n = [key for key in dst_param_keys if key not in dst1]
dst_param_keys = dst1n + dst1
dst2 = list(filter(re.compile("\.h_conv.conv\.").search, dst_param_keys))
dst2n = [key for key in dst_param_keys if key not in dst2]
dst_param_keys = dst2n + dst2
dst3 = list(filter(re.compile("\.v_conv.bn\.").search, dst_param_keys))
dst3n = [key for key in dst_param_keys if key not in dst3]
dst_param_keys = dst3n + dst3
dst4 = list(filter(re.compile("\.h_conv.bn\.").search, dst_param_keys))
dst4n = [key for key in dst_param_keys if key not in dst4]
dst_param_keys = dst4n + dst4
dst5 = list(filter(re.compile("decoder.decode1.bn\.").search, dst_param_keys))
dst5n = [key for key in dst_param_keys if key not in dst5]
dst_param_keys = dst5n + dst5
dst6 = list(filter(re.compile("decoder.decode2.bn\.").search, dst_param_keys))
dst6n = [key for key in dst_param_keys if key not in dst6]
dst_param_keys = dst6n + dst6
elif src_model.startswith("oth_ibppose"):
def sort_hg(src2):
src2b1 = list(filter(re.compile("^hourglass.[0-9].hg.0.1.").search, src2))
src2b2 = list(filter(re.compile("^hourglass.[0-9].hg.1.1.").search, src2))
src2b3 = list(filter(re.compile("^hourglass.[0-9].hg.2.1.").search, src2))
src2b4 = list(filter(re.compile("^hourglass.[0-9].hg.3.1.").search, src2))
src2b5 = list(filter(re.compile("^hourglass.[0-9].hg.3.2.").search, src2))
src2b6 = list(filter(re.compile("^hourglass.[0-9].hg.3.3.").search, src2))
src2b7 = list(filter(re.compile("^hourglass.[0-9].hg.2.2.").search, src2))
src2b8 = list(filter(re.compile("^hourglass.[0-9].hg.2.3.").search, src2))
src2b9 = list(filter(re.compile("^hourglass.[0-9].hg.1.2.").search, src2))
src2b10 = list(filter(re.compile("^hourglass.[0-9].hg.1.3.").search, src2))
src2b11 = list(filter(re.compile("^hourglass.[0-9].hg.0.2.").search, src2))
src2b12 = list(filter(re.compile("^hourglass.[0-9].hg.0.3.").search, src2))
src2b13 = list(filter(re.compile("^hourglass.[0-9].hg.0.0.").search, src2))
src2b14 = list(filter(re.compile("^hourglass.[0-9].hg.1.0.").search, src2))
src2b15 = list(filter(re.compile("^hourglass.[0-9].hg.2.0.").search, src2))
src2b16 = list(filter(re.compile("^hourglass.[0-9].hg.3.0.").search, src2))
src2b17 = list(filter(re.compile("^hourglass.[0-9].hg.3.4.").search, src2))
return src2b1 + src2b2 + src2b3 + src2b4 +\
src2b11 + src2b12 + src2b9 + src2b10 + src2b7 + src2b8 + src2b5 + src2b6 +\
src2b13 + src2b14 + src2b15 + src2b16 + src2b17
src1 = list(filter(re.compile("^pre.").search, src_param_keys))
src1n = [key for key in src_param_keys if key not in src1]
src_param_keys = src1n + src1
src2 = list(filter(re.compile("^hourglass.").search, src_param_keys))
src2n = [key for key in src_param_keys if key not in src2]
src2b1 = sort_hg(list(filter(re.compile("^hourglass.0.hg.").search, src2)))
src2b2 = sort_hg(list(filter(re.compile("^hourglass.1.hg.").search, src2)))
src2b3 = sort_hg(list(filter(re.compile("^hourglass.2.hg.").search, src2)))
src2b4 = sort_hg(list(filter(re.compile("^hourglass.3.hg.").search, src2)))
src_param_keys = src2n + src2b1 + src2b2 + src2b3 + src2b4
src3 = list(filter(re.compile("^features.[0-9].before_regress").search, src_param_keys))
src3n = [key for key in src_param_keys if key not in src3]
src3b = list(filter(re.compile("^features.[0-9].before_regress.0.").search, src3))
src_param_keys = src3n + src3b
src4 = list(filter(re.compile("^outs.[0-9].").search, src_param_keys))
src4n = [key for key in src_param_keys if key not in src4]
src4b = list(filter(re.compile("^outs.[0-9].0.").search, src4))
src_param_keys = src4n + src4b
src5 = list(filter(re.compile("^merge_features.[0-9].").search, src_param_keys))
src5n = [key for key in src_param_keys if key not in src5]
src5b = list(filter(re.compile("^merge_features.[0-9].0.").search, src5))
src_param_keys = src5n + src5b
src6 = list(filter(re.compile("^merge_preds.[0-9].").search, src_param_keys))
src6n = [key for key in src_param_keys if key not in src6]
src6b = list(filter(re.compile("^merge_preds.[0-9].0.").search, src6))
src_param_keys = src6n + src6b
dst1 = list(filter(re.compile("^backbone.").search, dst_param_keys))
dst1n = [key for key in dst_param_keys if key not in dst1]
dst_param_keys = dst1n + dst1
dst2 = list(filter(re.compile("^decoder.pass[1-9].hg.").search, dst_param_keys))
dst2n = [key for key in dst_param_keys if key not in dst2]
dst_param_keys = dst2n + dst2
dst3 = list(filter(re.compile("^decoder.pass[1-9].pre_block.").search, dst_param_keys))
dst3n = [key for key in dst_param_keys if key not in dst3]
dst_param_keys = dst3n + dst3
dst4 = list(filter(re.compile("^decoder.pass[1-9].post_block.").search, dst_param_keys))
dst4n = [key for key in dst_param_keys if key not in dst4]
dst_param_keys = dst4n + dst4
dst5 = list(filter(re.compile("^decoder.pass[1-9].pre_merge_block.").search, dst_param_keys))
dst5n = [key for key in dst_param_keys if key not in dst5]
dst_param_keys = dst5n + dst5
dst6 = list(filter(re.compile("^decoder.pass[1-9].post_merge_block.").search, dst_param_keys))
dst6n = [key for key in dst_param_keys if key not in dst6]
dst_param_keys = dst6n + dst6
assert (len(src_param_keys) == len(dst_param_keys))
for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
if (src_model == "oth_shufflenetv2_wd2" and dst_model == "shufflenetv2_wd2") and \
(src_key == "network.8.weight"):
dst_params[dst_key] = torch.from_numpy(src_params[src_key].numpy()[:, :, 0, 0])
else:
# print("src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
# src_key, dst_key, tuple(src_params[src_key].size()), tuple(dst_params[dst_key].size())))
assert (tuple(dst_params[dst_key].size()) == tuple(src_params[src_key].size())), \
"src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
src_key, dst_key, tuple(src_params[src_key].size()), tuple(dst_params[dst_key].size()))
assert (dst_key.split('.')[-1] == src_key.split('.')[-1])
dst_params[dst_key] = torch.from_numpy(src_params[src_key].numpy())
torch.save(
obj=dst_params,
f=dst_params_file_path)
def convert_gl2pt(dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    """
    Convert Gluon parameters into PyTorch parameters and serialize the result.

    The two key lists are assumed to be aligned pairwise (same order, same
    length), so parameters are copied index-by-index.

    Parameters:
    ----------
    dst_params_file_path : str
        Path to the output PyTorch parameter file.
    dst_params : dict
        Destination (PyTorch) parameters, name -> tensor.
    dst_param_keys : list of str
        Destination parameter names, ordered to match `src_param_keys`.
    src_params : dict
        Source (Gluon) parameters, name -> parameter.
    src_param_keys : list of str
        Source parameter names.
    """
    import torch
    for gl_name, pt_name in zip(src_param_keys, dst_param_keys):
        # Shapes must agree exactly before the raw weights are copied over.
        assert (tuple(dst_params[pt_name].size()) == src_params[gl_name].shape)
        raw_weight = src_params[gl_name]._data[0].asnumpy()
        dst_params[pt_name] = torch.from_numpy(raw_weight)
    torch.save(
        obj=dst_params,
        f=dst_params_file_path)
def convert_pt2gl(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  ctx):
    """
    Convert PyTorch parameters into Gluon parameters and save the parameter file.

    Parameters:
    ----------
    dst_net : HybridBlock
        Destination (Gluon) network whose parameters get overwritten.
    dst_params_file_path : str
        Path to the output Gluon parameter file.
    dst_params : dict
        Destination parameters, name -> gluon Parameter.
    dst_param_keys : list of str
        Destination parameter names, ordered to match `src_param_keys`.
    src_params : dict
        Source (PyTorch) parameters, name -> tensor.
    src_param_keys : list of str
        Source parameter names.
    ctx : Context
        MXNet context used for the weight arrays.
    """
    import mxnet as mx
    for pt_name, gl_name in zip(src_param_keys, dst_param_keys):
        pt_shape = tuple(src_params[pt_name].size())
        assert dst_params[gl_name].shape == pt_shape, \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                pt_name, gl_name, pt_shape, dst_params[gl_name].shape)
        weight = mx.nd.array(src_params[pt_name].numpy(), ctx)
        dst_params[gl_name]._load_init(weight, ctx)
    dst_net.save_parameters(dst_params_file_path)
def convert_tf2tf(dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    """
    Convert (third-party) TensorFlow weights into (own) TensorFlow weights and save them.

    The two key lists use different naming schemes, so the source keys are
    temporarily rewritten into the destination scheme, both lists are sorted
    with the same natural ordering, and the source names are then rewritten
    back before the aligned, index-by-index copy.

    Parameters:
    ----------
    dst_params_file_path : str
        Path for the saved destination parameter file.
    dst_params : dict
        Destination parameters (name -> tf variable).
    dst_param_keys : list of str
        Destination parameter names.
    src_params : dict
        Source parameters (name -> weight array).
    src_param_keys : list of str
        Source parameter names.
    """
    import re
    # Forward renaming: map source names onto the destination naming scheme.
    src_param_keys = [key.replace("/W:", "/kernel:") for key in src_param_keys]
    src_param_keys = [key.replace("/b:", "/bias:") for key in src_param_keys]
    src_param_keys = [key.replace("linear/", "output/") for key in src_param_keys]
    src_param_keys = [key.replace("stage", "features/stage") for key in src_param_keys]
    src_param_keys = [re.sub("^conv1/", "features/init_block/conv/", key) for key in src_param_keys]
    src_param_keys = [re.sub("^conv5/", "features/final_block/conv/", key) for key in src_param_keys]
    src_param_keys = [key.replace('/dconv_bn/', '/dconv/bn/') for key in src_param_keys]
    src_param_keys = [key.replace('/shortcut_dconv_bn/', '/shortcut_dconv/bn/') for key in src_param_keys]
    # Natural sort (digit runs compared numerically), applied identically to
    # both lists so corresponding parameters line up by index.
    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    # Backward renaming: restore the original source names.  NOTE(review):
    # the reverse of the anchored re.sub calls above is a plain substring
    # replace here — presumably safe because those prefixes only occur at the
    # start of a key; verify against the actual key sets if extending.
    src_param_keys = [key.replace("/kernel:", "/W:") for key in src_param_keys]
    src_param_keys = [key.replace("/bias:", "/b:") for key in src_param_keys]
    src_param_keys = [key.replace("output/", "linear/") for key in src_param_keys]
    src_param_keys = [key.replace("features/stage", "stage") for key in src_param_keys]
    src_param_keys = [key.replace("features/init_block/conv/", 'conv1/') for key in src_param_keys]
    src_param_keys = [key.replace("features/final_block/conv/", 'conv5/') for key in src_param_keys]
    src_param_keys = [key.replace('/dconv/bn/', '/dconv_bn/') for key in src_param_keys]
    src_param_keys = [key.replace('/shortcut_dconv/bn/', '/shortcut_dconv_bn/') for key in src_param_keys]
    assert (len(src_param_keys) == len(dst_param_keys))
    import tensorflow as tf
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
            # Shapes must match exactly before assigning source -> destination.
            assert (src_params[src_key].shape == tuple(dst_params[dst_key].get_shape().as_list()))
            sess.run(dst_params[dst_key].assign(src_params[src_key]))
        from tensorflow_.utils import save_model_params
        save_model_params(
            sess=sess,
            file_path=dst_params_file_path)
def convert_tf2gl(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  ctx):
    """
    Convert TensorFlow weights into Gluon weights and save the parameter file.

    Source keys are temporarily rewritten so that both key lists sort into the
    same natural order; each weight is then transposed from the TensorFlow
    axis layout into the layout expected by MXNet (see the explicit
    `np.transpose` calls below) before being loaded into the Gluon parameter.

    Parameters:
    ----------
    dst_net : HybridBlock
        Destination (Gluon) network whose parameters get overwritten.
    dst_params_file_path : str
        Path for the saved destination parameter file.
    dst_params : dict
        Destination parameters (name -> gluon Parameter).
    dst_param_keys : list of str
        Destination parameter names.
    src_params : dict
        Source parameters (name -> numpy array).
    src_param_keys : list of str
        Source parameter names.
    ctx : Context
        MXNet context used for the weight arrays.
    """
    import mxnet as mx
    # Forward renaming so both key lists share one naming scheme for sorting.
    src_param_keys = [key.replace("/kernel:", "/weight:") for key in src_param_keys]
    src_param_keys = [key.replace("/dw_kernel:", "/weight_dw:") for key in src_param_keys]
    src_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in src_param_keys]
    src_param_keys = [key.replace("/final_block/", "/stageN/final_block/") for key in src_param_keys]
    src_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in src_param_keys]
    src_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in src_param_keys]
    # Natural sort (digit runs compared numerically), applied to both lists.
    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    # Backward renaming restores the original TensorFlow names.
    src_param_keys = [key.replace("/weight:", "/kernel:") for key in src_param_keys]
    src_param_keys = [key.replace("/weight_dw:", "/dw_kernel:") for key in src_param_keys]
    src_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in src_param_keys]
    src_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in src_param_keys]
    src_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in src_param_keys]
    src_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in src_param_keys]
    assert (len(src_param_keys) == len(dst_param_keys))
    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        src_weight = src_params[src_key]
        if len(src_weight.shape) == 4:
            # 4-D convolution kernels: depthwise kernels ("dw_kernel") use a
            # different axis permutation than regular ones.
            if src_key.split("/")[-1][:-2] == "dw_kernel":
                dst_weight = np.transpose(src_weight, axes=(2, 3, 0, 1))
            else:
                dst_weight = np.transpose(src_weight, axes=(3, 2, 0, 1))
        elif len(src_weight.shape) == 2:
            # Fully-connected weights: plain matrix transpose.
            dst_weight = np.transpose(src_weight, axes=(1, 0))
        else:
            # 1-D tensors (biases, BN statistics, etc.) are copied as-is.
            dst_weight = src_weight
        assert (dst_weight.shape == dst_params[dst_key].shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, dst_weight.shape, dst_params[dst_key].shape)
        dst_params[dst_key]._load_init(mx.nd.array(dst_weight, ctx), ctx)
    dst_net.save_parameters(dst_params_file_path)
def convert_tf22tfl(src_net,
                    dst_params_file_path):
    """
    Convert a TF2 Keras model into a TFLite flat-buffer and write it to disk.

    Parameters:
    ----------
    src_net : tf.keras.Model
        Source TF2 model.
    dst_params_file_path : str
        Path to the output `.tflite` file.
    """
    import tensorflow as tf
    converter = tf.lite.TFLiteConverter.from_keras_model(src_net)
    tflite_model = converter.convert()
    # Bug fix: the original `open(...).write(...)` never closed the file
    # handle; use a context manager so the data is flushed and the handle is
    # released even if the write raises.
    with open(dst_params_file_path, "wb") as out_file:
        out_file.write(tflite_model)
def _init_ctx(args):
ctx = None
if args.src_fwk in ("gluon", "mxnet", "keras") or args.dst_fwk in ("gluon", "mxnet", "keras"):
import mxnet as mx
ctx = mx.cpu()
return ctx
def _prepare_src_model(args, ctx, use_cuda):
    """Thin wrapper: forward the relevant CLI arguments to `prepare_src_model`."""
    src_kwargs = {
        "src_fwk": args.src_fwk,
        "src_model": args.src_model,
        "src_params_file_path": args.src_params,
        "dst_fwk": args.dst_fwk,
        "ctx": ctx,
        "use_cuda": use_cuda,
        "load_ignore_extra": args.load_ignore_extra,
        "remove_module": args.remove_module,
        "num_classes": args.src_num_classes,
        "in_channels": args.src_in_channels,
    }
    return prepare_src_model(**src_kwargs)
def _prepare_dst_model(args, ctx, use_cuda):
    """Thin wrapper: forward the relevant CLI arguments to `prepare_dst_model`."""
    dst_kwargs = {
        "dst_fwk": args.dst_fwk,
        "dst_model": args.dst_model,
        "src_fwk": args.src_fwk,
        "ctx": ctx,
        "use_cuda": use_cuda,
        "num_classes": args.dst_num_classes,
        "in_channels": args.dst_in_channels,
        "model_type": args.model_type,
    }
    return prepare_dst_model(**dst_kwargs)
def update_and_initialize_logging(args):
    """
    Update arguments and initialize logging.

    Parameters:
    ----------
    args : ArgumentParser
        Main script arguments.
    """
    packages = []
    pip_packages = []
    if (args.src_fwk == "gluon") or (args.dst_fwk == "gluon"):
        # Bug fix: this used to append the single fused string "mxnet, numpy"
        # as one list element; every other branch appends one package name per
        # element.
        packages += ["mxnet", "numpy"]
        pip_packages += ["mxnet-cu110", "mxnet-cu112"]
    if (args.src_fwk == "pytorch") or (args.dst_fwk == "pytorch"):
        packages += ["torch", "torchvision"]
    if (args.src_fwk == "chainer") or (args.dst_fwk == "chainer"):
        packages += ["chainer"]
        pip_packages += ["cupy-cuda110", "cupy-cuda112", "chainer"]
    if (args.src_fwk == "keras") or (args.dst_fwk == "keras"):
        packages += ["keras"]
        pip_packages += ["keras", "keras-mxnet", "mxnet-cu110", "mxnet-cu112"]
    if (args.src_fwk == "tensorflow") or (args.dst_fwk == "tensorflow"):
        packages += ["tensorflow-gpu"]
        pip_packages += ["tensorflow", "tensorflow-gpu", "tensorpack"]
    if (args.src_fwk == "tf2") or (args.dst_fwk == "tf2") or (args.dst_fwk == "tfl"):
        # "tfl" appears only as a destination framework in main()'s dispatch,
        # hence no src_fwk check for it here.
        packages += ["tensorflow"]
        pip_packages += ["tensorflow", "tensorflow-gpu"]
    # The returned (logger, file-exists) pair is not used by this script.
    initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=packages,
        log_pip_packages=pip_packages)
def main():
    """
    Main body of script: load the source model's weights, convert them into
    the destination framework's parameter layout, and save the result via the
    framework-pair-specific converter.
    """
    args = parse_args()
    ctx = None
    use_cuda = False
    # For a TF2 destination, the model is built before logging is initialized;
    # for every other destination it is built after the source model below.
    if args.dst_fwk == "tf2":
        dst_params, dst_param_keys, dst_net = _prepare_dst_model(args, ctx, use_cuda)
    update_and_initialize_logging(args=args)
    ctx = _init_ctx(args)
    src_params, src_param_keys, ext_src_param_keys, ext_src_param_keys2, src_net =\
        _prepare_src_model(args, ctx, use_cuda)
    if args.dst_fwk != "tf2":
        dst_params, dst_param_keys, dst_net = _prepare_dst_model(args, ctx, use_cuda)
    # Sanity-check the parameter-key counts: a few model/framework
    # combinations legitimately have unequal source/destination counts.
    if ((args.dst_fwk in ["keras", "tensorflow", "tf2"]) and any([s.find("convgroup") >= 0 for s in dst_param_keys]))\
            or ((args.src_fwk == "mxnet") and (args.src_model in ["crunet56", "crunet116", "preresnet269b"])):
        assert (len(src_param_keys) <= len(dst_param_keys))
    elif ((args.dst_fwk == "chainer") and
          (args.src_model.startswith("diaresnet") or args.src_model.startswith("diapreresnet"))) or\
            args.src_model.startswith("oth_ibppose"):
        assert (len(src_param_keys) >= len(dst_param_keys))
    elif args.dst_fwk == "tfl":
        pass
    else:
        assert (len(src_param_keys) == len(dst_param_keys))
    # Dispatch on the (source framework, destination framework) pair.
    if args.src_fwk == "gluon" and args.dst_fwk == "gluon":
        convert_gl2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            finetune=((args.src_num_classes != args.dst_num_classes) or (args.src_in_channels != args.dst_in_channels)),
            src_model=args.src_model,
            ctx=ctx)
    elif args.src_fwk == "pytorch" and args.dst_fwk == "pytorch":
        convert_pt2pt(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            src_model=args.src_model,
            dst_model=args.dst_model)
    elif args.src_fwk == "gluon" and args.dst_fwk == "pytorch":
        convert_gl2pt(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "gluon" and args.dst_fwk == "chainer":
        convert_gl2ch(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            ext_src_param_keys=ext_src_param_keys,
            ext_src_param_keys2=ext_src_param_keys2,
            src_model=args.src_model)
    elif args.src_fwk == "gluon" and args.dst_fwk == "keras":
        convert_gl2ke(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "gluon" and args.dst_fwk == "tensorflow":
        convert_gl2tf(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "gluon" and args.dst_fwk == "tf2":
        convert_gl2tf2(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            src_model=args.src_model)
    elif args.src_fwk == "pytorch" and args.dst_fwk == "gluon":
        convert_pt2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            ctx=ctx)
    elif args.src_fwk == "mxnet" and args.dst_fwk == "gluon":
        convert_mx2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            src_model=args.src_model,
            ctx=ctx)
    elif args.src_fwk == "tensorflow" and args.dst_fwk == "tensorflow":
        convert_tf2tf(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "tensorflow" and args.dst_fwk == "gluon":
        convert_tf2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            ctx=ctx)
    elif args.src_fwk == "tf2" and args.dst_fwk == "tfl":
        convert_tf22tfl(
            src_net=src_net,
            dst_params_file_path=args.dst_params)
    else:
        raise NotImplementedError
    logging.info("Convert {}-model {} into {}-model {}".format(
        args.src_fwk, args.src_model, args.dst_fwk, args.dst_model))
# Standard script entry point: run the converter only when executed directly.
if __name__ == '__main__':
    main()
| 87,933
| 51.435301
| 125
|
py
|
imgclsmob
|
imgclsmob-master/train_ch.py
|
"""
Script for training model on Chainer.
"""
import os
import argparse
import numpy as np
import chainer
from chainer import training
from chainer.training import extensions
from chainer.serializers import save_npz
from common.logger_utils import initialize_logging
from chainer_.utils import prepare_ch_context, prepare_model
from chainer_.dataset_utils import get_dataset_metainfo
from chainer_.dataset_utils import get_train_data_source, get_val_data_source
def add_train_cls_parser_arguments(parser):
    """
    Create python script parameters (for training/classification specific subpart).

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    # Options are declared as (flags, kwargs) pairs and registered in one
    # pass; flags, defaults and help texts are identical to the hand-written
    # one-call-per-option form.
    option_table = [
        (("--model",), dict(
            type=str,
            required=True,
            help="type of model to use. see model_provider for options")),
        (("--use-pretrained",), dict(
            action="store_true",
            help="enable using pretrained model from github repo")),
        (("--resume",), dict(
            type=str,
            default="",
            help="resume from previously saved parameters if not None")),
        (("--resume-state",), dict(
            type=str,
            default="",
            help="resume from previously saved optimizer state if not None")),
        (("--num-gpus",), dict(
            type=int,
            default=0,
            help="number of gpus to use")),
        (("-j", "--num-data-workers"), dict(
            dest="num_workers",
            default=4,
            type=int,
            help="number of preprocessing workers")),
        (("--batch-size",), dict(
            type=int,
            default=512,
            help="training batch size per device (CPU/GPU)")),
        (("--num-epochs",), dict(
            type=int,
            default=120,
            help="number of training epochs.")),
        (("--start-epoch",), dict(
            type=int,
            default=1,
            help="starting epoch for resuming, default is 1 for new training")),
        (("--attempt",), dict(
            type=int,
            default=1,
            help="current attempt number for training")),
        (("--optimizer-name",), dict(
            type=str,
            default="nag",
            help="optimizer name")),
        (("--lr",), dict(
            type=float,
            default=0.1,
            help="learning rate")),
        (("--lr-mode",), dict(
            type=str,
            default="cosine",
            help="learning rate scheduler mode. options are step, poly and cosine")),
        (("--lr-decay",), dict(
            type=float,
            default=0.1,
            help="decay rate of learning rate")),
        (("--lr-decay-period",), dict(
            type=int,
            default=0,
            help="interval for periodic learning rate decays. default is 0 to disable")),
        (("--lr-decay-epoch",), dict(
            type=str,
            default="40,60",
            help="epoches at which learning rate decays")),
        (("--target-lr",), dict(
            type=float,
            default=1e-8,
            help="ending learning rate")),
        (("--momentum",), dict(
            type=float,
            default=0.9,
            help="momentum value for optimizer")),
        (("--wd",), dict(
            type=float,
            default=0.0001,
            help="weight decay rate")),
        (("--log-interval",), dict(
            type=int,
            default=50,
            help="number of batches to wait before logging")),
        (("--save-interval",), dict(
            type=int,
            default=4,
            help="saving parameters epoch interval, best model will always be saved")),
        (("--save-dir",), dict(
            type=str,
            default="",
            help="directory of saved models and log-files")),
        (("--logging-file-name",), dict(
            type=str,
            default="train.log",
            help="filename of training log")),
        (("--seed",), dict(
            type=int,
            default=-1,
            help="Random seed to be fixed")),
        (("--log-packages",), dict(
            type=str,
            default="chainer, chainercv",
            help="list of python packages for logging")),
        (("--log-pip-packages",), dict(
            type=str,
            default="cupy-cuda110, chainer, chainercv",
            help="list of pip packages for logging")),
    ]
    for flags, options in option_table:
        parser.add_argument(*flags, **options)
def parse_args():
    """
    Parse python script parameters (common part).

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Train a model for image classification/segmentation (Chainer)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K",
        help="dataset name. options are ImageNet1K, CUB200_2011, CIFAR10, CIFAR100, SVHN, VOC2012, ADE20K, "
             "Cityscapes, COCO")
    parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    # Peek at the dataset/work-dir first: the dataset meta-info decides which
    # dataset-specific options get registered before the full parse.
    known_args, _ = parser.parse_known_args()
    ds_metainfo = get_dataset_metainfo(dataset_name=known_args.dataset)
    ds_metainfo.add_dataset_parser_arguments(
        parser=parser,
        work_dir_path=known_args.work_dir)
    add_train_cls_parser_arguments(parser)
    return parser.parse_args()
def init_rand(seed):
    """
    Resolve and apply the random seed.

    A non-positive seed means "unset": a fresh positive seed is drawn instead.
    The chosen seed is then applied to NumPy's global RNG — the original
    version only returned the value without seeding anything, so ``--seed``
    had no reproducibility effect.

    Parameters:
    ----------
    seed : int
        Requested seed; non-positive to auto-generate one.

    Returns:
    -------
    int
        The seed actually applied.
    """
    if seed <= 0:
        # Draw from [1, 10000) so the result never collides with the
        # "unset" (non-positive) convention; the original could return 0.
        seed = np.random.randint(1, 10000)
    np.random.seed(seed)
    return seed
def prepare_trainer(net,
                    optimizer_name,
                    lr,
                    momentum,
                    num_epochs,
                    train_data,
                    val_data,
                    logging_dir_path,
                    use_gpus):
    """
    Assemble a Chainer trainer: optimizer, updater and reporting extensions.

    Parameters:
    ----------
    net : Chain
        Model to train.
    optimizer_name : str
        Optimizer name ('sgd' or 'nag').
    lr : float
        Learning rate.
    momentum : float
        Momentum value for the optimizer.
    num_epochs : int
        Number of training epochs (stop trigger).
    train_data : dict
        Training data source (must provide an 'iterator' entry).
    val_data : dict
        Validation data source (must provide an 'iterator' entry).
    logging_dir_path : str
        Output directory for trainer logs/snapshots.
    use_gpus : bool
        Whether to run on GPU 0 (otherwise CPU, device -1).

    Returns:
    -------
    Trainer
        Configured Chainer trainer.
    """
    optimizer_map = {
        "sgd": chainer.optimizers.MomentumSGD,
        "nag": chainer.optimizers.NesterovAG,
    }
    if optimizer_name not in optimizer_map:
        raise Exception("Unsupported optimizer: {}".format(optimizer_name))
    optimizer = optimizer_map[optimizer_name](lr=lr, momentum=momentum)
    optimizer.setup(net)

    # Device 0 when GPUs are enabled, -1 (CPU) otherwise.
    main_device = 0 if use_gpus else -1
    updater = training.updaters.StandardUpdater(
        iterator=train_data["iterator"],
        optimizer=optimizer,
        device=main_device)
    trainer = training.Trainer(
        updater=updater,
        stop_trigger=(num_epochs, "epoch"),
        out=logging_dir_path)

    val_trigger = (100000, "iteration")
    log_trigger = (1000, "iteration")
    trainer.extend(
        extension=extensions.Evaluator(
            iterator=val_data["iterator"],
            target=net,
            device=main_device),
        trigger=val_trigger)
    trainer.extend(extensions.dump_graph("main/loss"))
    trainer.extend(extensions.snapshot(), trigger=val_trigger)
    trainer.extend(
        extensions.snapshot_object(
            net,
            "model_iter_{.updater.iteration}"),
        trigger=val_trigger)
    trainer.extend(extensions.LogReport(trigger=log_trigger))
    trainer.extend(extensions.observe_lr(), trigger=log_trigger)
    reported_entries = [
        "epoch", "iteration", "main/loss", "validation/main/loss",
        "main/accuracy", "validation/main/accuracy", "lr"]
    trainer.extend(extensions.PrintReport(reported_entries), trigger=log_trigger)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    return trainer
def save_params(file_stem,
                net,
                trainer):
    """
    Save the network weights (<stem>.npz) and trainer state (<stem>.states).
    """
    for suffix, payload in ((".npz", net), (".states", trainer)):
        save_npz(
            file=file_stem + suffix,
            obj=payload)
def main():
    """
    Main body of script: parse arguments, set up logging and data sources,
    build the model and run the Chainer trainer.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)

    initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)

    use_gpus = prepare_ch_context(args.num_gpus)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_gpus=use_gpus,
        num_classes=args.num_classes,
        in_channels=args.in_channels)
    assert (hasattr(net, "classes"))
    assert (hasattr(net, "in_size"))

    # Train and validation sources share the same loader configuration.
    loader_kwargs = dict(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    train_data = get_train_data_source(**loader_kwargs)
    val_data = get_val_data_source(**loader_kwargs)

    trainer = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        lr=args.lr,
        momentum=args.momentum,
        num_epochs=args.num_epochs,
        train_data=train_data,
        val_data=val_data,
        logging_dir_path=args.save_dir,
        use_gpus=use_gpus)
    trainer.run()
# Standard script entry point: run training only when executed directly.
if __name__ == "__main__":
    main()
| 9,406
| 27.506061
| 120
|
py
|
imgclsmob
|
imgclsmob-master/eval_tf.py
|
"""
Script for evaluating trained model on TensorFlow (validate/test).
"""
import argparse
import tqdm
import time
import logging
from tensorpack.predict import PredictConfig, FeedfreePredictor
from tensorpack.utils.stats import RatioCounter
from tensorpack.input_source import QueueInput, StagingInput
from common.logger_utils import initialize_logging
from tensorflow_.utils_tp import prepare_tf_context, prepare_model, get_data, calc_flops
def parse_args():
    """
    Parse python script parameters.

    Returns:
    -------
    ArgumentParser
        Resulted args.
    """
    parser = argparse.ArgumentParser(
        description="Evaluate a model for image classification (TensorFlow/TensorPack)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Options declared as (flags, kwargs) pairs; flags, defaults and help
    # texts are identical to the one-call-per-option form.
    option_table = [
        (("--data-dir",), dict(
            type=str,
            default="../imgclsmob_data/imagenet",
            help="training and validation pictures to use")),
        (("--data-format",), dict(
            type=str,
            default="channels_last",
            help="ordering of the dimensions in tensors. options are channels_last and channels_first")),
        (("--model",), dict(
            type=str,
            required=True,
            help="type of model to use. see model_provider for options")),
        (("--use-pretrained",), dict(
            action="store_true",
            help="enable using pretrained model")),
        (("--resume",), dict(
            type=str,
            default="",
            help="resume from previously saved parameters if not None")),
        (("--calc-flops",), dict(
            dest="calc_flops",
            action="store_true",
            help="calculate FLOPs")),
        (("--input-size",), dict(
            type=int,
            default=224,
            help="size of the input for model")),
        (("--resize-inv-factor",), dict(
            type=float,
            default=0.875,
            help="inverted ratio for input image crop")),
        (("--num-gpus",), dict(
            type=int,
            default=0,
            help="number of gpus to use")),
        (("-j", "--num-data-workers"), dict(
            dest="num_workers",
            default=4,
            type=int,
            help="number of preprocessing workers")),
        (("--batch-size",), dict(
            type=int,
            default=512,
            help="training batch size per device (CPU/GPU)")),
        (("--save-dir",), dict(
            type=str,
            default="",
            help="directory of saved models and log-files")),
        (("--logging-file-name",), dict(
            type=str,
            default="train.log",
            help="filename of training log")),
        (("--log-packages",), dict(
            type=str,
            default="tensorflow, tensorflow-gpu",
            help="list of python packages for logging")),
        (("--log-pip-packages",), dict(
            type=str,
            default="tensorflow, tensorflow-gpu, tensorpack",
            help="list of pip packages for logging")),
    ]
    for flags, options in option_table:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
def test(net,
         session_init,
         val_dataflow,
         do_calc_flops=False,
         extended_log=False):
    """
    Main test routine: run the model over the validation dataflow and log
    top-1/top-5 error rates.

    Parameters:
    ----------
    net : obj
        Model (a TensorPack model description with 'input'/'label' inputs and
        'wrong-top1'/'wrong-top5' outputs).
    session_init : SessionInit
        Session initializer.
    val_dataflow : DataFlow
        Validation data source.
    do_calc_flops : bool, default False
        Whether to additionally run `calc_flops` on the model.
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    """
    pred_config = PredictConfig(
        model=net,
        session_init=session_init,
        input_names=["input", "label"],
        output_names=["wrong-top1", "wrong-top5"]
    )
    err_top1 = RatioCounter()
    err_top5 = RatioCounter()
    tic = time.time()
    # NOTE(review): the predictor is pinned to "/gpu:0" — presumably a GPU is
    # required for evaluation here; confirm before running CPU-only.
    pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(val_dataflow), device="/gpu:0"))
    for _ in tqdm.trange(val_dataflow.size()):
        err_top1_val, err_top5_val = pred()
        batch_size = err_top1_val.shape[0]
        # Each output is a per-sample 0/1 "wrong" indicator; feeding the sum
        # with the batch size accumulates an overall error ratio.
        err_top1.feed(err_top1_val.sum(), batch_size)
        err_top5.feed(err_top5_val.sum(), batch_size)
    err_top1_val = err_top1.ratio
    err_top5_val = err_top5.ratio
    if extended_log:
        logging.info("Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})".format(
            top1=err_top1_val, top5=err_top5_val))
    else:
        logging.info("Test: err-top1={top1:.4f}\terr-top5={top5:.4f}".format(
            top1=err_top1_val, top5=err_top5_val))
    logging.info("Time cost: {:.4f} sec".format(
        time.time() - tic))
    if do_calc_flops:
        calc_flops(model=net)
def main():
    """
    Main body of script: parse arguments, prepare the model and validation
    dataflow, then run the evaluation routine.
    """
    args = parse_args()

    initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    batch_size = prepare_tf_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net, inputs_desc = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        data_format=args.data_format)

    val_dataflow = get_data(
        is_train=False,
        batch_size=batch_size,
        data_dir_path=args.data_dir,
        input_image_size=net.image_size,
        resize_inv_factor=args.resize_inv_factor)

    # Evaluation only makes sense with weights loaded from somewhere.
    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        session_init=inputs_desc,
        val_dataflow=val_dataflow,
        do_calc_flops=args.calc_flops,
        extended_log=True)
# Standard script entry point: run evaluation only when executed directly.
if __name__ == "__main__":
    main()
| 5,824
| 27.004808
| 99
|
py
|
imgclsmob
|
imgclsmob-master/train_gl_mealv2.py
|
"""
Script for training model on MXNet/Gluon.
"""
import argparse
import time
import logging
import os
import random
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet import autograd as ag
from common.logger_utils import initialize_logging
from common.train_log_param_saver import TrainLogParamSaver
from gluon.lr_scheduler import LRScheduler
from gluon.utils import prepare_mx_context, prepare_model, validate
from gluon.utils import report_accuracy, get_composite_metric, get_metric_name, get_initializer, get_loss
from gluon.metrics.metrics import LossValue
from gluon.dataset_utils import get_dataset_metainfo
from gluon.dataset_utils import get_train_data_source, get_val_data_source
from gluon.dataset_utils import get_batch_fn
from gluon.gluoncv2.models.common import Concurrent
from gluon.distillation import MealDiscriminator, MealAdvLoss
def add_train_cls_parser_arguments(parser: argparse.ArgumentParser) -> None:
    """
    Create python script parameters (for training/classification specific subpart).

    Registers model/checkpoint, hardware, optimizer/LR-schedule, regularization
    and logging options on the given parser (the parser is modified in place).

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    # --- Model selection, MEALv2 teacher ensemble and checkpointing ---
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="type of model to use. see model_provider for options")
    parser.add_argument(
        "--teacher-models",
        type=str,
        help="teacher model names to use. see model_provider for options")
    parser.add_argument(
        "--use-pretrained",
        action="store_true",
        help="enable using pretrained model from github repo")
    parser.add_argument(
        "--dtype",
        type=str,
        default="float32",
        help="data type for training")
    parser.add_argument(
        '--not-hybridize',
        action='store_true',
        help='do not hybridize model')
    parser.add_argument(
        '--not-discriminator',
        action='store_true',
        help='do not use discriminator')
    parser.add_argument(
        "--resume",
        type=str,
        default="",
        help="resume from previously saved parameters if not None")
    parser.add_argument(
        "--resume-state",
        type=str,
        default="",
        help="resume from previously saved optimizer state if not None")
    parser.add_argument(
        "--initializer",
        type=str,
        default="MSRAPrelu",
        help="initializer name. options are MSRAPrelu, Xavier and Xavier-gaussian-out-2")
    # --- Hardware and data loading ---
    parser.add_argument(
        "--num-gpus",
        type=int,
        default=0,
        help="number of gpus to use")
    parser.add_argument(
        "-j",
        "--num-data-workers",
        dest="num_workers",
        default=4,
        type=int,
        help="number of preprocessing workers")
    parser.add_argument(
        "--batch-size",
        type=int,
        default=512,
        help="training batch size per device (CPU/GPU)")
    parser.add_argument(
        "--batch-size-scale",
        type=int,
        default=1,
        help="manual batch-size increasing factor")
    # --- Training run bookkeeping ---
    parser.add_argument(
        "--num-epochs",
        type=int,
        default=120,
        help="number of training epochs")
    parser.add_argument(
        "--start-epoch",
        type=int,
        default=1,
        help="starting epoch for resuming, default is 1 for new training")
    parser.add_argument(
        "--attempt",
        type=int,
        default=1,
        help="current attempt number for training")
    # --- Optimizer and learning-rate schedule ---
    parser.add_argument(
        "--optimizer-name",
        type=str,
        default="nag",
        help="optimizer name")
    parser.add_argument(
        "--lr",
        type=float,
        default=0.1,
        help="learning rate")
    parser.add_argument(
        "--dlr-factor",
        type=float,
        default=1.0,
        help="discriminator learning rate factor")
    parser.add_argument(
        "--lr-mode",
        type=str,
        default="cosine",
        help="learning rate scheduler mode. options are step, poly and cosine")
    parser.add_argument(
        "--lr-decay",
        type=float,
        default=0.1,
        help="decay rate of learning rate")
    parser.add_argument(
        "--lr-decay-period",
        type=int,
        default=0,
        help="interval for periodic learning rate decays. default is 0 to disable")
    parser.add_argument(
        "--lr-decay-epoch",
        type=str,
        default="40,60",
        help="epoches at which learning rate decays")
    parser.add_argument(
        "--target-lr",
        type=float,
        default=1e-8,
        help="ending learning rate")
    parser.add_argument(
        "--poly-power",
        type=float,
        default=2,
        help="power value for poly LR scheduler")
    parser.add_argument(
        "--warmup-epochs",
        type=int,
        default=0,
        help="number of warmup epochs")
    parser.add_argument(
        "--warmup-lr",
        type=float,
        default=1e-8,
        help="starting warmup learning rate")
    parser.add_argument(
        "--warmup-mode",
        type=str,
        default="linear",
        help="learning rate scheduler warmup mode. options are linear, poly and constant")
    # --- Regularization and gradient control ---
    parser.add_argument(
        "--momentum",
        type=float,
        default=0.9,
        help="momentum value for optimizer")
    parser.add_argument(
        "--wd",
        type=float,
        default=0.0001,
        help="weight decay rate")
    parser.add_argument(
        "--gamma-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for batchnorm gamma")
    parser.add_argument(
        "--beta-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for batchnorm beta")
    parser.add_argument(
        "--bias-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for bias")
    parser.add_argument(
        "--grad-clip",
        type=float,
        default=None,
        help="max_norm for gradient clipping")
    # --- Label/augmentation tricks ---
    parser.add_argument(
        "--label-smoothing",
        action="store_true",
        help="use label smoothing")
    parser.add_argument(
        "--mixup",
        action="store_true",
        help="use mixup strategy")
    parser.add_argument(
        "--mixup-epoch-tail",
        type=int,
        default=12,
        help="number of epochs without mixup at the end of training")
    # --- Logging, checkpoint saving and misc ---
    parser.add_argument(
        "--log-interval",
        type=int,
        default=50,
        help="number of batches to wait before logging")
    parser.add_argument(
        "--save-interval",
        type=int,
        default=4,
        help="saving parameters epoch interval, best model will always be saved")
    parser.add_argument(
        "--save-dir",
        type=str,
        default="",
        help="directory of saved models and log-files")
    parser.add_argument(
        "--logging-file-name",
        type=str,
        default="train.log",
        help="filename of training log")
    parser.add_argument(
        "--seed",
        type=int,
        default=-1,
        help="random seed to be fixed")
    parser.add_argument(
        "--log-packages",
        type=str,
        default="mxnet, numpy",
        help="list of python packages for logging")
    parser.add_argument(
        "--log-pip-packages",
        type=str,
        default="mxnet-cu110, mxnet-cu112",
        help="list of pip packages for logging")
    parser.add_argument(
        "--tune-layers",
        type=str,
        default="",
        help="regexp for selecting layers for fine tuning")
def parse_args():
    """
    Build the command-line parser and parse all script parameters.

    The dataset name is parsed first (via ``parse_known_args``) so that
    dataset-specific and training-specific arguments can be registered
    before the final parse.

    Returns:
    -------
    argparse.Namespace
        Resulted args.
    """
    arg_parser = argparse.ArgumentParser(
        description="Train a model for image classification (Gluon)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K_rec",
        help="dataset name. options are ImageNet1K, ImageNet1K_rec, CUB200_2011, CIFAR10, CIFAR100, SVHN")
    arg_parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    pre_args, _ = arg_parser.parse_known_args()
    ds_metainfo = get_dataset_metainfo(dataset_name=pre_args.dataset)
    ds_metainfo.add_dataset_parser_arguments(
        parser=arg_parser,
        work_dir_path=pre_args.work_dir)
    add_train_cls_parser_arguments(arg_parser)
    return arg_parser.parse_args()
def init_rand(seed):
    """
    Seed every random generator used by the script (python, numpy, mxnet).

    Parameters:
    ----------
    seed : int
        Requested seed; non-positive values request a randomly drawn one.

    Returns:
    -------
    int
        The seed actually applied.
    """
    effective_seed = seed if seed > 0 else np.random.randint(10000)
    for seed_fn in (random.seed, np.random.seed, mx.random.seed):
        seed_fn(effective_seed)
    return effective_seed
def prepare_trainer(net,
                    optimizer_name,
                    wd,
                    momentum,
                    lr_mode,
                    lr,
                    lr_decay_period,
                    lr_decay_epoch,
                    lr_decay,
                    target_lr,
                    poly_power,
                    warmup_epochs,
                    warmup_lr,
                    warmup_mode,
                    batch_size,
                    num_epochs,
                    num_training_samples,
                    dtype,
                    gamma_wd_mult=1.0,
                    beta_wd_mult=1.0,
                    bias_wd_mult=1.0,
                    state_file_path=None):
    """
    Prepare trainer.

    Applies per-parameter weight-decay multipliers, builds the LR scheduler
    and the Gluon trainer, and optionally restores a saved trainer state.

    Parameters:
    ----------
    net : HybridBlock
        Model.
    optimizer_name : str
        Name of optimizer.
    wd : float
        Weight decay rate.
    momentum : float
        Momentum value.
    lr_mode : str
        Learning rate scheduler mode.
    lr : float
        Learning rate.
    lr_decay_period : int
        Interval for periodic learning rate decays.
    lr_decay_epoch : str
        Epoches at which learning rate decays.
    lr_decay : float
        Decay rate of learning rate.
    target_lr : float
        Final learning rate.
    poly_power : float
        Power value for poly LR scheduler.
    warmup_epochs : int
        Number of warmup epochs.
    warmup_lr : float
        Starting warmup learning rate.
    warmup_mode : str
        Learning rate scheduler warmup mode.
    batch_size : int
        Training batch size.
    num_epochs : int
        Number of training epochs.
    num_training_samples : int
        Number of training samples in dataset.
    dtype : str
        Base data type for tensors.
    gamma_wd_mult : float
        Weight decay multiplier for batchnorm gamma.
    beta_wd_mult : float
        Weight decay multiplier for batchnorm beta.
    bias_wd_mult : float
        Weight decay multiplier for bias.
    state_file_path : str, default None
        Path for file with trainer state.

    Returns:
    -------
    Trainer
        Trainer.
    LRScheduler
        Learning rate scheduler.
    """
    # Scale weight decay for batchnorm gamma/beta and biases via the
    # parameter-name regexps understood by collect_params.
    if gamma_wd_mult != 1.0:
        for k, v in net.collect_params(".*gamma").items():
            v.wd_mult = gamma_wd_mult
    if beta_wd_mult != 1.0:
        for k, v in net.collect_params(".*beta").items():
            v.wd_mult = beta_wd_mult
    if bias_wd_mult != 1.0:
        for k, v in net.collect_params(".*bias").items():
            v.wd_mult = bias_wd_mult
    # A positive lr_decay_period generates periodic decay epochs; otherwise
    # the explicit comma-separated epoch list is used.
    if lr_decay_period > 0:
        lr_decay_epoch = list(range(lr_decay_period, num_epochs, lr_decay_period))
    else:
        lr_decay_epoch = [int(i) for i in lr_decay_epoch.split(",")]
    num_batches = num_training_samples // batch_size
    lr_scheduler = LRScheduler(
        mode=lr_mode,
        base_lr=lr,
        n_iters=num_batches,
        n_epochs=num_epochs,
        step=lr_decay_epoch,
        step_factor=lr_decay,
        target_lr=target_lr,
        power=poly_power,
        warmup_epochs=warmup_epochs,
        warmup_lr=warmup_lr,
        warmup_mode=warmup_mode)
    optimizer_params = {"learning_rate": lr,
                        "wd": wd,
                        "momentum": momentum,
                        "lr_scheduler": lr_scheduler}
    if dtype != "float32":
        # Keep a float32 master copy of the weights for low-precision training.
        optimizer_params["multi_precision"] = True
    trainer = gluon.Trainer(
        params=net.collect_params(),
        optimizer=optimizer_name,
        optimizer_params=optimizer_params)
    if (state_file_path is not None) and state_file_path and os.path.exists(state_file_path):
        logging.info("Loading trainer states: {}".format(state_file_path))
        trainer.load_states(state_file_path)
        # The restored state may carry a stale weight decay / scheduler;
        # force the values requested on this run.
        if trainer._optimizer.wd != wd:
            trainer._optimizer.wd = wd
            logging.info("Reset the weight decay: {}".format(wd))
        # lr_scheduler = trainer._optimizer.lr_scheduler
        trainer._optimizer.lr_scheduler = lr_scheduler
    return trainer, lr_scheduler
def save_params(file_stem,
                net,
                trainer):
    """
    Write the current model weights and optimizer state to disk.

    Both artifacts share the same file stem and differ only by extension
    (".params" for the weights, ".states" for the trainer state).

    Parameters:
    ----------
    file_stem : str
        File stem (with path).
    net : HybridBlock
        Model.
    trainer : Trainer
        Trainer.
    """
    net.save_parameters("{}.params".format(file_stem))
    trainer.save_states("{}.states".format(file_stem))
def train_epoch(epoch,
                net,
                teacher_net,
                discrim_net,
                train_metric,
                loss_metrics,
                train_data,
                batch_fn,
                data_source_needs_reset,
                dtype,
                ctx,
                loss_func,
                discrim_loss_func,
                trainer,
                lr_scheduler,
                batch_size,
                log_interval,
                mixup,
                mixup_epoch_tail,
                label_smoothing,
                num_classes,
                num_epochs,
                grad_clip_value,
                batch_size_scale):
    """
    Train model on particular epoch.

    Parameters:
    ----------
    epoch : int
        Epoch number.
    net : HybridBlock
        Model.
    teacher_net : HybridBlock or None
        Teacher model.
    discrim_net : HybridBlock or None
        MEALv2 discriminator model.
    train_metric : EvalMetric
        Metric object instance.
    loss_metrics : list of EvalMetric
        Metric object instances (loss values).
    train_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator.
    batch_fn : func
        Function for splitting data after extraction from data loader.
    data_source_needs_reset : bool
        Whether to reset data (if test_data is ImageRecordIter).
    dtype : str
        Base data type for tensors.
    ctx : Context
        MXNet context.
    loss_func : Loss
        Loss function.
    discrim_loss_func : Loss or None
        MEALv2 adversarial loss function.
    trainer : Trainer
        Trainer.
    lr_scheduler : LRScheduler
        Learning rate scheduler.
    batch_size : int
        Training batch size.
    log_interval : int
        Batch count period for logging.
    mixup : bool
        Whether to use mixup.
    mixup_epoch_tail : int
        Number of epochs without mixup at the end of training.
    label_smoothing : bool
        Whether to use label-smoothing.
    num_classes : int
        Number of model classes.
    num_epochs : int
        Number of training epochs.
    grad_clip_value : float
        Threshold for gradient clipping.
    batch_size_scale : int
        Manual batch-size increasing factor.
    """
    labels_list_inds = None
    batch_size_extend_count = 0
    tic = time.time()
    if data_source_needs_reset:
        train_data.reset()
    train_metric.reset()
    for m in loss_metrics:
        m.reset()
    i = 0
    btic = time.time()
    for i, batch in enumerate(train_data):
        data_list, labels_list = batch_fn(batch, ctx)
        # labels_one_hot tracks whether labels_list currently holds class
        # probability vectors (soft/one-hot) instead of sparse indices;
        # labels_list_inds keeps the index form for the accuracy metric.
        labels_one_hot = False
        if teacher_net is not None:
            # Knowledge distillation: replace hard labels with the teacher
            # ensemble's softmax outputs averaged over the stacked branches.
            labels_list = [teacher_net(x.astype(dtype, copy=False)).softmax(axis=-1).mean(axis=1) for x in data_list]
            labels_list_inds = [y.argmax(axis=-1) for y in labels_list]
            labels_one_hot = True
        if label_smoothing and not (teacher_net is not None):
            # Classic label smoothing (eta = 0.1) on the hard labels.
            eta = 0.1
            on_value = 1 - eta + eta / num_classes
            off_value = eta / num_classes
            if not labels_one_hot:
                labels_list_inds = labels_list
            labels_list = [y.one_hot(depth=num_classes, on_value=on_value, off_value=off_value)
                           for y in labels_list]
            labels_one_hot = True
        if mixup:
            if not labels_one_hot:
                labels_list_inds = labels_list
                labels_list = [y.one_hot(depth=num_classes) for y in labels_list]
                labels_one_hot = True
            # Mixup is disabled for the last mixup_epoch_tail epochs.
            if epoch < num_epochs - mixup_epoch_tail:
                alpha = 1
                lam = np.random.beta(alpha, alpha)
                # Mix each sample with the reversed batch.
                data_list = [lam * x + (1 - lam) * x[::-1] for x in data_list]
                labels_list = [lam * y + (1 - lam) * y[::-1] for y in labels_list]
        with ag.record():
            outputs_list = [net(x.astype(dtype, copy=False)) for x in data_list]
            loss_list = [loss_func(yhat, y.astype(dtype, copy=False)) for yhat, y in zip(outputs_list, labels_list)]
            if discrim_net is not None:
                # MEALv2 adversarial term: the discriminator scores both the
                # student's softmax output and the (teacher) label distribution.
                d_pred_list = [discrim_net(yhat.astype(dtype, copy=False).softmax()) for yhat in outputs_list]
                d_label_list = [discrim_net(y.astype(dtype, copy=False)) for y in labels_list]
                d_loss_list = [discrim_loss_func(yhat, y) for yhat, y in zip(d_pred_list, d_label_list)]
                loss_list = [z + dz for z, dz in zip(loss_list, d_loss_list)]
            for loss in loss_list:
                loss.backward()
        lr_scheduler.update(i, epoch)
        if grad_clip_value is not None:
            grads = [v.grad(ctx[0]) for v in net.collect_params().values() if v._grad is not None]
            gluon.utils.clip_global_norm(grads, max_norm=grad_clip_value)
        if batch_size_scale == 1:
            trainer.step(batch_size)
        else:
            # Gradient accumulation: step only every batch_size_scale batches
            # (requires grad_req == "add", set up in train_net), then clear grads.
            if (i + 1) % batch_size_scale == 0:
                batch_size_extend_count = 0
                trainer.step(batch_size * batch_size_scale)
                for p in net.collect_params().values():
                    p.zero_grad()
            else:
                batch_size_extend_count += 1
        train_metric.update(
            labels=(labels_list if not labels_one_hot else labels_list_inds),
            preds=outputs_list)
        loss_metrics[0].update(labels=None, preds=loss_list)
        if (discrim_net is not None) and (len(loss_metrics) > 1):
            loss_metrics[1].update(labels=None, preds=d_loss_list)
        if log_interval and not (i + 1) % log_interval:
            speed = batch_size * log_interval / (time.time() - btic)
            btic = time.time()
            train_accuracy_msg = report_accuracy(metric=train_metric)
            loss_accuracy_msg = report_accuracy(metric=loss_metrics[0])
            if (discrim_net is not None) and (len(loss_metrics) > 1):
                dloss_accuracy_msg = report_accuracy(metric=loss_metrics[1])
                logging.info("Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\t{}\t{}\t{}\tlr={:.5f}".format(
                    epoch + 1, i, speed, train_accuracy_msg, loss_accuracy_msg, dloss_accuracy_msg,
                    trainer.learning_rate))
            else:
                logging.info("Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\t{}\t{}\tlr={:.5f}".format(
                    epoch + 1, i, speed, train_accuracy_msg, loss_accuracy_msg, trainer.learning_rate))
    # Flush any leftover accumulated gradients at the end of the epoch.
    if (batch_size_scale != 1) and (batch_size_extend_count > 0):
        trainer.step(batch_size * batch_size_extend_count)
        for p in net.collect_params().values():
            p.zero_grad()
    throughput = int(batch_size * (i + 1) / (time.time() - tic))
    logging.info("[Epoch {}] speed: {:.2f} samples/sec\ttime cost: {:.2f} sec".format(
        epoch + 1, throughput, time.time() - tic))
    train_accuracy_msg = report_accuracy(metric=train_metric)
    loss_accuracy_msg = report_accuracy(metric=loss_metrics[0])
    if (discrim_net is not None) and (len(loss_metrics) > 1):
        dloss_accuracy_msg = report_accuracy(metric=loss_metrics[1])
        logging.info("[Epoch {}] training: {}\t{}\t{}".format(epoch + 1, train_accuracy_msg, loss_accuracy_msg,
                                                              dloss_accuracy_msg))
    else:
        logging.info("[Epoch {}] training: {}\t{}".format(epoch + 1, train_accuracy_msg, loss_accuracy_msg))
def train_net(batch_size,
              num_epochs,
              start_epoch1,
              train_data,
              val_data,
              batch_fn,
              data_source_needs_reset,
              dtype,
              net,
              teacher_net,
              discrim_net,
              trainer,
              lr_scheduler,
              lp_saver,
              log_interval,
              mixup,
              mixup_epoch_tail,
              label_smoothing,
              num_classes,
              grad_clip_value,
              batch_size_scale,
              val_metric,
              train_metric,
              loss_metrics,
              loss_func,
              discrim_loss_func,
              ctx):
    """
    Main procedure for training model.

    Parameters:
    ----------
    batch_size : int
        Training batch size.
    num_epochs : int
        Number of training epochs.
    start_epoch1 : int
        Number of starting epoch (1-based).
    train_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator (training subset).
    val_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator (validation subset).
    batch_fn : func
        Function for splitting data after extraction from data loader.
    data_source_needs_reset : bool
        Whether to reset data (if test_data is ImageRecordIter).
    dtype : str
        Base data type for tensors.
    net : HybridBlock
        Model.
    teacher_net : HybridBlock or None
        Teacher model.
    discrim_net : HybridBlock or None
        MEALv2 discriminator model.
    trainer : Trainer
        Trainer.
    lr_scheduler : LRScheduler
        Learning rate scheduler.
    lp_saver : TrainLogParamSaver
        Model/trainer state saver.
    log_interval : int
        Batch count period for logging.
    mixup : bool
        Whether to use mixup.
    mixup_epoch_tail : int
        Number of epochs without mixup at the end of training.
    label_smoothing : bool
        Whether to use label-smoothing.
    num_classes : int
        Number of model classes.
    grad_clip_value : float
        Threshold for gradient clipping.
    batch_size_scale : int
        Manual batch-size increasing factor.
    val_metric : EvalMetric
        Metric object instance (validation subset).
    train_metric : EvalMetric
        Metric object instance (training subset).
    loss_metrics : list of EvalMetric
        Metric object instances (loss values).
    loss_func : Loss
        Loss object instance.
    discrim_loss_func : Loss or None
        MEALv2 adversarial loss function.
    ctx : Context
        MXNet context.
    """
    if batch_size_scale != 1:
        # Gradient accumulation: gradients are summed across batches until
        # train_epoch calls trainer.step (see batch_size_scale handling there).
        for p in net.collect_params().values():
            p.grad_req = "add"
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    # loss_func = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=(not (mixup or label_smoothing)))
    assert (type(start_epoch1) == int)
    assert (start_epoch1 >= 1)
    if start_epoch1 > 1:
        # Resuming: report the restored model's validation score before training.
        logging.info("Start training from [Epoch {}]".format(start_epoch1))
        validate(
            metric=val_metric,
            net=net,
            val_data=val_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        val_accuracy_msg = report_accuracy(metric=val_metric)
        logging.info("[Epoch {}] validation: {}".format(start_epoch1 - 1, val_accuracy_msg))
    gtic = time.time()
    for epoch in range(start_epoch1 - 1, num_epochs):
        train_epoch(
            epoch=epoch,
            net=net,
            teacher_net=teacher_net,
            discrim_net=discrim_net,
            train_metric=train_metric,
            loss_metrics=loss_metrics,
            train_data=train_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx,
            loss_func=loss_func,
            discrim_loss_func=discrim_loss_func,
            trainer=trainer,
            lr_scheduler=lr_scheduler,
            batch_size=batch_size,
            log_interval=log_interval,
            mixup=mixup,
            mixup_epoch_tail=mixup_epoch_tail,
            label_smoothing=label_smoothing,
            num_classes=num_classes,
            num_epochs=num_epochs,
            grad_clip_value=grad_clip_value,
            batch_size_scale=batch_size_scale)
        validate(
            metric=val_metric,
            net=net,
            val_data=val_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        val_accuracy_msg = report_accuracy(metric=val_metric)
        logging.info("[Epoch {}] validation: {}".format(epoch + 1, val_accuracy_msg))
        if lp_saver is not None:
            # Persist checkpoint and append metric values to the score log.
            lp_saver_kwargs = {"net": net, "trainer": trainer}
            val_acc_values = val_metric.get()[1]
            train_acc_values = train_metric.get()[1]
            val_acc_values = val_acc_values if type(val_acc_values) == list else [val_acc_values]
            train_acc_values = train_acc_values if type(train_acc_values) == list else [train_acc_values]
            lp_saver.epoch_test_end_callback(
                epoch1=(epoch + 1),
                params=(val_acc_values + train_acc_values + [loss_metrics[0].get()[1], trainer.learning_rate]),
                **lp_saver_kwargs)
    logging.info("Total time cost: {:.2f} sec".format(time.time() - gtic))
    if lp_saver is not None:
        opt_metric_name = get_metric_name(val_metric, lp_saver.acc_ind)
        logging.info("Best {}: {:.4f} at {} epoch".format(
            opt_metric_name, lp_saver.best_eval_metric_value, lp_saver.best_eval_metric_epoch))
def main():
    """
    Main body of script.

    Parses arguments, builds the student model, the optional MEALv2 teacher
    ensemble and discriminator, the data pipelines, the trainer and the
    checkpoint saver, then launches the training loop.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    # Distillation is active only when a non-empty teacher model list is given.
    use_teacher = (args.teacher_models is not None) and (args.teacher_models.strip() != "")
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        net_extra_kwargs=ds_metainfo.train_net_extra_kwargs,
        tune_layers=args.tune_layers,
        classes=args.num_classes,
        in_channels=args.in_channels,
        do_hybridize=(not args.not_hybridize),
        initializer=get_initializer(initializer_name=args.initializer),
        ctx=ctx)
    assert (hasattr(net, "classes"))
    num_classes = net.classes
    teacher_net = None
    discrim_net = None
    discrim_loss_func = None
    if use_teacher:
        # Build each (pretrained, frozen) teacher and stack them into one
        # Concurrent block whose output has a branch axis.
        teacher_nets = []
        for teacher_model in args.teacher_models.split(","):
            teacher_net = prepare_model(
                model_name=teacher_model.strip(),
                use_pretrained=True,
                pretrained_model_file_path="",
                dtype=args.dtype,
                net_extra_kwargs=ds_metainfo.train_net_extra_kwargs,
                do_hybridize=(not args.not_hybridize),
                ctx=ctx)
            assert (teacher_net.classes == net.classes)
            assert (teacher_net.in_size == net.in_size)
            teacher_nets.append(teacher_net)
        if len(teacher_nets) > 0:
            teacher_net = Concurrent(stack=True, prefix="", branches=teacher_nets)
            # Freeze the teachers: no gradients are computed for them.
            for k, v in teacher_net.collect_params().items():
                v.grad_req = "null"
            if not args.not_discriminator:
                # MEALv2 discriminator, trained jointly with a scaled LR.
                discrim_net = MealDiscriminator()
                discrim_net.cast(args.dtype)
                if not args.not_hybridize:
                    discrim_net.hybridize(
                        static_alloc=True,
                        static_shape=True)
                discrim_net.initialize(mx.init.MSRAPrelu(), ctx=ctx)
                for k, v in discrim_net.collect_params().items():
                    v.lr_mult = args.dlr_factor
                discrim_loss_func = MealAdvLoss()
    train_data = get_train_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=args.num_workers)
    val_data = get_val_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=args.num_workers)
    batch_fn = get_batch_fn(ds_metainfo=ds_metainfo)
    num_training_samples = len(train_data._dataset) if not ds_metainfo.use_imgrec else ds_metainfo.num_training_samples
    trainer, lr_scheduler = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        wd=args.wd,
        momentum=args.momentum,
        lr_mode=args.lr_mode,
        lr=args.lr,
        lr_decay_period=args.lr_decay_period,
        lr_decay_epoch=args.lr_decay_epoch,
        lr_decay=args.lr_decay,
        target_lr=args.target_lr,
        poly_power=args.poly_power,
        warmup_epochs=args.warmup_epochs,
        warmup_lr=args.warmup_lr,
        warmup_mode=args.warmup_mode,
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        num_training_samples=num_training_samples,
        dtype=args.dtype,
        gamma_wd_mult=args.gamma_wd_mult,
        beta_wd_mult=args.beta_wd_mult,
        bias_wd_mult=args.bias_wd_mult,
        state_file_path=args.resume_state)
    if args.save_dir and args.save_interval:
        # Checkpoint/score-log saver; disabled when no save dir is given.
        param_names = ds_metainfo.val_metric_capts + ds_metainfo.train_metric_capts + ["Train.Loss", "LR"]
        lp_saver = TrainLogParamSaver(
            checkpoint_file_name_prefix="{}_{}".format(ds_metainfo.short_label, args.model),
            last_checkpoint_file_name_suffix="last",
            best_checkpoint_file_name_suffix=None,
            last_checkpoint_dir_path=args.save_dir,
            best_checkpoint_dir_path=None,
            last_checkpoint_file_count=2,
            best_checkpoint_file_count=2,
            checkpoint_file_save_callback=save_params,
            checkpoint_file_exts=(".params", ".states"),
            save_interval=args.save_interval,
            num_epochs=args.num_epochs,
            param_names=param_names,
            acc_ind=ds_metainfo.saver_acc_ind,
            # bigger=[True],
            # mask=None,
            score_log_file_path=os.path.join(args.save_dir, "score.log"),
            score_log_attempt_value=args.attempt,
            best_map_log_file_path=os.path.join(args.save_dir, "best_map.log"))
    else:
        lp_saver = None
    val_metric = get_composite_metric(ds_metainfo.val_metric_names, ds_metainfo.val_metric_extra_kwargs)
    train_metric = get_composite_metric(ds_metainfo.train_metric_names, ds_metainfo.train_metric_extra_kwargs)
    loss_metrics = [LossValue(name="loss"), LossValue(name="dloss")]
    # Sparse labels are only valid when labels stay as class indices
    # (no mixup, no label smoothing, no teacher soft labels).
    loss_kwargs = {"sparse_label": (not (args.mixup or args.label_smoothing) and
                                    not (use_teacher and (teacher_net is not None)))}
    if ds_metainfo.loss_extra_kwargs is not None:
        loss_kwargs.update(ds_metainfo.loss_extra_kwargs)
    loss_func = get_loss(ds_metainfo.loss_name, loss_kwargs)
    train_net(
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        start_epoch1=args.start_epoch,
        train_data=train_data,
        val_data=val_data,
        batch_fn=batch_fn,
        data_source_needs_reset=ds_metainfo.use_imgrec,
        dtype=args.dtype,
        net=net,
        teacher_net=teacher_net,
        discrim_net=discrim_net,
        trainer=trainer,
        lr_scheduler=lr_scheduler,
        lp_saver=lp_saver,
        log_interval=args.log_interval,
        mixup=args.mixup,
        mixup_epoch_tail=args.mixup_epoch_tail,
        label_smoothing=args.label_smoothing,
        num_classes=num_classes,
        grad_clip_value=args.grad_clip,
        batch_size_scale=args.batch_size_scale,
        val_metric=val_metric,
        train_metric=train_metric,
        loss_metrics=loss_metrics,
        loss_func=loss_func,
        discrim_loss_func=discrim_loss_func,
        ctx=ctx)
if __name__ == "__main__":
    main()
| 33,553
| 32.188922
| 119
|
py
|
imgclsmob
|
imgclsmob-master/train_pt.py
|
"""
Script for training model on PyTorch.
"""
import os
import time
import logging
import argparse
import random
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.utils.data
from common.logger_utils import initialize_logging
from common.train_log_param_saver import TrainLogParamSaver
from pytorch.utils import prepare_pt_context, prepare_model, validate
from pytorch.utils import report_accuracy, get_composite_metric, get_metric_name
from pytorch.dataset_utils import get_dataset_metainfo
from pytorch.dataset_utils import get_train_data_source, get_val_data_source
def add_train_cls_parser_arguments(parser: argparse.ArgumentParser) -> None:
    """
    Create python script parameters (for training/classification specific subpart).

    Registers model/checkpoint, hardware, optimizer/LR-schedule, regularization
    and logging options on the given parser (the parser is modified in place).

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    # --- Model selection and checkpointing ---
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="type of model to use. see model_provider for options")
    parser.add_argument(
        "--use-pretrained",
        action="store_true",
        help="enable using pretrained model from github repo")
    parser.add_argument(
        "--resume",
        type=str,
        default="",
        help="resume from previously saved parameters if not None")
    parser.add_argument(
        "--resume-state",
        type=str,
        default="",
        help="resume from previously saved optimizer state if not None")
    # --- Hardware and data loading ---
    parser.add_argument(
        "--num-gpus",
        type=int,
        default=0,
        help="number of gpus to use")
    parser.add_argument(
        "-j",
        "--num-data-workers",
        dest="num_workers",
        default=4,
        type=int,
        help="number of preprocessing workers")
    parser.add_argument(
        "--batch-size",
        type=int,
        default=512,
        help="training batch size per device (CPU/GPU)")
    parser.add_argument(
        "--batch-size-scale",
        type=int,
        default=1,
        help="manual batch-size increasing factor")
    # --- Training run bookkeeping ---
    parser.add_argument(
        "--num-epochs",
        type=int,
        default=120,
        help="number of training epochs")
    parser.add_argument(
        "--start-epoch",
        type=int,
        default=1,
        help="starting epoch for resuming, default is 1 for new training")
    parser.add_argument(
        "--attempt",
        type=int,
        default=1,
        help="current attempt number for training")
    # --- Optimizer and learning-rate schedule ---
    parser.add_argument(
        "--optimizer-name",
        type=str,
        default="nag",
        help="optimizer name")
    parser.add_argument(
        "--lr",
        type=float,
        default=0.1,
        help="learning rate")
    parser.add_argument(
        "--lr-mode",
        type=str,
        default="cosine",
        help="learning rate scheduler mode. options are step, poly and cosine")
    parser.add_argument(
        "--lr-decay",
        type=float,
        default=0.1,
        help="decay rate of learning rate")
    parser.add_argument(
        "--lr-decay-period",
        type=int,
        default=0,
        help="interval for periodic learning rate decays. default is 0 to disable")
    parser.add_argument(
        "--lr-decay-epoch",
        type=str,
        default="40,60",
        help="epoches at which learning rate decays")
    parser.add_argument(
        "--target-lr",
        type=float,
        default=1e-8,
        help="ending learning rate")
    parser.add_argument(
        "--poly-power",
        type=float,
        default=2,
        help="power value for poly LR scheduler")
    parser.add_argument(
        "--warmup-epochs",
        type=int,
        default=0,
        help="number of warmup epochs")
    parser.add_argument(
        "--warmup-lr",
        type=float,
        default=1e-8,
        help="starting warmup learning rate")
    parser.add_argument(
        "--warmup-mode",
        type=str,
        default="linear",
        help="learning rate scheduler warmup mode. options are linear, poly and constant")
    # --- Regularization and gradient control ---
    parser.add_argument(
        "--momentum",
        type=float,
        default=0.9,
        help="momentum value for optimizer")
    parser.add_argument(
        "--wd",
        type=float,
        default=0.0001,
        help="weight decay rate")
    parser.add_argument(
        "--gamma-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for batchnorm gamma")
    parser.add_argument(
        "--beta-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for batchnorm beta")
    parser.add_argument(
        "--bias-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for bias")
    parser.add_argument(
        "--grad-clip",
        type=float,
        default=None,
        help="max_norm for gradient clipping")
    # --- Label/augmentation tricks ---
    parser.add_argument(
        "--label-smoothing",
        action="store_true",
        help="use label smoothing")
    parser.add_argument(
        "--mixup",
        action="store_true",
        help="use mixup strategy")
    parser.add_argument(
        "--mixup-epoch-tail",
        type=int,
        default=15,
        help="number of epochs without mixup at the end of training")
    # --- Logging, checkpoint saving and misc ---
    parser.add_argument(
        "--log-interval",
        type=int,
        default=50,
        help="number of batches to wait before logging")
    parser.add_argument(
        "--save-interval",
        type=int,
        default=4,
        help="saving parameters epoch interval, best model will always be saved")
    parser.add_argument(
        "--save-dir",
        type=str,
        default="",
        help="directory of saved models and log-files")
    parser.add_argument(
        "--logging-file-name",
        type=str,
        default="train.log",
        help="filename of training log")
    parser.add_argument(
        "--seed",
        type=int,
        default=-1,
        help="Random seed to be fixed")
    parser.add_argument(
        "--log-packages",
        type=str,
        default="torch, torchvision",
        help="list of python packages for logging")
    parser.add_argument(
        "--log-pip-packages",
        type=str,
        default="",
        help="list of pip packages for logging")
    parser.add_argument(
        "--tune-layers",
        type=str,
        default="",
        help="regexp for selecting layers for fine tuning")
def parse_args():
    """
    Build the command-line parser and parse all script parameters.

    The dataset name is parsed first (via ``parse_known_args``) so that
    dataset-specific and training-specific arguments can be registered
    before the final parse.

    Returns:
    -------
    argparse.Namespace
        Resulted args.
    """
    arg_parser = argparse.ArgumentParser(
        description="Train a model for image classification (PyTorch)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K",
        help="dataset name. options are ImageNet1K, CUB200_2011, CIFAR10, CIFAR100, SVHN")
    arg_parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    pre_args, _ = arg_parser.parse_known_args()
    ds_metainfo = get_dataset_metainfo(dataset_name=pre_args.dataset)
    ds_metainfo.add_dataset_parser_arguments(
        parser=arg_parser,
        work_dir_path=pre_args.work_dir)
    add_train_cls_parser_arguments(arg_parser)
    return arg_parser.parse_args()
def init_rand(seed):
    """
    Seed every random generator used by the script (python, numpy, torch).

    Parameters:
    ----------
    seed : int
        Requested seed; non-positive values request a randomly drawn one.

    Returns:
    -------
    int
        The seed actually applied.
    """
    if seed > 0:
        # A user-fixed seed implies deterministic CUDNN kernels (slower).
        cudnn.deterministic = True
        logging.warning(
            "You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down "
            "your training considerably! You may see unexpected behavior when restarting from checkpoints.")
    else:
        seed = np.random.randint(10000)
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(seed)
    return seed
def prepare_trainer(net,
                    optimizer_name,
                    wd,
                    momentum,
                    lr_mode,
                    lr,
                    lr_decay_period,
                    lr_decay_epoch,
                    lr_decay,
                    num_epochs,
                    state_file_path):
    """
    Prepare trainer.

    Parameters:
    ----------
    net : Module
        Model.
    optimizer_name : str
        Name of optimizer. Only 'sgd' and 'nag' are supported.
    wd : float
        Weight decay rate.
    momentum : float
        Momentum value.
    lr_mode : str
        Learning rate scheduler mode ('step', 'multistep' or 'cosine').
    lr : float
        Learning rate.
    lr_decay_period : int
        Interval for periodic learning rate decays (0 disables periodic mode).
    lr_decay_epoch : str
        Epoches at which learning rate decays (comma-separated).
    lr_decay : float
        Decay rate of learning rate.
    num_epochs : int
        Number of training epochs.
    state_file_path : str
        Path for file with trainer state (empty string to start fresh).

    Returns:
    -------
    Optimizer
        Optimizer.
    LRScheduler
        Learning rate scheduler.
    int or None
        Start epoch (None when no full checkpoint was loaded).

    Raises:
    ------
    ValueError
        If the optimizer or scheduler name is not supported.
    """
    optimizer_name = optimizer_name.lower()
    if optimizer_name in ("sgd", "nag"):
        # 'nag' is SGD with Nesterov momentum enabled.
        optimizer = torch.optim.SGD(
            params=net.parameters(),
            lr=lr,
            momentum=momentum,
            weight_decay=wd,
            nesterov=(optimizer_name == "nag"))
    else:
        raise ValueError("Unsupported optimizer: {}".format(optimizer_name))

    start_epoch = None
    if state_file_path:
        checkpoint = torch.load(state_file_path)
        # Full checkpoints are dicts carrying optimizer state and epoch;
        # anything else is treated as bare weights with no trainer state.
        if isinstance(checkpoint, dict):
            optimizer.load_state_dict(checkpoint["optimizer"])
            start_epoch = checkpoint["epoch"]

    cudnn.benchmark = True

    lr_mode = lr_mode.lower()
    if lr_decay_period > 0:
        # Periodic mode: decay every lr_decay_period epochs.
        lr_decay_epoch = list(range(lr_decay_period, num_epochs, lr_decay_period))
    else:
        lr_decay_epoch = [int(i) for i in lr_decay_epoch.split(",")]
    if (lr_mode == "step") and (lr_decay_period != 0):
        lr_scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer=optimizer,
            step_size=lr_decay_period,
            gamma=lr_decay,
            last_epoch=-1)
    elif (lr_mode == "multistep") or ((lr_mode == "step") and (lr_decay_period == 0)):
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer=optimizer,
            milestones=lr_decay_epoch,
            gamma=lr_decay,
            last_epoch=-1)
    elif lr_mode == "cosine":
        # CosineAnnealingLR with last_epoch != -1 requires 'initial_lr' per group.
        for group in optimizer.param_groups:
            group.setdefault("initial_lr", group["lr"])
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer=optimizer,
            T_max=num_epochs,
            last_epoch=(num_epochs - 1))
    else:
        raise ValueError("Unsupported lr_scheduler: {}".format(lr_mode))

    return optimizer, lr_scheduler, start_epoch
def save_params(file_stem,
                state):
    """
    Save current model/trainer parameters.

    Two files are written: '<file_stem>.pth' with only the model weights
    and '<file_stem>.states' with the whole training state.

    Parameters:
    ----------
    file_stem : str
        File stem (with path).
    state : dict
        Whole state of model & trainer; must contain a 'state_dict' entry.
    """
    # Weights-only file for deployment/evaluation.
    torch.save(
        obj=state["state_dict"],
        f=(file_stem + ".pth"))
    # Full state file for resuming training.
    torch.save(
        obj=state,
        f=(file_stem + ".states"))
def train_epoch(epoch,
                net,
                train_metric,
                train_data,
                use_cuda,
                L,
                optimizer,
                # lr_scheduler,
                batch_size,
                log_interval):
    """
    Train model on particular epoch.
    Parameters:
    ----------
    epoch : int
        Epoch number.
    net : Module
        Model.
    train_metric : EvalMetric
        Metric object instance.
    train_data : DataLoader
        Data loader.
    use_cuda : bool
        Whether to use CUDA.
    L : Loss
        Loss function.
    optimizer : Optimizer
        Optimizer.
    batch_size : int
        Training batch size.
    log_interval : int
        Batch count period for logging.
    Returns:
    -------
    float
        Loss value.
    """
    tic = time.time()
    net.train()
    train_metric.reset()
    train_loss = 0.0
    btic = time.time()
    # Standard supervised loop: forward, loss, backward, parameter update.
    # NOTE(review): assumes train_data yields at least one batch, otherwise
    # `i` below is unbound — confirm callers never pass an empty loader.
    for i, (data, target) in enumerate(train_data):
        if use_cuda:
            data = data.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
        output = net(data)
        loss = L(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        train_metric.update(
            labels=target,
            preds=output)
        # Periodic progress logging with throughput measured since the last log.
        if log_interval and not (i + 1) % log_interval:
            speed = batch_size * log_interval / (time.time() - btic)
            btic = time.time()
            train_accuracy_msg = report_accuracy(metric=train_metric)
            logging.info("Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\t{}\tlr={:.5f}".format(
                epoch + 1, i, speed, train_accuracy_msg, optimizer.param_groups[0]["lr"]))
    throughput = int(batch_size * (i + 1) / (time.time() - tic))
    logging.info("[Epoch {}] speed: {:.2f} samples/sec\ttime cost: {:.2f} sec".format(
        epoch + 1, throughput, time.time() - tic))
    # Average the accumulated per-batch mean losses over the batch count.
    train_loss /= (i + 1)
    train_accuracy_msg = report_accuracy(metric=train_metric)
    logging.info("[Epoch {}] training: {}\tloss={:.4f}".format(
        epoch + 1, train_accuracy_msg, train_loss))
    return train_loss
def train_net(batch_size,
              num_epochs,
              start_epoch1,
              train_data,
              val_data,
              net,
              optimizer,
              lr_scheduler,
              lp_saver,
              log_interval,
              num_classes,
              val_metric,
              train_metric,
              use_cuda):
    """
    Main procedure for training model.
    Parameters:
    ----------
    batch_size : int
        Training batch size.
    num_epochs : int
        Number of training epochs.
    start_epoch1 : int
        Number of starting epoch (1-based).
    train_data : DataLoader
        Data loader (training subset).
    val_data : DataLoader
        Data loader (validation subset).
    net : Module
        Model.
    optimizer : Optimizer
        Optimizer.
    lr_scheduler : LRScheduler
        Learning rate scheduler.
    lp_saver : TrainLogParamSaver
        Model/trainer state saver.
    log_interval : int
        Batch count period for logging.
    num_classes : int
        Number of model classes.
    val_metric : EvalMetric
        Metric object instance (validation subset).
    train_metric : EvalMetric
        Metric object instance (training subset).
    use_cuda : bool
        Whether to use CUDA.
    """
    assert (num_classes > 0)
    L = nn.CrossEntropyLoss()
    if use_cuda:
        L = L.cuda()
    assert (type(start_epoch1) == int)
    assert (start_epoch1 >= 1)
    # When resuming, validate once before training to log the resumed accuracy.
    if start_epoch1 > 1:
        logging.info("Start training from [Epoch {}]".format(start_epoch1))
        validate(
            metric=val_metric,
            net=net,
            val_data=val_data,
            use_cuda=use_cuda)
        val_accuracy_msg = report_accuracy(metric=val_metric)
        logging.info("[Epoch {}] validation: {}".format(start_epoch1 - 1, val_accuracy_msg))
    gtic = time.time()
    for epoch in range(start_epoch1 - 1, num_epochs):
        # NOTE(review): scheduler is stepped at the start of each epoch,
        # before optimizer.step() — the pre-PyTorch-1.1 ordering; confirm intended.
        lr_scheduler.step()
        train_loss = train_epoch(
            epoch=epoch,
            net=net,
            train_metric=train_metric,
            train_data=train_data,
            use_cuda=use_cuda,
            L=L,
            optimizer=optimizer,
            # lr_scheduler,
            batch_size=batch_size,
            log_interval=log_interval)
        validate(
            metric=val_metric,
            net=net,
            val_data=val_data,
            use_cuda=use_cuda)
        val_accuracy_msg = report_accuracy(metric=val_metric)
        logging.info("[Epoch {}] validation: {}".format(epoch + 1, val_accuracy_msg))
        # Persist model + optimizer state and log the epoch's scalar metrics.
        if lp_saver is not None:
            state = {
                "epoch": epoch + 1,
                "state_dict": net.state_dict(),
                "optimizer": optimizer.state_dict(),
            }
            lp_saver_kwargs = {"state": state}
            val_acc_values = val_metric.get()[1]
            train_acc_values = train_metric.get()[1]
            val_acc_values = val_acc_values if type(val_acc_values) == list else [val_acc_values]
            train_acc_values = train_acc_values if type(train_acc_values) == list else [train_acc_values]
            lp_saver.epoch_test_end_callback(
                epoch1=(epoch + 1),
                params=(val_acc_values + train_acc_values + [train_loss, optimizer.param_groups[0]["lr"]]),
                **lp_saver_kwargs)
    logging.info("Total time cost: {:.2f} sec".format(time.time() - gtic))
    if lp_saver is not None:
        opt_metric_name = get_metric_name(val_metric, lp_saver.acc_ind)
        logging.info("Best {}: {:.4f} at {} epoch".format(
            opt_metric_name, lp_saver.best_eval_metric_value, lp_saver.best_eval_metric_epoch))
def main():
    """
    Main body of script.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda)
    # Unwrap DataParallel-style wrappers to reach the model attributes.
    real_net = net.module if hasattr(net, "module") else net
    assert (hasattr(real_net, "num_classes"))
    num_classes = real_net.num_classes
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    train_data = get_train_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=args.num_workers)
    val_data = get_val_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=args.num_workers)
    # NOTE(review): the start_epoch returned here is unused; train_net is
    # driven by args.start_epoch instead — confirm intended.
    optimizer, lr_scheduler, start_epoch = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        wd=args.wd,
        momentum=args.momentum,
        lr_mode=args.lr_mode,
        lr=args.lr,
        lr_decay_period=args.lr_decay_period,
        lr_decay_epoch=args.lr_decay_epoch,
        lr_decay=args.lr_decay,
        num_epochs=args.num_epochs,
        state_file_path=args.resume_state)
    # Checkpointing is enabled only when both a directory and interval are set.
    if args.save_dir and args.save_interval:
        param_names = ds_metainfo.val_metric_capts + ds_metainfo.train_metric_capts + ["Train.Loss", "LR"]
        lp_saver = TrainLogParamSaver(
            checkpoint_file_name_prefix="{}_{}".format(ds_metainfo.short_label, args.model),
            last_checkpoint_file_name_suffix="last",
            best_checkpoint_file_name_suffix=None,
            last_checkpoint_dir_path=args.save_dir,
            best_checkpoint_dir_path=None,
            last_checkpoint_file_count=2,
            best_checkpoint_file_count=2,
            checkpoint_file_save_callback=save_params,
            checkpoint_file_exts=(".pth", ".states"),
            save_interval=args.save_interval,
            num_epochs=args.num_epochs,
            param_names=param_names,
            acc_ind=ds_metainfo.saver_acc_ind,
            # bigger=[True],
            # mask=None,
            score_log_file_path=os.path.join(args.save_dir, "score.log"),
            score_log_attempt_value=args.attempt,
            best_map_log_file_path=os.path.join(args.save_dir, "best_map.log"))
    else:
        lp_saver = None
    train_net(
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        start_epoch1=args.start_epoch,
        train_data=train_data,
        val_data=val_data,
        net=net,
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        lp_saver=lp_saver,
        log_interval=args.log_interval,
        num_classes=num_classes,
        val_metric=get_composite_metric(ds_metainfo.val_metric_names, ds_metainfo.val_metric_extra_kwargs),
        train_metric=get_composite_metric(ds_metainfo.train_metric_names, ds_metainfo.train_metric_extra_kwargs),
        use_cuda=use_cuda)
# Script entry point.
if __name__ == "__main__":
    main()
| 20,958
| 28.519718
| 119
|
py
|
imgclsmob
|
imgclsmob-master/__init__.py
| 0
| 0
| 0
|
py
|
|
imgclsmob
|
imgclsmob-master/train_gl.py
|
"""
Script for training model on MXNet/Gluon.
"""
import argparse
import time
import logging
import os
import random
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet import autograd as ag
from common.logger_utils import initialize_logging
from common.train_log_param_saver import TrainLogParamSaver
from gluon.lr_scheduler import LRScheduler
from gluon.utils import prepare_mx_context, prepare_model, validate
from gluon.utils import report_accuracy, get_composite_metric, get_metric_name, get_initializer, get_loss
from gluon.dataset_utils import get_dataset_metainfo
from gluon.dataset_utils import get_train_data_source, get_val_data_source
from gluon.dataset_utils import get_batch_fn
def add_train_cls_parser_arguments(parser):
    """
    Create python script parameters (for training/classification specific subpart).

    Parameters:
    ----------
    parser : ArgumentParser
        ArgumentParser instance.
    """
    parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="type of model to use. see model_provider for options")
    parser.add_argument(
        "--use-pretrained",
        action="store_true",
        help="enable using pretrained model from github repo")
    parser.add_argument(
        "--dtype",
        type=str,
        default="float32",
        help="data type for training")
    # Quoting normalized to double quotes for consistency with the rest of the file.
    parser.add_argument(
        "--not-hybridize",
        action="store_true",
        help="do not hybridize model")
    parser.add_argument(
        "--resume",
        type=str,
        default="",
        help="resume from previously saved parameters if not None")
    parser.add_argument(
        "--resume-state",
        type=str,
        default="",
        help="resume from previously saved optimizer state if not None")
    parser.add_argument(
        "--initializer",
        type=str,
        default="MSRAPrelu",
        help="initializer name. options are MSRAPrelu, Xavier and Xavier-gaussian-out-2")
    parser.add_argument(
        "--num-gpus",
        type=int,
        default=0,
        help="number of gpus to use")
    parser.add_argument(
        "-j",
        "--num-data-workers",
        dest="num_workers",
        default=4,
        type=int,
        help="number of preprocessing workers")
    parser.add_argument(
        "--batch-size",
        type=int,
        default=512,
        help="training batch size per device (CPU/GPU)")
    parser.add_argument(
        "--batch-size-scale",
        type=int,
        default=1,
        help="manual batch-size increasing factor")
    parser.add_argument(
        "--num-epochs",
        type=int,
        default=120,
        help="number of training epochs")
    parser.add_argument(
        "--start-epoch",
        type=int,
        default=1,
        help="starting epoch for resuming, default is 1 for new training")
    parser.add_argument(
        "--attempt",
        type=int,
        default=1,
        help="current attempt number for training")
    parser.add_argument(
        "--optimizer-name",
        type=str,
        default="nag",
        help="optimizer name")
    parser.add_argument(
        "--lr",
        type=float,
        default=0.1,
        help="learning rate")
    parser.add_argument(
        "--lr-mode",
        type=str,
        default="cosine",
        help="learning rate scheduler mode. options are step, poly and cosine")
    parser.add_argument(
        "--lr-decay",
        type=float,
        default=0.1,
        help="decay rate of learning rate")
    parser.add_argument(
        "--lr-decay-period",
        type=int,
        default=0,
        help="interval for periodic learning rate decays. default is 0 to disable")
    parser.add_argument(
        "--lr-decay-epoch",
        type=str,
        default="40,60",
        help="epoches at which learning rate decays")
    parser.add_argument(
        "--target-lr",
        type=float,
        default=1e-8,
        help="ending learning rate")
    parser.add_argument(
        "--poly-power",
        type=float,
        default=2,
        help="power value for poly LR scheduler")
    parser.add_argument(
        "--warmup-epochs",
        type=int,
        default=0,
        help="number of warmup epochs")
    parser.add_argument(
        "--warmup-lr",
        type=float,
        default=1e-8,
        help="starting warmup learning rate")
    parser.add_argument(
        "--warmup-mode",
        type=str,
        default="linear",
        help="learning rate scheduler warmup mode. options are linear, poly and constant")
    parser.add_argument(
        "--momentum",
        type=float,
        default=0.9,
        help="momentum value for optimizer")
    parser.add_argument(
        "--wd",
        type=float,
        default=0.0001,
        help="weight decay rate")
    parser.add_argument(
        "--gamma-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for batchnorm gamma")
    parser.add_argument(
        "--beta-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for batchnorm beta")
    parser.add_argument(
        "--bias-wd-mult",
        type=float,
        default=1.0,
        help="weight decay multiplier for bias")
    parser.add_argument(
        "--grad-clip",
        type=float,
        default=None,
        help="max_norm for gradient clipping")
    parser.add_argument(
        "--label-smoothing",
        action="store_true",
        help="use label smoothing")
    parser.add_argument(
        "--mixup",
        action="store_true",
        help="use mixup strategy")
    parser.add_argument(
        "--mixup-epoch-tail",
        type=int,
        default=12,
        help="number of epochs without mixup at the end of training")
    parser.add_argument(
        "--log-interval",
        type=int,
        default=50,
        help="number of batches to wait before logging")
    parser.add_argument(
        "--save-interval",
        type=int,
        default=4,
        help="saving parameters epoch interval, best model will always be saved")
    parser.add_argument(
        "--save-dir",
        type=str,
        default="",
        help="directory of saved models and log-files")
    parser.add_argument(
        "--logging-file-name",
        type=str,
        default="train.log",
        help="filename of training log")
    parser.add_argument(
        "--seed",
        type=int,
        default=-1,
        help="random seed to be fixed")
    parser.add_argument(
        "--log-packages",
        type=str,
        default="mxnet, numpy",
        help="list of python packages for logging")
    parser.add_argument(
        "--log-pip-packages",
        type=str,
        default="mxnet-cu110, mxnet-cu112",
        help="list of pip packages for logging")
    parser.add_argument(
        "--tune-layers",
        type=str,
        default="",
        help="regexp for selecting layers for fine tuning")
def parse_args():
    """
    Parse common command line parameters of the script.

    First parses only the dataset/work-dir options, then lets the selected
    dataset metainfo register its own options before the final full parse.

    Returns:
    -------
    argparse.Namespace
        Parsed arguments.
    """
    arg_parser = argparse.ArgumentParser(
        description="Train a model for image classification (Gluon)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        "--dataset",
        type=str,
        default="ImageNet1K_rec",
        help="dataset name. options are ImageNet1K, ImageNet1K_rec, CUB200_2011, CIFAR10, CIFAR100, SVHN, LibriSpeech,"
             " MCV")
    arg_parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    known_args, _ = arg_parser.parse_known_args()
    # Dataset-specific options are contributed by the dataset metainfo object.
    ds_metainfo = get_dataset_metainfo(dataset_name=known_args.dataset)
    ds_metainfo.add_dataset_parser_arguments(
        parser=arg_parser,
        work_dir_path=known_args.work_dir)
    add_train_cls_parser_arguments(arg_parser)
    return arg_parser.parse_args()
def init_rand(seed):
    """
    Seed every random number generator used during training.

    Parameters:
    ----------
    seed : int
        Requested seed value; a non-positive value means "pick one at random".

    Returns:
    -------
    int
        Seed value actually used.
    """
    chosen_seed = seed if seed > 0 else np.random.randint(10000)
    random.seed(chosen_seed)
    np.random.seed(chosen_seed)
    mx.random.seed(chosen_seed)
    return chosen_seed
def prepare_trainer(net,
                    optimizer_name,
                    wd,
                    momentum,
                    lr_mode,
                    lr,
                    lr_decay_period,
                    lr_decay_epoch,
                    lr_decay,
                    target_lr,
                    poly_power,
                    warmup_epochs,
                    warmup_lr,
                    warmup_mode,
                    batch_size,
                    num_epochs,
                    num_training_samples,
                    dtype,
                    gamma_wd_mult=1.0,
                    beta_wd_mult=1.0,
                    bias_wd_mult=1.0,
                    state_file_path=None):
    """
    Prepare trainer.
    Parameters:
    ----------
    net : HybridBlock
        Model.
    optimizer_name : str
        Name of optimizer.
    wd : float
        Weight decay rate.
    momentum : float
        Momentum value.
    lr_mode : str
        Learning rate scheduler mode.
    lr : float
        Learning rate.
    lr_decay_period : int
        Interval for periodic learning rate decays.
    lr_decay_epoch : str
        Epoches at which learning rate decays.
    lr_decay : float
        Decay rate of learning rate.
    target_lr : float
        Final learning rate.
    poly_power : float
        Power value for poly LR scheduler.
    warmup_epochs : int
        Number of warmup epochs.
    warmup_lr : float
        Starting warmup learning rate.
    warmup_mode : str
        Learning rate scheduler warmup mode.
    batch_size : int
        Training batch size.
    num_epochs : int
        Number of training epochs.
    num_training_samples : int
        Number of training samples in dataset.
    dtype : str
        Base data type for tensors.
    gamma_wd_mult : float
        Weight decay multiplier for batchnorm gamma.
    beta_wd_mult : float
        Weight decay multiplier for batchnorm beta.
    bias_wd_mult : float
        Weight decay multiplier for bias.
    state_file_path : str, default None
        Path for file with trainer state.
    Returns:
    -------
    Trainer
        Trainer.
    LRScheduler
        Learning rate scheduler.
    """
    # Per-parameter-kind weight-decay multipliers, selected by name regexp.
    if gamma_wd_mult != 1.0:
        for k, v in net.collect_params(".*gamma").items():
            v.wd_mult = gamma_wd_mult
    if beta_wd_mult != 1.0:
        for k, v in net.collect_params(".*beta").items():
            v.wd_mult = beta_wd_mult
    if bias_wd_mult != 1.0:
        for k, v in net.collect_params(".*bias").items():
            v.wd_mult = bias_wd_mult
    # Periodic decay schedule overrides the explicit comma-separated epochs.
    if lr_decay_period > 0:
        lr_decay_epoch = list(range(lr_decay_period, num_epochs, lr_decay_period))
    else:
        lr_decay_epoch = [int(i) for i in lr_decay_epoch.split(",")]
    num_batches = num_training_samples // batch_size
    lr_scheduler = LRScheduler(
        mode=lr_mode,
        base_lr=lr,
        n_iters=num_batches,
        n_epochs=num_epochs,
        step=lr_decay_epoch,
        step_factor=lr_decay,
        target_lr=target_lr,
        power=poly_power,
        warmup_epochs=warmup_epochs,
        warmup_lr=warmup_lr,
        warmup_mode=warmup_mode)
    optimizer_params = {"learning_rate": lr,
                        "wd": wd,
                        "momentum": momentum,
                        "lr_scheduler": lr_scheduler}
    if dtype != "float32":
        optimizer_params["multi_precision"] = True
    trainer = gluon.Trainer(
        params=net.collect_params(),
        optimizer=optimizer_name,
        optimizer_params=optimizer_params)
    # When resuming, loaded trainer states may carry a stale weight decay and
    # LR scheduler; both are overridden with the freshly configured values.
    if (state_file_path is not None) and state_file_path and os.path.exists(state_file_path):
        logging.info("Loading trainer states: {}".format(state_file_path))
        trainer.load_states(state_file_path)
        if trainer._optimizer.wd != wd:
            trainer._optimizer.wd = wd
            logging.info("Reset the weight decay: {}".format(wd))
        # lr_scheduler = trainer._optimizer.lr_scheduler
        trainer._optimizer.lr_scheduler = lr_scheduler
    return trainer, lr_scheduler
def save_params(file_stem,
                net,
                trainer):
    """
    Persist model weights and trainer state side by side.

    Writes '<file_stem>.params' (model weights) and '<file_stem>.states'
    (trainer state).

    Parameters:
    ----------
    file_stem : str
        File stem (with path).
    net : HybridBlock
        Model.
    trainer : Trainer
        Trainer.
    """
    net.save_parameters("{}.params".format(file_stem))
    trainer.save_states("{}.states".format(file_stem))
def train_epoch(epoch,
                net,
                train_metric,
                train_data,
                batch_fn,
                data_source_needs_reset,
                dtype,
                ctx,
                loss_func,
                trainer,
                lr_scheduler,
                batch_size,
                log_interval,
                mixup,
                mixup_epoch_tail,
                label_smoothing,
                num_classes,
                num_epochs,
                grad_clip_value,
                batch_size_scale):
    """
    Train model on particular epoch.
    Parameters:
    ----------
    epoch : int
        Epoch number.
    net : HybridBlock
        Model.
    train_metric : EvalMetric
        Metric object instance.
    train_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator.
    batch_fn : func
        Function for splitting data after extraction from data loader.
    data_source_needs_reset : bool
        Whether to reset data (if test_data is ImageRecordIter).
    dtype : str
        Base data type for tensors.
    ctx : Context
        MXNet context.
    loss_func : Loss
        Loss function.
    trainer : Trainer
        Trainer.
    lr_scheduler : LRScheduler
        Learning rate scheduler.
    batch_size : int
        Training batch size.
    log_interval : int
        Batch count period for logging.
    mixup : bool
        Whether to use mixup.
    mixup_epoch_tail : int
        Number of epochs without mixup at the end of training.
    label_smoothing : bool
        Whether to use label-smoothing.
    num_classes : int
        Number of model classes.
    num_epochs : int
        Number of training epochs.
    grad_clip_value : float
        Threshold for gradient clipping.
    batch_size_scale : int
        Manual batch-size increasing factor.
    Returns:
    -------
    float
        Loss value.
    """
    labels_list_inds = None
    batch_size_extend_count = 0
    tic = time.time()
    if data_source_needs_reset:
        train_data.reset()
    train_metric.reset()
    train_loss = 0.0
    i = 0
    btic = time.time()
    for i, batch in enumerate(train_data):
        data_list, labels_list = batch_fn(batch, ctx)
        # Label smoothing: replace integer labels with softened one-hot vectors;
        # the original integer labels are kept for metric computation.
        if label_smoothing:
            eta = 0.1
            on_value = 1 - eta + eta / num_classes
            off_value = eta / num_classes
            labels_list_inds = labels_list
            labels_list = [Y.one_hot(depth=num_classes, on_value=on_value, off_value=off_value) for Y in labels_list]
        # Mixup: convex combination of each batch with its reversed order,
        # skipped during the last mixup_epoch_tail epochs.
        if mixup:
            if not label_smoothing:
                labels_list_inds = labels_list
                labels_list = [Y.one_hot(depth=num_classes) for Y in labels_list]
            if epoch < num_epochs - mixup_epoch_tail:
                alpha = 1
                lam = np.random.beta(alpha, alpha)
                data_list = [lam * X + (1 - lam) * X[::-1] for X in data_list]
                labels_list = [lam * Y + (1 - lam) * Y[::-1] for Y in labels_list]
        with ag.record():
            outputs_list = [net(X.astype(dtype, copy=False)) for X in data_list]
            loss_list = [loss_func(yhat, y.astype(dtype, copy=False)) for yhat, y in zip(outputs_list, labels_list)]
        for loss in loss_list:
            loss.backward()
        lr_scheduler.update(i, epoch)
        if grad_clip_value is not None:
            grads = [v.grad(ctx[0]) for v in net.collect_params().values() if v._grad is not None]
            gluon.utils.clip_global_norm(grads, max_norm=grad_clip_value)
        # batch_size_scale > 1 emulates a larger batch via gradient accumulation
        # (parameters require grad_req="add", set in train_net).
        if batch_size_scale == 1:
            trainer.step(batch_size)
        else:
            if (i + 1) % batch_size_scale == 0:
                batch_size_extend_count = 0
                trainer.step(batch_size * batch_size_scale)
                for p in net.collect_params().values():
                    p.zero_grad()
            else:
                batch_size_extend_count += 1
        train_loss += sum([loss.mean().asscalar() for loss in loss_list]) / len(loss_list)
        # Metrics are computed against the original integer labels when the
        # targets were converted to one-hot above.
        train_metric.update(
            labels=(labels_list if not (mixup or label_smoothing) else labels_list_inds),
            preds=outputs_list)
        if log_interval and not (i + 1) % log_interval:
            speed = batch_size * log_interval / (time.time() - btic)
            btic = time.time()
            train_accuracy_msg = report_accuracy(metric=train_metric)
            logging.info("Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\t{}\tlr={:.5f}".format(
                epoch + 1, i, speed, train_accuracy_msg, trainer.learning_rate))
    # Flush any leftover accumulated gradients at the end of the epoch.
    if (batch_size_scale != 1) and (batch_size_extend_count > 0):
        trainer.step(batch_size * batch_size_extend_count)
        for p in net.collect_params().values():
            p.zero_grad()
    throughput = int(batch_size * (i + 1) / (time.time() - tic))
    logging.info("[Epoch {}] speed: {:.2f} samples/sec\ttime cost: {:.2f} sec".format(
        epoch + 1, throughput, time.time() - tic))
    train_loss /= (i + 1)
    train_accuracy_msg = report_accuracy(metric=train_metric)
    logging.info("[Epoch {}] training: {}\tloss={:.4f}".format(
        epoch + 1, train_accuracy_msg, train_loss))
    return train_loss
def train_net(batch_size,
              num_epochs,
              start_epoch1,
              train_data,
              val_data,
              batch_fn,
              data_source_needs_reset,
              dtype,
              net,
              trainer,
              lr_scheduler,
              lp_saver,
              log_interval,
              mixup,
              mixup_epoch_tail,
              label_smoothing,
              num_classes,
              grad_clip_value,
              batch_size_scale,
              val_metric,
              train_metric,
              loss_func,
              ctx):
    """
    Main procedure for training model.
    Parameters:
    ----------
    batch_size : int
        Training batch size.
    num_epochs : int
        Number of training epochs.
    start_epoch1 : int
        Number of starting epoch (1-based).
    train_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator (training subset).
    val_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator (validation subset).
    batch_fn : func
        Function for splitting data after extraction from data loader.
    data_source_needs_reset : bool
        Whether to reset data (if test_data is ImageRecordIter).
    dtype : str
        Base data type for tensors.
    net : HybridBlock
        Model.
    trainer : Trainer
        Trainer.
    lr_scheduler : LRScheduler
        Learning rate scheduler.
    lp_saver : TrainLogParamSaver
        Model/trainer state saver.
    log_interval : int
        Batch count period for logging.
    mixup : bool
        Whether to use mixup.
    mixup_epoch_tail : int
        Number of epochs without mixup at the end of training.
    label_smoothing : bool
        Whether to use label-smoothing.
    num_classes : int
        Number of model classes.
    grad_clip_value : float
        Threshold for gradient clipping.
    batch_size_scale : int
        Manual batch-size increasing factor.
    val_metric : EvalMetric
        Metric object instance (validation subset).
    train_metric : EvalMetric
        Metric object instance (training subset).
    loss_func : Loss
        Loss object instance.
    ctx : Context
        MXNet context.
    """
    # Gradient accumulation requires gradients to be summed across steps.
    if batch_size_scale != 1:
        for p in net.collect_params().values():
            p.grad_req = "add"
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    # loss_func = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=(not (mixup or label_smoothing)))
    assert (type(start_epoch1) == int)
    assert (start_epoch1 >= 1)
    # When resuming, validate once before training to log the resumed accuracy.
    if start_epoch1 > 1:
        logging.info("Start training from [Epoch {}]".format(start_epoch1))
        validate(
            metric=val_metric,
            net=net,
            val_data=val_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        val_accuracy_msg = report_accuracy(metric=val_metric)
        logging.info("[Epoch {}] validation: {}".format(start_epoch1 - 1, val_accuracy_msg))
    gtic = time.time()
    for epoch in range(start_epoch1 - 1, num_epochs):
        train_loss = train_epoch(
            epoch=epoch,
            net=net,
            train_metric=train_metric,
            train_data=train_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx,
            loss_func=loss_func,
            trainer=trainer,
            lr_scheduler=lr_scheduler,
            batch_size=batch_size,
            log_interval=log_interval,
            mixup=mixup,
            mixup_epoch_tail=mixup_epoch_tail,
            label_smoothing=label_smoothing,
            num_classes=num_classes,
            num_epochs=num_epochs,
            grad_clip_value=grad_clip_value,
            batch_size_scale=batch_size_scale)
        validate(
            metric=val_metric,
            net=net,
            val_data=val_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        val_accuracy_msg = report_accuracy(metric=val_metric)
        logging.info("[Epoch {}] validation: {}".format(epoch + 1, val_accuracy_msg))
        # Persist model + trainer state and log the epoch's scalar metrics.
        if lp_saver is not None:
            lp_saver_kwargs = {"net": net, "trainer": trainer}
            val_acc_values = val_metric.get()[1]
            train_acc_values = train_metric.get()[1]
            val_acc_values = val_acc_values if type(val_acc_values) == list else [val_acc_values]
            train_acc_values = train_acc_values if type(train_acc_values) == list else [train_acc_values]
            lp_saver.epoch_test_end_callback(
                epoch1=(epoch + 1),
                params=(val_acc_values + train_acc_values + [train_loss, trainer.learning_rate]),
                **lp_saver_kwargs)
    logging.info("Total time cost: {:.2f} sec".format(time.time() - gtic))
    if lp_saver is not None:
        opt_metric_name = get_metric_name(val_metric, lp_saver.acc_ind)
        logging.info("Best {}: {:.4f} at {} epoch".format(
            opt_metric_name, lp_saver.best_eval_metric_value, lp_saver.best_eval_metric_epoch))
def main():
    """
    Main body of script.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)
    # NOTE(review): log_file_exist is unused afterwards — confirm intended.
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        net_extra_kwargs=ds_metainfo.train_net_extra_kwargs,
        tune_layers=args.tune_layers,
        classes=args.num_classes,
        in_channels=args.in_channels,
        do_hybridize=(not args.not_hybridize),
        initializer=get_initializer(initializer_name=args.initializer),
        ctx=ctx)
    assert (hasattr(net, "classes"))
    num_classes = net.classes
    train_data = get_train_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=args.num_workers)
    val_data = get_val_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=args.num_workers)
    batch_fn = get_batch_fn(ds_metainfo=ds_metainfo)
    # ImageRecordIter sources report their sample count via metainfo.
    num_training_samples = len(train_data._dataset) if not ds_metainfo.use_imgrec else ds_metainfo.num_training_samples
    trainer, lr_scheduler = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        wd=args.wd,
        momentum=args.momentum,
        lr_mode=args.lr_mode,
        lr=args.lr,
        lr_decay_period=args.lr_decay_period,
        lr_decay_epoch=args.lr_decay_epoch,
        lr_decay=args.lr_decay,
        target_lr=args.target_lr,
        poly_power=args.poly_power,
        warmup_epochs=args.warmup_epochs,
        warmup_lr=args.warmup_lr,
        warmup_mode=args.warmup_mode,
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        num_training_samples=num_training_samples,
        dtype=args.dtype,
        gamma_wd_mult=args.gamma_wd_mult,
        beta_wd_mult=args.beta_wd_mult,
        bias_wd_mult=args.bias_wd_mult,
        state_file_path=args.resume_state)
    # Checkpointing is enabled only when both a directory and interval are set.
    if args.save_dir and args.save_interval:
        param_names = ds_metainfo.val_metric_capts + ds_metainfo.train_metric_capts + ["Train.Loss", "LR"]
        lp_saver = TrainLogParamSaver(
            checkpoint_file_name_prefix="{}_{}".format(ds_metainfo.short_label, args.model),
            last_checkpoint_file_name_suffix="last",
            best_checkpoint_file_name_suffix=None,
            last_checkpoint_dir_path=args.save_dir,
            best_checkpoint_dir_path=None,
            last_checkpoint_file_count=2,
            best_checkpoint_file_count=2,
            checkpoint_file_save_callback=save_params,
            checkpoint_file_exts=(".params", ".states"),
            save_interval=args.save_interval,
            num_epochs=args.num_epochs,
            param_names=param_names,
            acc_ind=ds_metainfo.saver_acc_ind,
            # bigger=[True],
            # mask=None,
            score_log_file_path=os.path.join(args.save_dir, "score.log"),
            score_log_attempt_value=args.attempt,
            best_map_log_file_path=os.path.join(args.save_dir, "best_map.log"))
    else:
        lp_saver = None
    val_metric = get_composite_metric(ds_metainfo.val_metric_names, ds_metainfo.val_metric_extra_kwargs)
    train_metric = get_composite_metric(ds_metainfo.train_metric_names, ds_metainfo.train_metric_extra_kwargs)
    # Sparse labels are only usable when targets stay integer (no mixup/smoothing).
    loss_kwargs = {"sparse_label": not (args.mixup or args.label_smoothing)}
    if ds_metainfo.loss_extra_kwargs is not None:
        loss_kwargs.update(ds_metainfo.loss_extra_kwargs)
    loss_func = get_loss(ds_metainfo.loss_name, loss_kwargs)
    train_net(
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        start_epoch1=args.start_epoch,
        train_data=train_data,
        val_data=val_data,
        batch_fn=batch_fn,
        data_source_needs_reset=ds_metainfo.use_imgrec,
        dtype=args.dtype,
        net=net,
        trainer=trainer,
        lr_scheduler=lr_scheduler,
        lp_saver=lp_saver,
        log_interval=args.log_interval,
        mixup=args.mixup,
        mixup_epoch_tail=args.mixup_epoch_tail,
        label_smoothing=args.label_smoothing,
        num_classes=num_classes,
        grad_clip_value=args.grad_clip,
        batch_size_scale=args.batch_size_scale,
        val_metric=val_metric,
        train_metric=train_metric,
        loss_func=loss_func,
        ctx=ctx)
# Script entry point.
if __name__ == "__main__":
    main()
| 28,277
| 30.489978
| 119
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/dataset_utils.py
|
"""
Dataset routines.
"""
__all__ = ['get_dataset_metainfo', 'get_train_data_source', 'get_val_data_source', 'get_test_data_source']
from chainer.iterators import MultiprocessIterator
from .datasets.imagenet1k_cls_dataset import ImageNet1KMetaInfo
from .datasets.cub200_2011_cls_dataset import CUB200MetaInfo
from .datasets.cifar10_cls_dataset import CIFAR10MetaInfo
from .datasets.cifar100_cls_dataset import CIFAR100MetaInfo
from .datasets.svhn_cls_dataset import SVHNMetaInfo
from .datasets.voc_seg_dataset import VOCMetaInfo
from .datasets.ade20k_seg_dataset import ADE20KMetaInfo
from .datasets.cityscapes_seg_dataset import CityscapesMetaInfo
from .datasets.coco_seg_dataset import CocoSegMetaInfo
from .datasets.coco_hpe1_dataset import CocoHpe1MetaInfo
from .datasets.coco_hpe2_dataset import CocoHpe2MetaInfo
from .datasets.coco_hpe3_dataset import CocoHpe3MetaInfo
def get_dataset_metainfo(dataset_name):
    """
    Get dataset metainfo by name of dataset.

    Parameters:
    ----------
    dataset_name : str
        Dataset name.

    Returns:
    -------
    DatasetMetaInfo
        Dataset metainfo.

    Raises:
    ------
    Exception
        If the dataset name is not recognized.
    """
    dataset_metainfo_map = {
        "ImageNet1K": ImageNet1KMetaInfo,
        "CUB200_2011": CUB200MetaInfo,
        "CIFAR10": CIFAR10MetaInfo,
        "CIFAR100": CIFAR100MetaInfo,
        "SVHN": SVHNMetaInfo,
        "VOC": VOCMetaInfo,
        "ADE20K": ADE20KMetaInfo,
        "Cityscapes": CityscapesMetaInfo,
        "CocoSeg": CocoSegMetaInfo,
        "CocoHpe1": CocoHpe1MetaInfo,
        "CocoHpe2": CocoHpe2MetaInfo,
        "CocoHpe3": CocoHpe3MetaInfo,
    }
    # Single lookup instead of `in map.keys()` membership test plus a second lookup.
    metainfo_class = dataset_metainfo_map.get(dataset_name)
    if metainfo_class is None:
        raise Exception("Unrecognized dataset: {}".format(dataset_name))
    return metainfo_class()
def get_train_data_source(ds_metainfo,
                          batch_size,
                          num_workers):
    """
    Build the training data pipeline described by the dataset metainfo.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    batch_size : int
        Batch size.
    num_workers : int
        Number of background worker processes.

    Returns:
    -------
    dict
        Dictionary holding the training iterator and the dataset length.
    """
    train_transform = ds_metainfo.train_transform(ds_metainfo=ds_metainfo)
    train_dataset = ds_metainfo.dataset_class(
        root=ds_metainfo.root_dir_path,
        mode="train",
        transform=train_transform)
    # Let the metainfo refresh any dataset-derived fields (e.g. class count).
    ds_metainfo.update_from_dataset(train_dataset)
    return {
        "iterator": MultiprocessIterator(
            dataset=train_dataset,
            batch_size=batch_size,
            repeat=False,
            shuffle=True,
            n_processes=num_workers,
            shared_mem=300000000),
        "ds_len": len(train_dataset),
    }
def get_val_data_source(ds_metainfo,
                        batch_size,
                        num_workers):
    """
    Build the validation data pipeline described by the dataset metainfo.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    batch_size : int
        Batch size.
    num_workers : int
        Number of background worker processes.

    Returns:
    -------
    dict
        Dictionary holding the validation iterator and the dataset length.
    """
    val_transform = ds_metainfo.val_transform(ds_metainfo=ds_metainfo)
    val_dataset = ds_metainfo.dataset_class(
        root=ds_metainfo.root_dir_path,
        mode="val",
        transform=val_transform)
    # Let the metainfo refresh any dataset-derived fields (e.g. class count).
    ds_metainfo.update_from_dataset(val_dataset)
    return {
        "iterator": MultiprocessIterator(
            dataset=val_dataset,
            batch_size=batch_size,
            repeat=False,
            shuffle=False,
            n_processes=num_workers,
            shared_mem=100000000),
        "ds_len": len(val_dataset),
    }
def get_test_data_source(ds_metainfo,
                         batch_size,
                         num_workers):
    """
    Build the test data pipeline described by the dataset metainfo.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    batch_size : int
        Batch size.
    num_workers : int
        Number of background worker processes.

    Returns:
    -------
    dict
        Dictionary holding the test iterator and the dataset length.
    """
    test_transform = ds_metainfo.test_transform(ds_metainfo=ds_metainfo)
    test_dataset = ds_metainfo.dataset_class(
        root=ds_metainfo.root_dir_path,
        mode="test",
        transform=test_transform)
    # Let the metainfo refresh any dataset-derived fields (e.g. class count).
    ds_metainfo.update_from_dataset(test_dataset)
    return {
        "iterator": MultiprocessIterator(
            dataset=test_dataset,
            batch_size=batch_size,
            repeat=False,
            shuffle=False,
            n_processes=num_workers,
            shared_mem=300000000),
        "ds_len": len(test_dataset),
    }
| 3,818
| 30.04878
| 106
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/setup.py
|
from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
# Read the long description for PyPI from the package README.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Packaging metadata for the `chainercv2` distribution (Chainer port of imgclsmob models).
setup(
    name='chainercv2',
    version='0.0.62',
    description='Image classification and segmentation models for Chainer',
    license='MIT',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/osmr/imgclsmob',
    author='Oleg Sémery',
    author_email='osemery@gmail.com',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering :: Image Recognition',
    ],
    keywords='machine-learning deep-learning neuralnetwork image-classification chainer imagenet cifar svhn vgg resnet '
             'pyramidnet diracnet densenet condensenet wrn drn dpn darknet fishnet espnetv2 xdensnet squeezenet '
             'squeezenext shufflenet menet mobilenet igcv3 mnasnet darts xception inception polynet nasnet pnasnet ror '
             'proxylessnas dianet efficientnet mixnet image-segmentation voc ade20k cityscapes coco pspnet deeplabv3 '
             'fcn',
    # Exclude auxiliary/experimental packages from the wheel.
    packages=find_packages(exclude=['datasets', 'metrics', 'others', '*.others', 'others.*', '*.others.*']),
    include_package_data=True,
    install_requires=['requests', 'chainer>=5.0.0'],
)
| 1,581
| 42.944444
| 120
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/utils.py
|
import logging
import os
import cupy
from chainer import using_config, Variable
from chainer.function import no_backprop_mode
from chainer.backends import cuda
from chainer.backends.cuda import to_cpu
from chainer.serializers import load_npz
from .chainercv2.model_provider import get_model
from .metrics.metric import EvalMetric, CompositeEvalMetric
from .metrics.cls_metrics import Top1Error, TopKError
from .metrics.seg_metrics import PixelAccuracyMetric, MeanIoUMetric
from .metrics.det_metrics import CocoDetMApMetric
from .metrics.hpe_metrics import CocoHpeOksApMetric
def prepare_ch_context(num_gpus):
    """
    Activate GPU device 0 when at least one GPU is requested.

    Parameters:
    ----------
    num_gpus : int
        Requested number of GPUs.

    Returns:
    -------
    bool
        Whether GPUs are used.
    """
    if num_gpus > 0:
        cuda.get_device_from_id(0).use()
        return True
    return False
class Predictor(object):
    """
    Model predictor with preprocessing.

    Parameters:
    ----------
    model : Chain
        Base model.
    transform : callable, optional
        A function that transforms the image.
    """
    def __init__(self,
                 model,
                 transform=None):
        super(Predictor, self).__init__()
        self.model = model
        self.transform = transform

    def do_transform(self, img):
        # Pass the image through unchanged when no transform was supplied.
        return self.transform(img) if self.transform is not None else img

    def __call__(self, imgs):
        batch = self.model.xp.asarray([self.do_transform(image) for image in imgs])
        with using_config("train", False), no_backprop_mode():
            preds = self.model(Variable(batch))
        # Chainer outputs expose `.array`; otherwise convert the raw device array via cupy.
        raw = preds.array if hasattr(preds, "array") else cupy.asnumpy(preds)
        return to_cpu(raw)
def prepare_model(model_name,
                  use_pretrained,
                  pretrained_model_file_path,
                  use_gpus=False,
                  net_extra_kwargs=None,
                  num_classes=None,
                  in_channels=None):
    """
    Create a model by name, optionally load weights from a file, and move it to GPU.

    Parameters:
    ----------
    model_name : str
        Name of the model in the model zoo.
    use_pretrained : bool
        Whether to download/use pretrained weights from the zoo.
    pretrained_model_file_path : str
        Path to a local weights file (empty string to skip).
    use_gpus : bool, default False
        Whether to move the model to GPU.
    net_extra_kwargs : dict, optional
        Extra keyword arguments passed to the model constructor.
    num_classes : int, optional
        Number of output classes.
    in_channels : int, optional
        Number of input channels.

    Returns:
    -------
    Chain
        Instantiated (and possibly weight-loaded) model.
    """
    net_kwargs = {"pretrained": use_pretrained}
    if num_classes is not None:
        net_kwargs["classes"] = num_classes
    if in_channels is not None:
        net_kwargs["in_channels"] = in_channels
    if net_extra_kwargs is not None:
        net_kwargs.update(net_extra_kwargs)
    net = get_model(model_name, **net_kwargs)
    if pretrained_model_file_path:
        assert os.path.isfile(pretrained_model_file_path)
        logging.info('Loading model: {}'.format(pretrained_model_file_path))
        load_npz(
            file=pretrained_model_file_path,
            obj=net)
    if use_gpus:
        net.to_gpu()
    return net
def report_accuracy(metric,
                    extended_log=False):
    """
    Make report string for composite metric.

    Parameters:
    ----------
    metric : EvalMetric
        Metric object instance.
    extended_log : bool, default False
        Whether to log more precise accuracy values.

    Returns:
    -------
    str
        Report string.
    """
    def format_entry(name, value):
        # Multi-valued metrics (e.g. top-1/top-5) are joined with '/'.
        if type(value) in (list, tuple):
            rounded = "/".join(["{:.4f}"] * len(value)).format(*value)
            if extended_log:
                raw = "/".join(["{}"] * len(value)).format(*value)
                return "{}={} ({})".format(name, rounded, raw)
            return "{}={}".format(name, rounded)
        if extended_log:
            return "{name}={value:.4f} ({value})".format(name=name, value=value)
        return "{name}={value:.4f}".format(name=name, value=value)

    metric_info = metric.get()
    if isinstance(metric, CompositeEvalMetric):
        return ", ".join(format_entry(name=n, value=v) for n, v in zip(*metric_info))
    if isinstance(metric, EvalMetric):
        return format_entry(name=metric_info[0], value=metric_info[1])
    raise Exception("Wrong metric type: {}".format(type(metric)))
def get_metric(metric_name, metric_extra_kwargs):
    """
    Get metric by name.

    Parameters:
    ----------
    metric_name : str
        Metric name.
    metric_extra_kwargs : dict
        Metric extra parameters.

    Returns:
    -------
    EvalMetric
        Metric object instance.

    Raises:
    ------
    Exception
        If the metric name is not recognized.
    """
    # Lambdas defer construction, so unknown names fail before any metric is built.
    factories = {
        "Top1Error": lambda: Top1Error(**metric_extra_kwargs),
        "TopKError": lambda: TopKError(**metric_extra_kwargs),
        "PixelAccuracyMetric": lambda: PixelAccuracyMetric(**metric_extra_kwargs),
        "MeanIoUMetric": lambda: MeanIoUMetric(**metric_extra_kwargs),
        "CocoDetMApMetric": lambda: CocoDetMApMetric(**metric_extra_kwargs),
        "CocoHpeOksApMetric": lambda: CocoHpeOksApMetric(**metric_extra_kwargs),
    }
    if metric_name not in factories:
        raise Exception("Wrong metric name: {}".format(metric_name))
    return factories[metric_name]()
def get_composite_metric(metric_names, metric_extra_kwargs):
    """
    Get composite metric by list of metric names.

    Parameters:
    ----------
    metric_names : list of str
        Metric name list.
    metric_extra_kwargs : list of dict
        Metric extra parameters list.

    Returns:
    -------
    CompositeEvalMetric
        Metric object instance.
    """
    # A single name avoids the composite wrapper entirely.
    if len(metric_names) == 1:
        return get_metric(metric_names[0], metric_extra_kwargs[0])
    composite = CompositeEvalMetric()
    for metric_name, extra_kwargs in zip(metric_names, metric_extra_kwargs):
        composite.add(get_metric(metric_name, extra_kwargs))
    return composite
def get_metric_name(metric, index):
    """
    Get metric name by index in the composite metric.

    Parameters:
    ----------
    metric : CompositeEvalMetric or EvalMetric
        Metric object instance.
    index : int
        Index.

    Returns:
    -------
    str
        Metric name.
    """
    if isinstance(metric, CompositeEvalMetric):
        return metric.metrics[index].name
    if isinstance(metric, EvalMetric):
        # A plain metric has exactly one name, so only index 0 is valid.
        assert (index == 0)
        return metric.name
    raise Exception("Wrong metric type: {}".format(type(metric)))
| 6,272
| 28.176744
| 116
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/__init__.py
| 0
| 0
| 0
|
py
|
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/__init__.py
| 0
| 0
| 0
|
py
|
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/model_provider.py
|
from .models.alexnet import *
from .models.zfnet import *
from .models.vgg import *
from .models.bninception import *
from .models.resnet import *
from .models.preresnet import *
from .models.resnext import *
from .models.seresnet import *
from .models.sepreresnet import *
from .models.seresnext import *
from .models.senet import *
from .models.resnesta import *
from .models.airnet import *
from .models.airnext import *
from .models.bamresnet import *
from .models.cbamresnet import *
from .models.resattnet import *
from .models.sknet import *
from .models.scnet import *
from .models.regnet import *
from .models.diaresnet import *
from .models.diapreresnet import *
from .models.pyramidnet import *
from .models.diracnetv2 import *
from .models.sharesnet import *
from .models.densenet import *
from .models.condensenet import *
from .models.sparsenet import *
from .models.peleenet import *
from .models.wrn import *
from .models.drn import *
from .models.dpn import *
from .models.darknet import *
from .models.darknet53 import *
from .models.channelnet import *
from .models.irevnet import *
from .models.bagnet import *
from .models.dla import *
from .models.fishnet import *
from .models.espnetv2 import *
from .models.dicenet import *
from .models.hrnet import *
from .models.vovnet import *
from .models.selecsls import *
from .models.hardnet import *
from .models.xdensenet import *
from .models.squeezenet import *
from .models.squeezenext import *
from .models.shufflenet import *
from .models.shufflenetv2 import *
from .models.shufflenetv2b import *
from .models.menet import *
from .models.mobilenet import *
from .models.mobilenetb import *
from .models.fdmobilenet import *
from .models.mobilenetv2 import *
from .models.mobilenetv3 import *
from .models.igcv3 import *
from .models.ghostnet import *
from .models.mnasnet import *
from .models.darts import *
from .models.proxylessnas import *
from .models.fbnet import *
from .models.xception import *
from .models.inceptionv3 import *
from .models.inceptionv4 import *
from .models.inceptionresnetv1 import *
from .models.inceptionresnetv2 import *
from .models.polynet import *
from .models.nasnet import *
from .models.pnasnet import *
from .models.spnasnet import *
from .models.efficientnet import *
from .models.efficientnetedge import *
from .models.mixnet import *
from .models.nin_cifar import *
from .models.resnet_cifar import *
from .models.preresnet_cifar import *
from .models.resnext_cifar import *
from .models.seresnet_cifar import *
from .models.sepreresnet_cifar import *
from .models.pyramidnet_cifar import *
from .models.densenet_cifar import *
from .models.xdensenet_cifar import *
from .models.wrn_cifar import *
from .models.wrn1bit_cifar import *
from .models.ror_cifar import *
from .models.rir_cifar import *
from .models.resdropresnet_cifar import *
from .models.shakeshakeresnet_cifar import *
from .models.shakedropresnet_cifar import *
from .models.diaresnet_cifar import *
from .models.diapreresnet_cifar import *
from .models.octresnet import *
from .models.resneta import *
from .models.resnetd import *
from .models.fastseresnet import *
from .models.resnet_cub import *
from .models.seresnet_cub import *
from .models.mobilenet_cub import *
from .models.proxylessnas_cub import *
from .models.ntsnet_cub import *
from .models.fcn8sd import *
from .models.pspnet import *
from .models.deeplabv3 import *
from .models.icnet import *
from .models.fastscnn import *
from .models.cgnet import *
from .models.dabnet import *
from .models.sinet import *
from .models.bisenet import *
from .models.danet import *
from .models.fpenet import *
from .models.lednet import *
from .models.alphapose_coco import *
from .models.simplepose_coco import *
from .models.simpleposemobile_coco import *
from .models.lwopenpose_cmupan import *
from .models.ibppose_coco import *
from .models.centernet import *
from .models.lffd import *
from .models.voca import *
from .models.nvpattexp import *
from .models.jasper import *
from .models.jasperdr import *
from .models.quartznet import *
__all__ = ['get_model']
_models = {
'alexnet': alexnet,
'alexnetb': alexnetb,
'zfnet': zfnet,
'zfnetb': zfnetb,
'vgg11': vgg11,
'vgg13': vgg13,
'vgg16': vgg16,
'vgg19': vgg19,
'bn_vgg11': bn_vgg11,
'bn_vgg13': bn_vgg13,
'bn_vgg16': bn_vgg16,
'bn_vgg19': bn_vgg19,
'bn_vgg11b': bn_vgg11b,
'bn_vgg13b': bn_vgg13b,
'bn_vgg16b': bn_vgg16b,
'bn_vgg19b': bn_vgg19b,
'bninception': bninception,
'resnet10': resnet10,
'resnet12': resnet12,
'resnet14': resnet14,
'resnetbc14b': resnetbc14b,
'resnet16': resnet16,
'resnet18_wd4': resnet18_wd4,
'resnet18_wd2': resnet18_wd2,
'resnet18_w3d4': resnet18_w3d4,
'resnet18': resnet18,
'resnet26': resnet26,
'resnetbc26b': resnetbc26b,
'resnet34': resnet34,
'resnetbc38b': resnetbc38b,
'resnet50': resnet50,
'resnet50b': resnet50b,
'resnet101': resnet101,
'resnet101b': resnet101b,
'resnet152': resnet152,
'resnet152b': resnet152b,
'resnet200': resnet200,
'resnet200b': resnet200b,
'preresnet10': preresnet10,
'preresnet12': preresnet12,
'preresnet14': preresnet14,
'preresnetbc14b': preresnetbc14b,
'preresnet16': preresnet16,
'preresnet18_wd4': preresnet18_wd4,
'preresnet18_wd2': preresnet18_wd2,
'preresnet18_w3d4': preresnet18_w3d4,
'preresnet18': preresnet18,
'preresnet26': preresnet26,
'preresnetbc26b': preresnetbc26b,
'preresnet34': preresnet34,
'preresnetbc38b': preresnetbc38b,
'preresnet50': preresnet50,
'preresnet50b': preresnet50b,
'preresnet101': preresnet101,
'preresnet101b': preresnet101b,
'preresnet152': preresnet152,
'preresnet152b': preresnet152b,
'preresnet200': preresnet200,
'preresnet200b': preresnet200b,
'preresnet269b': preresnet269b,
'resnext14_16x4d': resnext14_16x4d,
'resnext14_32x2d': resnext14_32x2d,
'resnext14_32x4d': resnext14_32x4d,
'resnext26_16x4d': resnext26_16x4d,
'resnext26_32x2d': resnext26_32x2d,
'resnext26_32x4d': resnext26_32x4d,
'resnext38_32x4d': resnext38_32x4d,
'resnext50_32x4d': resnext50_32x4d,
'resnext101_32x4d': resnext101_32x4d,
'resnext101_64x4d': resnext101_64x4d,
'seresnet10': seresnet10,
'seresnet12': seresnet12,
'seresnet14': seresnet14,
'seresnet16': seresnet16,
'seresnet18': seresnet18,
'seresnet26': seresnet26,
'seresnetbc26b': seresnetbc26b,
'seresnet34': seresnet34,
'seresnetbc38b': seresnetbc38b,
'seresnet50': seresnet50,
'seresnet50b': seresnet50b,
'seresnet101': seresnet101,
'seresnet101b': seresnet101b,
'seresnet152': seresnet152,
'seresnet152b': seresnet152b,
'seresnet200': seresnet200,
'seresnet200b': seresnet200b,
'sepreresnet10': sepreresnet10,
'sepreresnet12': sepreresnet12,
'sepreresnet14': sepreresnet14,
'sepreresnet16': sepreresnet16,
'sepreresnet18': sepreresnet18,
'sepreresnet26': sepreresnet26,
'sepreresnetbc26b': sepreresnetbc26b,
'sepreresnet34': sepreresnet34,
'sepreresnetbc38b': sepreresnetbc38b,
'sepreresnet50': sepreresnet50,
'sepreresnet50b': sepreresnet50b,
'sepreresnet101': sepreresnet101,
'sepreresnet101b': sepreresnet101b,
'sepreresnet152': sepreresnet152,
'sepreresnet152b': sepreresnet152b,
'sepreresnet200': sepreresnet200,
'sepreresnet200b': sepreresnet200b,
'seresnext50_32x4d': seresnext50_32x4d,
'seresnext101_32x4d': seresnext101_32x4d,
'seresnext101_64x4d': seresnext101_64x4d,
'senet16': senet16,
'senet28': senet28,
'senet40': senet40,
'senet52': senet52,
'senet103': senet103,
'senet154': senet154,
'resnestabc14': resnestabc14,
'resnesta18': resnesta18,
'resnestabc26': resnestabc26,
'resnesta50': resnesta50,
'resnesta101': resnesta101,
'resnesta152': resnesta152,
'resnesta200': resnesta200,
'resnesta269': resnesta269,
'airnet50_1x64d_r2': airnet50_1x64d_r2,
'airnet50_1x64d_r16': airnet50_1x64d_r16,
'airnet101_1x64d_r2': airnet101_1x64d_r2,
'airnext50_32x4d_r2': airnext50_32x4d_r2,
'airnext101_32x4d_r2': airnext101_32x4d_r2,
'airnext101_32x4d_r16': airnext101_32x4d_r16,
'bam_resnet18': bam_resnet18,
'bam_resnet34': bam_resnet34,
'bam_resnet50': bam_resnet50,
'bam_resnet101': bam_resnet101,
'bam_resnet152': bam_resnet152,
'cbam_resnet18': cbam_resnet18,
'cbam_resnet34': cbam_resnet34,
'cbam_resnet50': cbam_resnet50,
'cbam_resnet101': cbam_resnet101,
'cbam_resnet152': cbam_resnet152,
'resattnet56': resattnet56,
'resattnet92': resattnet92,
'resattnet128': resattnet128,
'resattnet164': resattnet164,
'resattnet200': resattnet200,
'resattnet236': resattnet236,
'resattnet452': resattnet452,
'sknet50': sknet50,
'sknet101': sknet101,
'sknet152': sknet152,
'scnet50': scnet50,
'scnet101': scnet101,
'scneta50': scneta50,
'scneta101': scneta101,
'regnetx002': regnetx002,
'regnetx004': regnetx004,
'regnetx006': regnetx006,
'regnetx008': regnetx008,
'regnetx016': regnetx016,
'regnetx032': regnetx032,
'regnetx040': regnetx040,
'regnetx064': regnetx064,
'regnetx080': regnetx080,
'regnetx120': regnetx120,
'regnetx160': regnetx160,
'regnetx320': regnetx320,
'regnety002': regnety002,
'regnety004': regnety004,
'regnety006': regnety006,
'regnety008': regnety008,
'regnety016': regnety016,
'regnety032': regnety032,
'regnety040': regnety040,
'regnety064': regnety064,
'regnety080': regnety080,
'regnety120': regnety120,
'regnety160': regnety160,
'regnety320': regnety320,
'diaresnet10': diaresnet10,
'diaresnet12': diaresnet12,
'diaresnet14': diaresnet14,
'diaresnetbc14b': diaresnetbc14b,
'diaresnet16': diaresnet16,
'diaresnet18': diaresnet18,
'diaresnet26': diaresnet26,
'diaresnetbc26b': diaresnetbc26b,
'diaresnet34': diaresnet34,
'diaresnetbc38b': diaresnetbc38b,
'diaresnet50': diaresnet50,
'diaresnet50b': diaresnet50b,
'diaresnet101': diaresnet101,
'diaresnet101b': diaresnet101b,
'diaresnet152': diaresnet152,
'diaresnet152b': diaresnet152b,
'diaresnet200': diaresnet200,
'diaresnet200b': diaresnet200b,
'diapreresnet10': diapreresnet10,
'diapreresnet12': diapreresnet12,
'diapreresnet14': diapreresnet14,
'diapreresnetbc14b': diapreresnetbc14b,
'diapreresnet16': diapreresnet16,
'diapreresnet18': diapreresnet18,
'diapreresnet26': diapreresnet26,
'diapreresnetbc26b': diapreresnetbc26b,
'diapreresnet34': diapreresnet34,
'diapreresnetbc38b': diapreresnetbc38b,
'diapreresnet50': diapreresnet50,
'diapreresnet50b': diapreresnet50b,
'diapreresnet101': diapreresnet101,
'diapreresnet101b': diapreresnet101b,
'diapreresnet152': diapreresnet152,
'diapreresnet152b': diapreresnet152b,
'diapreresnet200': diapreresnet200,
'diapreresnet200b': diapreresnet200b,
'diapreresnet269b': diapreresnet269b,
'pyramidnet101_a360': pyramidnet101_a360,
'diracnet18v2': diracnet18v2,
'diracnet34v2': diracnet34v2,
'sharesnet18': sharesnet18,
'sharesnet34': sharesnet34,
'sharesnet50': sharesnet50,
'sharesnet50b': sharesnet50b,
'sharesnet101': sharesnet101,
'sharesnet101b': sharesnet101b,
'sharesnet152': sharesnet152,
'sharesnet152b': sharesnet152b,
'densenet121': densenet121,
'densenet161': densenet161,
'densenet169': densenet169,
'densenet201': densenet201,
'condensenet74_c4_g4': condensenet74_c4_g4,
'condensenet74_c8_g8': condensenet74_c8_g8,
'sparsenet121': sparsenet121,
'sparsenet161': sparsenet161,
'sparsenet169': sparsenet169,
'sparsenet201': sparsenet201,
'sparsenet264': sparsenet264,
'peleenet': peleenet,
'wrn50_2': wrn50_2,
'drnc26': drnc26,
'drnc42': drnc42,
'drnc58': drnc58,
'drnd22': drnd22,
'drnd38': drnd38,
'drnd54': drnd54,
'drnd105': drnd105,
'dpn68': dpn68,
'dpn68b': dpn68b,
'dpn98': dpn98,
'dpn107': dpn107,
'dpn131': dpn131,
'darknet_ref': darknet_ref,
'darknet_tiny': darknet_tiny,
'darknet19': darknet19,
'darknet53': darknet53,
'channelnet': channelnet,
'irevnet301': irevnet301,
'bagnet9': bagnet9,
'bagnet17': bagnet17,
'bagnet33': bagnet33,
'dla34': dla34,
'dla46c': dla46c,
'dla46xc': dla46xc,
'dla60': dla60,
'dla60x': dla60x,
'dla60xc': dla60xc,
'dla102': dla102,
'dla102x': dla102x,
'dla102x2': dla102x2,
'dla169': dla169,
'fishnet99': fishnet99,
'fishnet150': fishnet150,
'espnetv2_wd2': espnetv2_wd2,
'espnetv2_w1': espnetv2_w1,
'espnetv2_w5d4': espnetv2_w5d4,
'espnetv2_w3d2': espnetv2_w3d2,
'espnetv2_w2': espnetv2_w2,
'dicenet_wd5': dicenet_wd5,
'dicenet_wd2': dicenet_wd2,
'dicenet_w3d4': dicenet_w3d4,
'dicenet_w1': dicenet_w1,
'dicenet_w5d4': dicenet_w5d4,
'dicenet_w3d2': dicenet_w3d2,
'dicenet_w7d8': dicenet_w7d8,
'dicenet_w2': dicenet_w2,
'hrnet_w18_small_v1': hrnet_w18_small_v1,
'hrnet_w18_small_v2': hrnet_w18_small_v2,
'hrnetv2_w18': hrnetv2_w18,
'hrnetv2_w30': hrnetv2_w30,
'hrnetv2_w32': hrnetv2_w32,
'hrnetv2_w40': hrnetv2_w40,
'hrnetv2_w44': hrnetv2_w44,
'hrnetv2_w48': hrnetv2_w48,
'hrnetv2_w64': hrnetv2_w64,
'vovnet27s': vovnet27s,
'vovnet39': vovnet39,
'vovnet57': vovnet57,
'selecsls42': selecsls42,
'selecsls42b': selecsls42b,
'selecsls60': selecsls60,
'selecsls60b': selecsls60b,
'selecsls84': selecsls84,
'hardnet39ds': hardnet39ds,
'hardnet68ds': hardnet68ds,
'hardnet68': hardnet68,
'hardnet85': hardnet85,
'xdensenet121_2': xdensenet121_2,
'xdensenet161_2': xdensenet161_2,
'xdensenet169_2': xdensenet169_2,
'xdensenet201_2': xdensenet201_2,
'squeezenet_v1_0': squeezenet_v1_0,
'squeezenet_v1_1': squeezenet_v1_1,
'squeezeresnet_v1_0': squeezeresnet_v1_0,
'squeezeresnet_v1_1': squeezeresnet_v1_1,
'sqnxt23_w1': sqnxt23_w1,
'sqnxt23_w3d2': sqnxt23_w3d2,
'sqnxt23_w2': sqnxt23_w2,
'sqnxt23v5_w1': sqnxt23v5_w1,
'sqnxt23v5_w3d2': sqnxt23v5_w3d2,
'sqnxt23v5_w2': sqnxt23v5_w2,
'shufflenet_g1_w1': shufflenet_g1_w1,
'shufflenet_g2_w1': shufflenet_g2_w1,
'shufflenet_g3_w1': shufflenet_g3_w1,
'shufflenet_g4_w1': shufflenet_g4_w1,
'shufflenet_g8_w1': shufflenet_g8_w1,
'shufflenet_g1_w3d4': shufflenet_g1_w3d4,
'shufflenet_g3_w3d4': shufflenet_g3_w3d4,
'shufflenet_g1_wd2': shufflenet_g1_wd2,
'shufflenet_g3_wd2': shufflenet_g3_wd2,
'shufflenet_g1_wd4': shufflenet_g1_wd4,
'shufflenet_g3_wd4': shufflenet_g3_wd4,
'shufflenetv2_wd2': shufflenetv2_wd2,
'shufflenetv2_w1': shufflenetv2_w1,
'shufflenetv2_w3d2': shufflenetv2_w3d2,
'shufflenetv2_w2': shufflenetv2_w2,
'shufflenetv2b_wd2': shufflenetv2b_wd2,
'shufflenetv2b_w1': shufflenetv2b_w1,
'shufflenetv2b_w3d2': shufflenetv2b_w3d2,
'shufflenetv2b_w2': shufflenetv2b_w2,
'menet108_8x1_g3': menet108_8x1_g3,
'menet128_8x1_g4': menet128_8x1_g4,
'menet160_8x1_g8': menet160_8x1_g8,
'menet228_12x1_g3': menet228_12x1_g3,
'menet256_12x1_g4': menet256_12x1_g4,
'menet348_12x1_g3': menet348_12x1_g3,
'menet352_12x1_g8': menet352_12x1_g8,
'menet456_24x1_g3': menet456_24x1_g3,
'mobilenet_w1': mobilenet_w1,
'mobilenet_w3d4': mobilenet_w3d4,
'mobilenet_wd2': mobilenet_wd2,
'mobilenet_wd4': mobilenet_wd4,
'mobilenetb_w1': mobilenetb_w1,
'mobilenetb_w3d4': mobilenetb_w3d4,
'mobilenetb_wd2': mobilenetb_wd2,
'mobilenetb_wd4': mobilenetb_wd4,
'fdmobilenet_w1': fdmobilenet_w1,
'fdmobilenet_w3d4': fdmobilenet_w3d4,
'fdmobilenet_wd2': fdmobilenet_wd2,
'fdmobilenet_wd4': fdmobilenet_wd4,
'mobilenetv2_w1': mobilenetv2_w1,
'mobilenetv2_w3d4': mobilenetv2_w3d4,
'mobilenetv2_wd2': mobilenetv2_wd2,
'mobilenetv2_wd4': mobilenetv2_wd4,
'mobilenetv2b_w1': mobilenetv2b_w1,
'mobilenetv2b_w3d4': mobilenetv2b_w3d4,
'mobilenetv2b_wd2': mobilenetv2b_wd2,
'mobilenetv2b_wd4': mobilenetv2b_wd4,
'mobilenetv3_small_w7d20': mobilenetv3_small_w7d20,
'mobilenetv3_small_wd2': mobilenetv3_small_wd2,
'mobilenetv3_small_w3d4': mobilenetv3_small_w3d4,
'mobilenetv3_small_w1': mobilenetv3_small_w1,
'mobilenetv3_small_w5d4': mobilenetv3_small_w5d4,
'mobilenetv3_large_w7d20': mobilenetv3_large_w7d20,
'mobilenetv3_large_wd2': mobilenetv3_large_wd2,
'mobilenetv3_large_w3d4': mobilenetv3_large_w3d4,
'mobilenetv3_large_w1': mobilenetv3_large_w1,
'mobilenetv3_large_w5d4': mobilenetv3_large_w5d4,
'igcv3_w1': igcv3_w1,
'igcv3_w3d4': igcv3_w3d4,
'igcv3_wd2': igcv3_wd2,
'igcv3_wd4': igcv3_wd4,
'ghostnet': ghostnet,
'mnasnet_b1': mnasnet_b1,
'mnasnet_a1': mnasnet_a1,
'mnasnet_small': mnasnet_small,
'darts': darts,
'proxylessnas_cpu': proxylessnas_cpu,
'proxylessnas_gpu': proxylessnas_gpu,
'proxylessnas_mobile': proxylessnas_mobile,
'proxylessnas_mobile14': proxylessnas_mobile14,
'fbnet_cb': fbnet_cb,
'xception': xception,
'inceptionv3': inceptionv3,
'inceptionv4': inceptionv4,
'inceptionresnetv1': inceptionresnetv1,
'inceptionresnetv2': inceptionresnetv2,
'polynet': polynet,
'nasnet_4a1056': nasnet_4a1056,
'nasnet_6a4032': nasnet_6a4032,
'pnasnet5large': pnasnet5large,
'spnasnet': spnasnet,
'efficientnet_b0': efficientnet_b0,
'efficientnet_b1': efficientnet_b1,
'efficientnet_b2': efficientnet_b2,
'efficientnet_b3': efficientnet_b3,
'efficientnet_b4': efficientnet_b4,
'efficientnet_b5': efficientnet_b5,
'efficientnet_b6': efficientnet_b6,
'efficientnet_b7': efficientnet_b7,
'efficientnet_b8': efficientnet_b8,
'efficientnet_b0b': efficientnet_b0b,
'efficientnet_b1b': efficientnet_b1b,
'efficientnet_b2b': efficientnet_b2b,
'efficientnet_b3b': efficientnet_b3b,
'efficientnet_b4b': efficientnet_b4b,
'efficientnet_b5b': efficientnet_b5b,
'efficientnet_b6b': efficientnet_b6b,
'efficientnet_b7b': efficientnet_b7b,
'efficientnet_b0c': efficientnet_b0c,
'efficientnet_b1c': efficientnet_b1c,
'efficientnet_b2c': efficientnet_b2c,
'efficientnet_b3c': efficientnet_b3c,
'efficientnet_b4c': efficientnet_b4c,
'efficientnet_b5c': efficientnet_b5c,
'efficientnet_b6c': efficientnet_b6c,
'efficientnet_b7c': efficientnet_b7c,
'efficientnet_b8c': efficientnet_b8c,
'efficientnet_edge_small_b': efficientnet_edge_small_b,
'efficientnet_edge_medium_b': efficientnet_edge_medium_b,
'efficientnet_edge_large_b': efficientnet_edge_large_b,
'mixnet_s': mixnet_s,
'mixnet_m': mixnet_m,
'mixnet_l': mixnet_l,
'nin_cifar10': nin_cifar10,
'nin_cifar100': nin_cifar100,
'nin_svhn': nin_svhn,
'resnet20_cifar10': resnet20_cifar10,
'resnet20_cifar100': resnet20_cifar100,
'resnet20_svhn': resnet20_svhn,
'resnet56_cifar10': resnet56_cifar10,
'resnet56_cifar100': resnet56_cifar100,
'resnet56_svhn': resnet56_svhn,
'resnet110_cifar10': resnet110_cifar10,
'resnet110_cifar100': resnet110_cifar100,
'resnet110_svhn': resnet110_svhn,
'resnet164bn_cifar10': resnet164bn_cifar10,
'resnet164bn_cifar100': resnet164bn_cifar100,
'resnet164bn_svhn': resnet164bn_svhn,
'resnet272bn_cifar10': resnet272bn_cifar10,
'resnet272bn_cifar100': resnet272bn_cifar100,
'resnet272bn_svhn': resnet272bn_svhn,
'resnet542bn_cifar10': resnet542bn_cifar10,
'resnet542bn_cifar100': resnet542bn_cifar100,
'resnet542bn_svhn': resnet542bn_svhn,
'resnet1001_cifar10': resnet1001_cifar10,
'resnet1001_cifar100': resnet1001_cifar100,
'resnet1001_svhn': resnet1001_svhn,
'resnet1202_cifar10': resnet1202_cifar10,
'resnet1202_cifar100': resnet1202_cifar100,
'resnet1202_svhn': resnet1202_svhn,
'preresnet20_cifar10': preresnet20_cifar10,
'preresnet20_cifar100': preresnet20_cifar100,
'preresnet20_svhn': preresnet20_svhn,
'preresnet56_cifar10': preresnet56_cifar10,
'preresnet56_cifar100': preresnet56_cifar100,
'preresnet56_svhn': preresnet56_svhn,
'preresnet110_cifar10': preresnet110_cifar10,
'preresnet110_cifar100': preresnet110_cifar100,
'preresnet110_svhn': preresnet110_svhn,
'preresnet164bn_cifar10': preresnet164bn_cifar10,
'preresnet164bn_cifar100': preresnet164bn_cifar100,
'preresnet164bn_svhn': preresnet164bn_svhn,
'preresnet272bn_cifar10': preresnet272bn_cifar10,
'preresnet272bn_cifar100': preresnet272bn_cifar100,
'preresnet272bn_svhn': preresnet272bn_svhn,
'preresnet542bn_cifar10': preresnet542bn_cifar10,
'preresnet542bn_cifar100': preresnet542bn_cifar100,
'preresnet542bn_svhn': preresnet542bn_svhn,
'preresnet1001_cifar10': preresnet1001_cifar10,
'preresnet1001_cifar100': preresnet1001_cifar100,
'preresnet1001_svhn': preresnet1001_svhn,
'preresnet1202_cifar10': preresnet1202_cifar10,
'preresnet1202_cifar100': preresnet1202_cifar100,
'preresnet1202_svhn': preresnet1202_svhn,
'resnext20_16x4d_cifar10': resnext20_16x4d_cifar10,
'resnext20_16x4d_cifar100': resnext20_16x4d_cifar100,
'resnext20_16x4d_svhn': resnext20_16x4d_svhn,
'resnext20_32x2d_cifar10': resnext20_32x2d_cifar10,
'resnext20_32x2d_cifar100': resnext20_32x2d_cifar100,
'resnext20_32x2d_svhn': resnext20_32x2d_svhn,
'resnext20_32x4d_cifar10': resnext20_32x4d_cifar10,
'resnext20_32x4d_cifar100': resnext20_32x4d_cifar100,
'resnext20_32x4d_svhn': resnext20_32x4d_svhn,
'resnext29_32x4d_cifar10': resnext29_32x4d_cifar10,
'resnext29_32x4d_cifar100': resnext29_32x4d_cifar100,
'resnext29_32x4d_svhn': resnext29_32x4d_svhn,
'resnext29_16x64d_cifar10': resnext29_16x64d_cifar10,
'resnext29_16x64d_cifar100': resnext29_16x64d_cifar100,
'resnext29_16x64d_svhn': resnext29_16x64d_svhn,
'resnext272_1x64d_cifar10': resnext272_1x64d_cifar10,
'resnext272_1x64d_cifar100': resnext272_1x64d_cifar100,
'resnext272_1x64d_svhn': resnext272_1x64d_svhn,
'resnext272_2x32d_cifar10': resnext272_2x32d_cifar10,
'resnext272_2x32d_cifar100': resnext272_2x32d_cifar100,
'resnext272_2x32d_svhn': resnext272_2x32d_svhn,
'seresnet20_cifar10': seresnet20_cifar10,
'seresnet20_cifar100': seresnet20_cifar100,
'seresnet20_svhn': seresnet20_svhn,
'seresnet56_cifar10': seresnet56_cifar10,
'seresnet56_cifar100': seresnet56_cifar100,
'seresnet56_svhn': seresnet56_svhn,
'seresnet110_cifar10': seresnet110_cifar10,
'seresnet110_cifar100': seresnet110_cifar100,
'seresnet110_svhn': seresnet110_svhn,
'seresnet164bn_cifar10': seresnet164bn_cifar10,
'seresnet164bn_cifar100': seresnet164bn_cifar100,
'seresnet164bn_svhn': seresnet164bn_svhn,
'seresnet272bn_cifar10': seresnet272bn_cifar10,
'seresnet272bn_cifar100': seresnet272bn_cifar100,
'seresnet272bn_svhn': seresnet272bn_svhn,
'seresnet542bn_cifar10': seresnet542bn_cifar10,
'seresnet542bn_cifar100': seresnet542bn_cifar100,
'seresnet542bn_svhn': seresnet542bn_svhn,
'seresnet1001_cifar10': seresnet1001_cifar10,
'seresnet1001_cifar100': seresnet1001_cifar100,
'seresnet1001_svhn': seresnet1001_svhn,
'seresnet1202_cifar10': seresnet1202_cifar10,
'seresnet1202_cifar100': seresnet1202_cifar100,
'seresnet1202_svhn': seresnet1202_svhn,
'sepreresnet20_cifar10': sepreresnet20_cifar10,
'sepreresnet20_cifar100': sepreresnet20_cifar100,
'sepreresnet20_svhn': sepreresnet20_svhn,
'sepreresnet56_cifar10': sepreresnet56_cifar10,
'sepreresnet56_cifar100': sepreresnet56_cifar100,
'sepreresnet56_svhn': sepreresnet56_svhn,
'sepreresnet110_cifar10': sepreresnet110_cifar10,
'sepreresnet110_cifar100': sepreresnet110_cifar100,
'sepreresnet110_svhn': sepreresnet110_svhn,
'sepreresnet164bn_cifar10': sepreresnet164bn_cifar10,
'sepreresnet164bn_cifar100': sepreresnet164bn_cifar100,
'sepreresnet164bn_svhn': sepreresnet164bn_svhn,
'sepreresnet272bn_cifar10': sepreresnet272bn_cifar10,
'sepreresnet272bn_cifar100': sepreresnet272bn_cifar100,
'sepreresnet272bn_svhn': sepreresnet272bn_svhn,
'sepreresnet542bn_cifar10': sepreresnet542bn_cifar10,
'sepreresnet542bn_cifar100': sepreresnet542bn_cifar100,
'sepreresnet542bn_svhn': sepreresnet542bn_svhn,
'sepreresnet1001_cifar10': sepreresnet1001_cifar10,
'sepreresnet1001_cifar100': sepreresnet1001_cifar100,
'sepreresnet1001_svhn': sepreresnet1001_svhn,
'sepreresnet1202_cifar10': sepreresnet1202_cifar10,
'sepreresnet1202_cifar100': sepreresnet1202_cifar100,
'sepreresnet1202_svhn': sepreresnet1202_svhn,
'pyramidnet110_a48_cifar10': pyramidnet110_a48_cifar10,
'pyramidnet110_a48_cifar100': pyramidnet110_a48_cifar100,
'pyramidnet110_a48_svhn': pyramidnet110_a48_svhn,
'pyramidnet110_a84_cifar10': pyramidnet110_a84_cifar10,
'pyramidnet110_a84_cifar100': pyramidnet110_a84_cifar100,
'pyramidnet110_a84_svhn': pyramidnet110_a84_svhn,
'pyramidnet110_a270_cifar10': pyramidnet110_a270_cifar10,
'pyramidnet110_a270_cifar100': pyramidnet110_a270_cifar100,
'pyramidnet110_a270_svhn': pyramidnet110_a270_svhn,
'pyramidnet164_a270_bn_cifar10': pyramidnet164_a270_bn_cifar10,
'pyramidnet164_a270_bn_cifar100': pyramidnet164_a270_bn_cifar100,
'pyramidnet164_a270_bn_svhn': pyramidnet164_a270_bn_svhn,
'pyramidnet200_a240_bn_cifar10': pyramidnet200_a240_bn_cifar10,
'pyramidnet200_a240_bn_cifar100': pyramidnet200_a240_bn_cifar100,
'pyramidnet200_a240_bn_svhn': pyramidnet200_a240_bn_svhn,
'pyramidnet236_a220_bn_cifar10': pyramidnet236_a220_bn_cifar10,
'pyramidnet236_a220_bn_cifar100': pyramidnet236_a220_bn_cifar100,
'pyramidnet236_a220_bn_svhn': pyramidnet236_a220_bn_svhn,
'pyramidnet272_a200_bn_cifar10': pyramidnet272_a200_bn_cifar10,
'pyramidnet272_a200_bn_cifar100': pyramidnet272_a200_bn_cifar100,
'pyramidnet272_a200_bn_svhn': pyramidnet272_a200_bn_svhn,
'densenet40_k12_cifar10': densenet40_k12_cifar10,
'densenet40_k12_cifar100': densenet40_k12_cifar100,
'densenet40_k12_svhn': densenet40_k12_svhn,
'densenet40_k12_bc_cifar10': densenet40_k12_bc_cifar10,
'densenet40_k12_bc_cifar100': densenet40_k12_bc_cifar100,
'densenet40_k12_bc_svhn': densenet40_k12_bc_svhn,
'densenet40_k24_bc_cifar10': densenet40_k24_bc_cifar10,
'densenet40_k24_bc_cifar100': densenet40_k24_bc_cifar100,
'densenet40_k24_bc_svhn': densenet40_k24_bc_svhn,
'densenet40_k36_bc_cifar10': densenet40_k36_bc_cifar10,
'densenet40_k36_bc_cifar100': densenet40_k36_bc_cifar100,
'densenet40_k36_bc_svhn': densenet40_k36_bc_svhn,
'densenet100_k12_cifar10': densenet100_k12_cifar10,
'densenet100_k12_cifar100': densenet100_k12_cifar100,
'densenet100_k12_svhn': densenet100_k12_svhn,
'densenet100_k24_cifar10': densenet100_k24_cifar10,
'densenet100_k24_cifar100': densenet100_k24_cifar100,
'densenet100_k24_svhn': densenet100_k24_svhn,
'densenet100_k12_bc_cifar10': densenet100_k12_bc_cifar10,
'densenet100_k12_bc_cifar100': densenet100_k12_bc_cifar100,
'densenet100_k12_bc_svhn': densenet100_k12_bc_svhn,
'densenet190_k40_bc_cifar10': densenet190_k40_bc_cifar10,
'densenet190_k40_bc_cifar100': densenet190_k40_bc_cifar100,
'densenet190_k40_bc_svhn': densenet190_k40_bc_svhn,
'densenet250_k24_bc_cifar10': densenet250_k24_bc_cifar10,
'densenet250_k24_bc_cifar100': densenet250_k24_bc_cifar100,
'densenet250_k24_bc_svhn': densenet250_k24_bc_svhn,
'xdensenet40_2_k24_bc_cifar10': xdensenet40_2_k24_bc_cifar10,
'xdensenet40_2_k24_bc_cifar100': xdensenet40_2_k24_bc_cifar100,
'xdensenet40_2_k24_bc_svhn': xdensenet40_2_k24_bc_svhn,
'xdensenet40_2_k36_bc_cifar10': xdensenet40_2_k36_bc_cifar10,
'xdensenet40_2_k36_bc_cifar100': xdensenet40_2_k36_bc_cifar100,
'xdensenet40_2_k36_bc_svhn': xdensenet40_2_k36_bc_svhn,
'wrn16_10_cifar10': wrn16_10_cifar10,
'wrn16_10_cifar100': wrn16_10_cifar100,
'wrn16_10_svhn': wrn16_10_svhn,
'wrn28_10_cifar10': wrn28_10_cifar10,
'wrn28_10_cifar100': wrn28_10_cifar100,
'wrn28_10_svhn': wrn28_10_svhn,
'wrn40_8_cifar10': wrn40_8_cifar10,
'wrn40_8_cifar100': wrn40_8_cifar100,
'wrn40_8_svhn': wrn40_8_svhn,
'wrn20_10_1bit_cifar10': wrn20_10_1bit_cifar10,
'wrn20_10_1bit_cifar100': wrn20_10_1bit_cifar100,
'wrn20_10_1bit_svhn': wrn20_10_1bit_svhn,
'wrn20_10_32bit_cifar10': wrn20_10_32bit_cifar10,
'wrn20_10_32bit_cifar100': wrn20_10_32bit_cifar100,
'wrn20_10_32bit_svhn': wrn20_10_32bit_svhn,
'ror3_56_cifar10': ror3_56_cifar10,
'ror3_56_cifar100': ror3_56_cifar100,
'ror3_56_svhn': ror3_56_svhn,
'ror3_110_cifar10': ror3_110_cifar10,
'ror3_110_cifar100': ror3_110_cifar100,
'ror3_110_svhn': ror3_110_svhn,
'ror3_164_cifar10': ror3_164_cifar10,
'ror3_164_cifar100': ror3_164_cifar100,
'ror3_164_svhn': ror3_164_svhn,
'rir_cifar10': rir_cifar10,
'rir_cifar100': rir_cifar100,
'rir_svhn': rir_svhn,
'resdropresnet20_cifar10': resdropresnet20_cifar10,
'resdropresnet20_cifar100': resdropresnet20_cifar100,
'resdropresnet20_svhn': resdropresnet20_svhn,
'shakeshakeresnet20_2x16d_cifar10': shakeshakeresnet20_2x16d_cifar10,
'shakeshakeresnet20_2x16d_cifar100': shakeshakeresnet20_2x16d_cifar100,
'shakeshakeresnet20_2x16d_svhn': shakeshakeresnet20_2x16d_svhn,
'shakeshakeresnet26_2x32d_cifar10': shakeshakeresnet26_2x32d_cifar10,
'shakeshakeresnet26_2x32d_cifar100': shakeshakeresnet26_2x32d_cifar100,
'shakeshakeresnet26_2x32d_svhn': shakeshakeresnet26_2x32d_svhn,
'shakedropresnet20_cifar10': shakedropresnet20_cifar10,
'shakedropresnet20_cifar100': shakedropresnet20_cifar100,
'shakedropresnet20_svhn': shakedropresnet20_svhn,
'diaresnet20_cifar10': diaresnet20_cifar10,
'diaresnet20_cifar100': diaresnet20_cifar100,
'diaresnet20_svhn': diaresnet20_svhn,
'diaresnet56_cifar10': diaresnet56_cifar10,
'diaresnet56_cifar100': diaresnet56_cifar100,
'diaresnet56_svhn': diaresnet56_svhn,
'diaresnet110_cifar10': diaresnet110_cifar10,
'diaresnet110_cifar100': diaresnet110_cifar100,
'diaresnet110_svhn': diaresnet110_svhn,
'diaresnet164bn_cifar10': diaresnet164bn_cifar10,
'diaresnet164bn_cifar100': diaresnet164bn_cifar100,
'diaresnet164bn_svhn': diaresnet164bn_svhn,
'diaresnet1001_cifar10': diaresnet1001_cifar10,
'diaresnet1001_cifar100': diaresnet1001_cifar100,
'diaresnet1001_svhn': diaresnet1001_svhn,
'diaresnet1202_cifar10': diaresnet1202_cifar10,
'diaresnet1202_cifar100': diaresnet1202_cifar100,
'diaresnet1202_svhn': diaresnet1202_svhn,
'diapreresnet20_cifar10': diapreresnet20_cifar10,
'diapreresnet20_cifar100': diapreresnet20_cifar100,
'diapreresnet20_svhn': diapreresnet20_svhn,
'diapreresnet56_cifar10': diapreresnet56_cifar10,
'diapreresnet56_cifar100': diapreresnet56_cifar100,
'diapreresnet56_svhn': diapreresnet56_svhn,
'diapreresnet110_cifar10': diapreresnet110_cifar10,
'diapreresnet110_cifar100': diapreresnet110_cifar100,
'diapreresnet110_svhn': diapreresnet110_svhn,
'diapreresnet164bn_cifar10': diapreresnet164bn_cifar10,
'diapreresnet164bn_cifar100': diapreresnet164bn_cifar100,
'diapreresnet164bn_svhn': diapreresnet164bn_svhn,
'diapreresnet1001_cifar10': diapreresnet1001_cifar10,
'diapreresnet1001_cifar100': diapreresnet1001_cifar100,
'diapreresnet1001_svhn': diapreresnet1001_svhn,
'diapreresnet1202_cifar10': diapreresnet1202_cifar10,
'diapreresnet1202_cifar100': diapreresnet1202_cifar100,
'diapreresnet1202_svhn': diapreresnet1202_svhn,
'resneta10': resneta10,
'resnetabc14b': resnetabc14b,
'resneta18': resneta18,
'resneta50b': resneta50b,
'resneta101b': resneta101b,
'resneta152b': resneta152b,
'resnetd50b': resnetd50b,
'resnetd101b': resnetd101b,
'resnetd152b': resnetd152b,
'fastseresnet101b': fastseresnet101b,
'octresnet10_ad2': octresnet10_ad2,
'octresnet50b_ad2': octresnet50b_ad2,
'resnet10_cub': resnet10_cub,
'resnet12_cub': resnet12_cub,
'resnet14_cub': resnet14_cub,
'resnetbc14b_cub': resnetbc14b_cub,
'resnet16_cub': resnet16_cub,
'resnet18_cub': resnet18_cub,
'resnet26_cub': resnet26_cub,
'resnetbc26b_cub': resnetbc26b_cub,
'resnet34_cub': resnet34_cub,
'resnetbc38b_cub': resnetbc38b_cub,
'resnet50_cub': resnet50_cub,
'resnet50b_cub': resnet50b_cub,
'resnet101_cub': resnet101_cub,
'resnet101b_cub': resnet101b_cub,
'resnet152_cub': resnet152_cub,
'resnet152b_cub': resnet152b_cub,
'resnet200_cub': resnet200_cub,
'resnet200b_cub': resnet200b_cub,
'seresnet10_cub': seresnet10_cub,
'seresnet12_cub': seresnet12_cub,
'seresnet14_cub': seresnet14_cub,
'seresnetbc14b_cub': seresnetbc14b_cub,
'seresnet16_cub': seresnet16_cub,
'seresnet18_cub': seresnet18_cub,
'seresnet26_cub': seresnet26_cub,
'seresnetbc26b_cub': seresnetbc26b_cub,
'seresnet34_cub': seresnet34_cub,
'seresnetbc38b_cub': seresnetbc38b_cub,
'seresnet50_cub': seresnet50_cub,
'seresnet50b_cub': seresnet50b_cub,
'seresnet101_cub': seresnet101_cub,
'seresnet101b_cub': seresnet101b_cub,
'seresnet152_cub': seresnet152_cub,
'seresnet152b_cub': seresnet152b_cub,
'seresnet200_cub': seresnet200_cub,
'seresnet200b_cub': seresnet200b_cub,
'mobilenet_w1_cub': mobilenet_w1_cub,
'mobilenet_w3d4_cub': mobilenet_w3d4_cub,
'mobilenet_wd2_cub': mobilenet_wd2_cub,
'mobilenet_wd4_cub': mobilenet_wd4_cub,
'fdmobilenet_w1_cub': fdmobilenet_w1_cub,
'fdmobilenet_w3d4_cub': fdmobilenet_w3d4_cub,
'fdmobilenet_wd2_cub': fdmobilenet_wd2_cub,
'fdmobilenet_wd4_cub': fdmobilenet_wd4_cub,
'proxylessnas_cpu_cub': proxylessnas_cpu_cub,
'proxylessnas_gpu_cub': proxylessnas_gpu_cub,
'proxylessnas_mobile_cub': proxylessnas_mobile_cub,
'proxylessnas_mobile14_cub': proxylessnas_mobile14_cub,
'ntsnet_cub': ntsnet_cub,
'fcn8sd_resnetd50b_voc': fcn8sd_resnetd50b_voc,
'fcn8sd_resnetd101b_voc': fcn8sd_resnetd101b_voc,
'fcn8sd_resnetd50b_coco': fcn8sd_resnetd50b_coco,
'fcn8sd_resnetd101b_coco': fcn8sd_resnetd101b_coco,
'fcn8sd_resnetd50b_ade20k': fcn8sd_resnetd50b_ade20k,
'fcn8sd_resnetd101b_ade20k': fcn8sd_resnetd101b_ade20k,
'fcn8sd_resnetd50b_cityscapes': fcn8sd_resnetd50b_cityscapes,
'fcn8sd_resnetd101b_cityscapes': fcn8sd_resnetd101b_cityscapes,
'pspnet_resnetd50b_voc': pspnet_resnetd50b_voc,
'pspnet_resnetd101b_voc': pspnet_resnetd101b_voc,
'pspnet_resnetd50b_coco': pspnet_resnetd50b_coco,
'pspnet_resnetd101b_coco': pspnet_resnetd101b_coco,
'pspnet_resnetd50b_ade20k': pspnet_resnetd50b_ade20k,
'pspnet_resnetd101b_ade20k': pspnet_resnetd101b_ade20k,
'pspnet_resnetd50b_cityscapes': pspnet_resnetd50b_cityscapes,
'pspnet_resnetd101b_cityscapes': pspnet_resnetd101b_cityscapes,
'deeplabv3_resnetd50b_voc': deeplabv3_resnetd50b_voc,
'deeplabv3_resnetd101b_voc': deeplabv3_resnetd101b_voc,
'deeplabv3_resnetd152b_voc': deeplabv3_resnetd152b_voc,
'deeplabv3_resnetd50b_coco': deeplabv3_resnetd50b_coco,
'deeplabv3_resnetd101b_coco': deeplabv3_resnetd101b_coco,
'deeplabv3_resnetd152b_coco': deeplabv3_resnetd152b_coco,
'deeplabv3_resnetd50b_ade20k': deeplabv3_resnetd50b_ade20k,
'deeplabv3_resnetd101b_ade20k': deeplabv3_resnetd101b_ade20k,
'deeplabv3_resnetd50b_cityscapes': deeplabv3_resnetd50b_cityscapes,
'deeplabv3_resnetd101b_cityscapes': deeplabv3_resnetd101b_cityscapes,
'icnet_resnetd50b_cityscapes': icnet_resnetd50b_cityscapes,
'fastscnn_cityscapes': fastscnn_cityscapes,
'cgnet_cityscapes': cgnet_cityscapes,
'dabnet_cityscapes': dabnet_cityscapes,
'sinet_cityscapes': sinet_cityscapes,
'bisenet_resnet18_celebamaskhq': bisenet_resnet18_celebamaskhq,
'danet_resnetd50b_cityscapes': danet_resnetd50b_cityscapes,
'danet_resnetd101b_cityscapes': danet_resnetd101b_cityscapes,
'fpenet_cityscapes': fpenet_cityscapes,
'lednet_cityscapes': lednet_cityscapes,
'alphapose_fastseresnet101b_coco': alphapose_fastseresnet101b_coco,
'simplepose_resnet18_coco': simplepose_resnet18_coco,
'simplepose_resnet50b_coco': simplepose_resnet50b_coco,
'simplepose_resnet101b_coco': simplepose_resnet101b_coco,
'simplepose_resnet152b_coco': simplepose_resnet152b_coco,
'simplepose_resneta50b_coco': simplepose_resneta50b_coco,
'simplepose_resneta101b_coco': simplepose_resneta101b_coco,
'simplepose_resneta152b_coco': simplepose_resneta152b_coco,
'simplepose_mobile_resnet18_coco': simplepose_mobile_resnet18_coco,
'simplepose_mobile_resnet50b_coco': simplepose_mobile_resnet50b_coco,
'simplepose_mobile_mobilenet_w1_coco': simplepose_mobile_mobilenet_w1_coco,
'simplepose_mobile_mobilenetv2b_w1_coco': simplepose_mobile_mobilenetv2b_w1_coco,
'simplepose_mobile_mobilenetv3_small_w1_coco': simplepose_mobile_mobilenetv3_small_w1_coco,
'simplepose_mobile_mobilenetv3_large_w1_coco': simplepose_mobile_mobilenetv3_large_w1_coco,
'lwopenpose2d_mobilenet_cmupan_coco': lwopenpose2d_mobilenet_cmupan_coco,
'lwopenpose3d_mobilenet_cmupan_coco': lwopenpose3d_mobilenet_cmupan_coco,
'ibppose_coco': ibppose_coco,
'centernet_resnet18_voc': centernet_resnet18_voc,
'centernet_resnet18_coco': centernet_resnet18_coco,
'centernet_resnet50b_voc': centernet_resnet50b_voc,
'centernet_resnet50b_coco': centernet_resnet50b_coco,
'centernet_resnet101b_voc': centernet_resnet101b_voc,
'centernet_resnet101b_coco': centernet_resnet101b_coco,
'lffd20x5s320v2_widerface': lffd20x5s320v2_widerface,
'lffd25x8s560v1_widerface': lffd25x8s560v1_widerface,
'voca8flame': voca8flame,
'nvpattexp116bazel76': nvpattexp116bazel76,
'jasper5x3': jasper5x3,
'jasper10x4': jasper10x4,
'jasper10x5': jasper10x5,
'jasperdr10x5_en': jasperdr10x5_en,
'jasperdr10x5_en_nr': jasperdr10x5_en_nr,
'quartznet5x5_en_ls': quartznet5x5_en_ls,
'quartznet15x5_en': quartznet15x5_en,
'quartznet15x5_en_nr': quartznet15x5_en_nr,
'quartznet15x5_fr': quartznet15x5_fr,
'quartznet15x5_de': quartznet15x5_de,
'quartznet15x5_it': quartznet15x5_it,
'quartznet15x5_es': quartznet15x5_es,
'quartznet15x5_ca': quartznet15x5_ca,
'quartznet15x5_pl': quartznet15x5_pl,
'quartznet15x5_ru': quartznet15x5_ru,
'quartznet15x5_ru34': quartznet15x5_ru34,
}
def get_model(name, **kwargs):
    """
    Get supported model.

    Parameters:
    ----------
    name : str
        Name of model (case-insensitive key of the `_models` registry).

    Returns:
    -------
    Chain
        Resulted model.
    """
    key = name.lower()
    try:
        factory = _models[key]
    except KeyError:
        raise ValueError("Unsupported model: {}".format(key))
    return factory(**kwargs)
| 39,465
| 34.813067
| 95
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/airnext.py
|
"""
AirNeXt for ImageNet-1K, implemented in Chainer.
Original paper: 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
https://ieeexplore.ieee.org/document/8510896.
"""
__all__ = ['AirNeXt', 'airnext50_32x4d_r2', 'airnext101_32x4d_r2', 'airnext101_32x4d_r16']
import os
import math
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import SimpleSequential, conv1x1_block, conv3x3_block
from .airnet import AirBlock, AirInitBlock
class AirNeXtBottleneck(Chain):
    """
    AirNet bottleneck block for residual path in ResNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    ratio: int
        Air compression ratio.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 cardinality,
                 bottleneck_width,
                 ratio):
        super(AirNeXtBottleneck, self).__init__()
        # Classic ResNet bottleneck: middle stage works on out_channels // 4,
        # scaled to the ResNeXt group width below.
        mid_channels = out_channels // 4
        # D = per-group channel width; 64 is the reference bottleneck width.
        D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
        group_width = cardinality * D
        # The attention (air) branch is only built for stride-1 units whose
        # middle stage is narrower than 512 channels (early/middle stages).
        self.use_air_block = (stride == 1 and mid_channels < 512)
        with self.init_scope():
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=group_width)
            self.conv2 = conv3x3_block(
                in_channels=group_width,
                out_channels=group_width,
                stride=stride,
                groups=cardinality)
            # Final projection back to out_channels; no activation here — the
            # enclosing unit applies ReLU after the residual addition.
            self.conv3 = conv1x1_block(
                in_channels=group_width,
                out_channels=out_channels,
                activation=None)
            if self.use_air_block:
                self.air = AirBlock(
                    in_channels=in_channels,
                    out_channels=group_width,
                    groups=(cardinality // ratio),
                    ratio=ratio)
    def __call__(self, x):
        # The attention map is computed from the block *input*, then used to
        # modulate the grouped 3x3 stage output before the final projection.
        if self.use_air_block:
            att = self.air(x)
        x = self.conv1(x)
        x = self.conv2(x)
        if self.use_air_block:
            x = x * att
        x = self.conv3(x)
        return x
class AirNeXtUnit(Chain):
    """
    AirNet unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    ratio: int
        Air compression ratio.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 cardinality,
                 bottleneck_width,
                 ratio):
        super(AirNeXtUnit, self).__init__()
        # A 1x1 projection shortcut is needed whenever the residual branch
        # changes the channel count or the spatial resolution.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        with self.init_scope():
            self.body = AirNeXtBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                cardinality=cardinality,
                bottleneck_width=bottleneck_width,
                ratio=ratio)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    activation=None)
        self.activ = F.relu
    def __call__(self, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        # Post-activation residual: ReLU applied after the addition.
        x = x + identity
        x = self.activ(x)
        return x
class AirNeXt(Chain):
    """
    AirNet model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
    https://ieeexplore.ieee.org/document/8510896.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    ratio: int
        Air compression ratio.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 ratio,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000):
        super(AirNeXt, self).__init__()
        self.in_size = in_size
        self.classes = classes
        with self.init_scope():
            self.features = SimpleSequential()
            with self.features.init_scope():
                setattr(self.features, "init_block", AirInitBlock(
                    in_channels=in_channels,
                    out_channels=init_block_channels))
                in_channels = init_block_channels
                for i, channels_per_stage in enumerate(channels):
                    stage = SimpleSequential()
                    with stage.init_scope():
                        for j, out_channels in enumerate(channels_per_stage):
                            # Downsample at the first unit of every stage
                            # except the very first stage.
                            stride = 2 if (j == 0) and (i != 0) else 1
                            setattr(stage, "unit{}".format(j + 1), AirNeXtUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                stride=stride,
                                cardinality=cardinality,
                                bottleneck_width=bottleneck_width,
                                ratio=ratio))
                            in_channels = out_channels
                    setattr(self.features, "stage{}".format(i + 1), stage)
                # Fixed 7x7 average pool — sized for the default 224x224 input
                # (224 / 32 = 7); other input sizes would need a different ksize.
                setattr(self.features, "final_pool", partial(
                    F.average_pooling_2d,
                    ksize=7,
                    stride=1))
            self.output = SimpleSequential()
            with self.output.init_scope():
                # Flatten pooled features to (batch, in_channels) for the classifier.
                setattr(self.output, "flatten", partial(
                    F.reshape,
                    shape=(-1, in_channels)))
                setattr(self.output, "fc", L.Linear(
                    in_size=in_channels,
                    out_size=classes))
    def __call__(self, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_airnext(blocks,
                cardinality,
                bottleneck_width,
                base_channels,
                ratio,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".chainer", "models"),
                **kwargs):
    """
    Create AirNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    base_channels: int
        Base number of channels.
    ratio: int
        Air compression ratio.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.

    Returns:
    -------
    AirNeXt
        Resulted model.
    """
    # Per-stage unit counts for the supported nominal depths.
    depth_to_layers = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in depth_to_layers:
        raise ValueError("Unsupported AirNeXt with number of blocks: {}".format(blocks))
    layers = depth_to_layers[blocks]
    bottleneck_expansion = 4
    init_block_channels = base_channels
    # Stage width starts at base_channels * expansion and doubles each stage.
    channels = []
    stage_width = base_channels * bottleneck_expansion
    for layer_count in layers:
        channels.append([stage_width] * layer_count)
        stage_width *= 2
    net = AirNeXt(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        ratio=ratio,
        **kwargs)
    if pretrained:
        # A concrete model name is required to resolve the weight file.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        load_npz(file=weights_file, obj=net)
    return net
def airnext50_32x4d_r2(**kwargs):
    """
    AirNeXt50-32x4d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
    Representations,' https://ieeexplore.ieee.org/document/8510896.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # 50-layer backbone, 32 groups of width 4, air compression ratio 2.
    return get_airnext(blocks=50, cardinality=32, bottleneck_width=4, base_channels=64,
                       ratio=2, model_name="airnext50_32x4d_r2", **kwargs)
def airnext101_32x4d_r2(**kwargs):
    """
    AirNeXt101-32x4d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
    Representations,' https://ieeexplore.ieee.org/document/8510896.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # 101-layer backbone, 32 groups of width 4, air compression ratio 2.
    return get_airnext(blocks=101, cardinality=32, bottleneck_width=4, base_channels=64,
                       ratio=2, model_name="airnext101_32x4d_r2", **kwargs)
def airnext101_32x4d_r16(**kwargs):
    """
    AirNeXt101-32x4d (r=16) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
    Representations,' https://ieeexplore.ieee.org/document/8510896.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # 101-layer backbone, 32 groups of width 4, air compression ratio 16.
    return get_airnext(blocks=101, cardinality=32, bottleneck_width=4, base_channels=64,
                       ratio=16, model_name="airnext101_32x4d_r16", **kwargs)
def _test():
    """Smoke-test all AirNeXt variants: parameter counts and output shape."""
    import numpy as np
    import chainer
    chainer.global_config.train = False
    pretrained = False
    # Each constructor is paired with its expected trainable-parameter count.
    model_list = [
        (airnext50_32x4d_r2, 27604296),
        (airnext101_32x4d_r2, 54099272),
        (airnext101_32x4d_r16, 45456456),
    ]
    for model, expected_count in model_list:
        net = model(pretrained=pretrained)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
        # One dummy forward pass at the default 224x224 input size.
        batch = np.zeros((1, 3, 224, 224), np.float32)
        out = net(batch)
        assert (out.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 11,883
| 30.356201
| 115
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/pspnet.py
|
"""
PSPNet for image segmentation, implemented in Chainer.
Original paper: 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105.
"""
__all__ = ['PSPNet', 'pspnet_resnetd50b_voc', 'pspnet_resnetd101b_voc', 'pspnet_resnetd50b_coco',
'pspnet_resnetd101b_coco', 'pspnet_resnetd50b_ade20k', 'pspnet_resnetd101b_ade20k',
'pspnet_resnetd50b_cityscapes', 'pspnet_resnetd101b_cityscapes', 'PyramidPooling']
import os
import chainer.functions as F
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent
from .resnetd import resnetd50b, resnetd101b
class PSPFinalBlock(Chain):
    """
    PSPNet final block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4):
        super(PSPFinalBlock, self).__init__()
        assert (in_channels % bottleneck_factor == 0)
        mid_channels = in_channels // bottleneck_factor
        with self.init_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            # Light regularization before the per-pixel classifier.
            self.dropout = partial(
                F.dropout,
                ratio=0.1)
            # 1x1 classifier producing one score map per class.
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True)
    def __call__(self, x, out_size):
        # Score at feature resolution, then resize to the requested output size.
        x = self.conv1(x)
        x = self.dropout(x)
        x = self.conv2(x)
        x = F.resize_images(x, output_shape=out_size)
        return x
class PyramidPoolingBranch(Chain):
    """
    Pyramid Pooling branch.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    pool_out_size : int
        Target output size of the image.
    upscale_out_size : tuple of 2 int
        Spatial size of output image for the bilinear upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 pool_out_size,
                 upscale_out_size):
        super(PyramidPoolingBranch, self).__init__()
        self.upscale_out_size = upscale_out_size
        with self.init_scope():
            # NOTE(review): pool_out_size is passed as the pooling *kernel*
            # size, so the pooled grid is roughly (H // pool_out_size), not
            # pool_out_size x pool_out_size as the parameter name suggests —
            # verify against the reference PSPNet adaptive pooling.
            self.pool = partial(
                F.average_pooling_2d,
                ksize=pool_out_size)
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels)
    def __call__(self, x):
        # Fall back to the runtime spatial size when no fixed size was given.
        in_size = self.upscale_out_size if self.upscale_out_size is not None else x.shape[2:]
        x = self.pool(x)
        x = self.conv(x)
        # Upsample back to the input resolution so branches can be combined.
        x = F.resize_images(x, output_shape=in_size)
        return x
class PyramidPooling(Chain):
    """
    Pyramid Pooling module.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    upscale_out_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 upscale_out_size):
        super(PyramidPooling, self).__init__()
        # Standard PSPNet pyramid scales.
        pool_out_sizes = [1, 2, 3, 6]
        assert (len(pool_out_sizes) == 4)
        assert (in_channels % 4 == 0)
        # Each pooled branch outputs in_channels // 4 channels, so the
        # combined output (identity + 4 branches) carries 2 * in_channels.
        mid_channels = in_channels // 4
        with self.init_scope():
            self.branches = Concurrent()
            with self.branches.init_scope():
                # Identity branch preserves the original feature map.
                setattr(self.branches, "branch1", partial(F.identity))
                for i, pool_out_size in enumerate(pool_out_sizes):
                    setattr(self.branches, "branch{}".format(i + 2), PyramidPoolingBranch(
                        in_channels=in_channels,
                        out_channels=mid_channels,
                        pool_out_size=pool_out_size,
                        upscale_out_size=upscale_out_size))
    def __call__(self, x):
        x = self.branches(x)
        return x
class PSPNet(Chain):
    """
    PSPNet model from 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int, default 2048
        Number of output channels form feature extractor.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    classes : int, default 21
        Number of segmentation classes.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels=2048,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 classes=21):
        super(PSPNet, self).__init__()
        assert (in_channels > 0)
        # The pooled feature size below assumes an 8x downsampling backbone,
        # so the input dimensions must be divisible by 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        with self.init_scope():
            self.backbone = backbone
            # With a fixed input size the feature-map size (in / 8) can be
            # precomputed; otherwise it is derived per call from x.shape.
            pool_out_size = (self.in_size[0] // 8, self.in_size[1] // 8) if fixed_size else None
            self.pool = PyramidPooling(
                in_channels=backbone_out_channels,
                upscale_out_size=pool_out_size)
            # The pyramid module doubles the channel count (identity branch
            # plus four pooled branches of in/4 channels each).
            pool_out_channels = 2 * backbone_out_channels
            self.final_block = PSPFinalBlock(
                in_channels=pool_out_channels,
                out_channels=classes,
                bottleneck_factor=8)
            if self.aux:
                # Auxiliary head reads the backbone's intermediate output,
                # assumed to carry half the final channel count.
                aux_out_channels = backbone_out_channels // 2
                self.aux_block = PSPFinalBlock(
                    in_channels=aux_out_channels,
                    out_channels=classes,
                    bottleneck_factor=4)
    def __call__(self, x):
        in_size = self.in_size if self.fixed_size else x.shape[2:]
        # The backbone returns (final features, intermediate bend output).
        x, y = self.backbone(x)
        x = self.pool(x)
        x = self.final_block(x, in_size)
        if self.aux:
            y = self.aux_block(y, in_size)
            return x, y
        else:
            return x
def get_pspnet(backbone,
               classes,
               aux=False,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".chainer", "models"),
               **kwargs):
    """
    Create PSPNet model with specific parameters.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.

    Returns:
    -------
    PSPNet
        Resulted model.
    """
    net = PSPNet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        **kwargs)
    if pretrained:
        # A concrete model name is required to resolve the weight file.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        load_npz(file=weights_file, obj=net)
    return net
def pspnet_resnetd50b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-50b for Pascal VOC from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Build the ResNet(D)-50b trunk with a bend (intermediate output) for the
    # auxiliary head, then drop its classifier pooling stage.
    trunk = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    features = trunk.features
    del features.final_pool
    return get_pspnet(
        backbone=features,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd50b_voc",
        **kwargs)
def pspnet_resnetd101b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-101b for Pascal VOC from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Build the ResNet(D)-101b trunk with a bend (intermediate output) for the
    # auxiliary head, then drop its classifier pooling stage.
    trunk = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    features = trunk.features
    del features.final_pool
    return get_pspnet(
        backbone=features,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd101b_voc",
        **kwargs)
def pspnet_resnetd50b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-50b for COCO from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Build the ResNet(D)-50b trunk with a bend (intermediate output) for the
    # auxiliary head, then drop its classifier pooling stage.
    trunk = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    features = trunk.features
    del features.final_pool
    return get_pspnet(
        backbone=features,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd50b_coco",
        **kwargs)
def pspnet_resnetd101b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-101b for COCO from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Build the ResNet(D)-101b trunk with a bend (intermediate output) for the
    # auxiliary head, then drop its classifier pooling stage.
    trunk = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    features = trunk.features
    del features.final_pool
    return get_pspnet(
        backbone=features,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd101b_coco",
        **kwargs)
def pspnet_resnetd50b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-50b for ADE20K from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Dilated ResNet(D)-50b trunk with the classification pooling head removed.
    features = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del features.final_pool
    return get_pspnet(
        backbone=features,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd50b_ade20k",
        **kwargs)
def pspnet_resnetd101b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-101b for ADE20K from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Dilated ResNet(D)-101b trunk with the classification pooling head removed.
    features = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del features.final_pool
    return get_pspnet(
        backbone=features,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd101b_ade20k",
        **kwargs)
def pspnet_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-50b for Cityscapes from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Dilated ResNet(D)-50b trunk with the classification pooling head removed.
    features = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del features.final_pool
    return get_pspnet(
        backbone=features,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd50b_cityscapes",
        **kwargs)
def pspnet_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-101b for Cityscapes from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Dilated ResNet(D)-101b trunk with the classification pooling head removed.
    features = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del features.final_pool
    return get_pspnet(
        backbone=features,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd101b_cityscapes",
        **kwargs)
def _test():
    """Smoke-test every PSPNet variant: check parameter counts and output shapes."""
    import numpy as np
    import chainer
    chainer.global_config.train = False
    in_size = (480, 480)
    aux = True
    pretrained = False
    # Pairs of (model constructor, number of segmentation classes).
    models = [
        (pspnet_resnetd50b_voc, 21),
        (pspnet_resnetd101b_voc, 21),
        (pspnet_resnetd50b_coco, 21),
        (pspnet_resnetd101b_coco, 21),
        (pspnet_resnetd50b_ade20k, 150),
        (pspnet_resnetd101b_ade20k, 150),
        (pspnet_resnetd50b_cityscapes, 19),
        (pspnet_resnetd101b_cityscapes, 19),
    ]
    for model, classes in models:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected parameter counts differ with/without the auxiliary head.
        if aux:
            assert (model != pspnet_resnetd50b_voc or weight_count == 49081578)
            assert (model != pspnet_resnetd101b_voc or weight_count == 68073706)
            assert (model != pspnet_resnetd50b_coco or weight_count == 49081578)
            assert (model != pspnet_resnetd101b_coco or weight_count == 68073706)
            assert (model != pspnet_resnetd50b_ade20k or weight_count == 49180908)
            assert (model != pspnet_resnetd101b_ade20k or weight_count == 68173036)
            assert (model != pspnet_resnetd50b_cityscapes or weight_count == 49080038)
            assert (model != pspnet_resnetd101b_cityscapes or weight_count == 68072166)
        else:
            assert (model != pspnet_resnetd50b_voc or weight_count == 46716373)
            assert (model != pspnet_resnetd101b_voc or weight_count == 65708501)
            assert (model != pspnet_resnetd50b_coco or weight_count == 46716373)
            assert (model != pspnet_resnetd101b_coco or weight_count == 65708501)
            assert (model != pspnet_resnetd50b_ade20k or weight_count == 46782550)
            assert (model != pspnet_resnetd101b_ade20k or weight_count == 65774678)
            assert (model != pspnet_resnetd50b_cityscapes or weight_count == 46715347)
            assert (model != pspnet_resnetd101b_cityscapes or weight_count == 65707475)
        x = np.zeros((1, 3, in_size[0], in_size[1]), np.float32)
        # With aux=True the network returns a tuple; the main output is first.
        ys = net(x)
        y = ys[0] if aux else ys
        assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and
                (y.shape[3] == x.shape[3]))
# Run the smoke test when this module is executed directly.
if __name__ == "__main__":
    _test()
| 18,152
| 36.122699
| 115
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/dla.py
|
"""
DLA for ImageNet-1K, implemented in Chainer.
Original paper: 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
"""
__all__ = ['DLA', 'dla34', 'dla46c', 'dla46xc', 'dla60', 'dla60x', 'dla60xc', 'dla102', 'dla102x', 'dla102x2', 'dla169']
import os
import chainer.functions as F
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv1x1, conv1x1_block, conv3x3_block, conv7x7_block, SimpleSequential
from .resnet import ResBlock, ResBottleneck
from .resnext import ResNeXtBottleneck
class DLABottleneck(ResBottleneck):
    """
    DLA bottleneck block for residual path in residual block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    bottleneck_factor : int, default 2
        Bottleneck factor.
    """
    def __init__(self, in_channels, out_channels, stride, bottleneck_factor=2):
        # Identical to ResBottleneck except for the narrower default factor (2 vs 4).
        super(DLABottleneck, self).__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            bottleneck_factor=bottleneck_factor)
class DLABottleneckX(ResNeXtBottleneck):
    """
    DLA ResNeXt-like bottleneck block for residual path in residual block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    cardinality: int, default 32
        Number of groups.
    bottleneck_width: int, default 8
        Width of bottleneck block.
    """
    def __init__(self, in_channels, out_channels, stride, cardinality=32, bottleneck_width=8):
        # Thin wrapper pinning DLA's default grouped-convolution configuration.
        super(DLABottleneckX, self).__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            cardinality=cardinality,
            bottleneck_width=bottleneck_width)
class DLAResBlock(Chain):
    """
    DLA residual block with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    body_class : nn.Module, default ResBlock
        Residual block body class.
    return_down : bool, default False
        Whether return downsample result.
    """
    def __init__(self, in_channels, out_channels, stride, body_class=ResBlock, return_down=False):
        super(DLAResBlock, self).__init__()
        self.return_down = return_down
        self.downsample = stride > 1
        self.project = in_channels != out_channels
        with self.init_scope():
            self.body = body_class(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)
            self.activ = F.relu
            if self.downsample:
                # The identity branch is downsampled with max pooling, not a strided conv.
                self.downsample_pool = partial(
                    F.max_pooling_2d,
                    ksize=stride,
                    stride=stride,
                    cover_all=False)
            if self.project:
                # 1x1 projection aligns channel counts for the skip connection.
                self.project_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    activation=None)
    def __call__(self, x):
        if self.downsample:
            down = self.downsample_pool(x)
        else:
            down = x
        if self.project:
            identity = self.project_conv(down)
        else:
            identity = down
        if identity is None:
            # Defensive fallback kept from the original implementation.
            identity = x
        out = self.body(x)
        out += identity
        out = self.activ(out)
        if self.return_down:
            return out, down
        return out
class DLARoot(Chain):
    """
    DLA root block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    residual : bool
        Whether use residual connection.
    """
    def __init__(self, in_channels, out_channels, residual):
        super(DLARoot, self).__init__()
        self.residual = residual
        with self.init_scope():
            # 1x1 convolution fuses the concatenated branch outputs.
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                activation=None)
            self.activ = F.relu
    def __call__(self, x2, x1, extra):
        # Concatenate the last branch first, then the earlier ones.
        branches = (x2, x1) + tuple(extra)
        out = self.conv(F.concat(branches, axis=1))
        if self.residual:
            out += x2
        return self.activ(out)
class DLATree(Chain):
    """
    DLA tree unit. It's like iterative stage.
    Parameters:
    ----------
    levels : int
        Number of levels in the stage.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    res_body_class : nn.Module
        Residual block body class.
    stride : int or tuple/list of 2 int
        Stride of the convolution in a residual block.
    root_residual : bool
        Whether use residual connection in the root.
    root_dim : int
        Number of input channels in the root block.
    first_tree : bool, default False
        Is this tree stage the first stage in the net.
    input_level : bool, default True
        Is this tree unit the first unit in the stage.
    return_down : bool, default False
        Whether return downsample result.
    """
    def __init__(self,
                 levels,
                 in_channels,
                 out_channels,
                 res_body_class,
                 stride,
                 root_residual,
                 root_dim=0,
                 first_tree=False,
                 input_level=True,
                 return_down=False):
        super(DLATree, self).__init__()
        self.return_down = return_down
        # The downsampled input is routed to the root only for the outermost
        # tree of a stage, and never for the very first stage of the network.
        self.add_down = (input_level and not first_tree)
        self.root_level = (levels == 1)
        if root_dim == 0:
            # By default the root fuses the two subtree outputs.
            root_dim = 2 * out_channels
        if self.add_down:
            # The root additionally receives the downsampled stage input.
            root_dim += in_channels
        with self.init_scope():
            if self.root_level:
                # Leaf level: two plain residual blocks; only the first downsamples.
                self.tree1 = DLAResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    body_class=res_body_class,
                    return_down=True)
                self.tree2 = DLAResBlock(
                    in_channels=out_channels,
                    out_channels=out_channels,
                    stride=1,
                    body_class=res_body_class,
                    return_down=False)
            else:
                # Recursive case: two subtrees one level shallower; tree2's root
                # also fuses tree1's output (root_dim grows by out_channels).
                self.tree1 = DLATree(
                    levels=levels - 1,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    res_body_class=res_body_class,
                    stride=stride,
                    root_residual=root_residual,
                    root_dim=0,
                    input_level=False,
                    return_down=True)
                self.tree2 = DLATree(
                    levels=levels - 1,
                    in_channels=out_channels,
                    out_channels=out_channels,
                    res_body_class=res_body_class,
                    stride=1,
                    root_residual=root_residual,
                    root_dim=root_dim + out_channels,
                    input_level=False,
                    return_down=False)
        if self.root_level:
            # Only leaf trees own a root; deeper trees delegate fusion to tree2.
            self.root = DLARoot(
                in_channels=root_dim,
                out_channels=out_channels,
                residual=root_residual)
    def __call__(self, x, extra=None):
        extra = [] if extra is None else extra
        x1, down = self.tree1(x)
        if self.add_down:
            extra.append(down)
        if self.root_level:
            x2 = self.tree2(x1)
            x = self.root(x2, x1, extra)
        else:
            # Accumulate tree1's output for fusion in tree2's root.
            extra.append(x1)
            x = self.tree2(x1, extra)
        if self.return_down:
            return x, down
        else:
            return x
class DLAInitBlock(Chain):
    """
    DLA specific initial block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self, in_channels, out_channels):
        super(DLAInitBlock, self).__init__()
        mid_channels = out_channels // 2
        with self.init_scope():
            # 7x7 stem, then two 3x3 convolutions; the last one downsamples by 2.
            self.conv1 = conv7x7_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.conv3 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                stride=2)
    def __call__(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        return self.conv3(out)
class DLA(Chain):
    """
    DLA model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
    Parameters:
    ----------
    levels : list of int
        Number of levels in each stage.
    channels : list of int
        Number of output channels for each stage.
    init_block_channels : int
        Number of output channels for the initial unit.
    res_body_class : nn.Module
        Residual block body class.
    residual_root : bool
        Whether use residual connection in the root blocks.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 levels,
                 channels,
                 init_block_channels,
                 res_body_class,
                 residual_root,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000):
        super(DLA, self).__init__()
        self.in_size = in_size
        self.classes = classes
        with self.init_scope():
            self.features = SimpleSequential()
            with self.features.init_scope():
                setattr(self.features, "init_block", DLAInitBlock(
                    in_channels=in_channels,
                    out_channels=init_block_channels))
                in_channels = init_block_channels
                # Each stage is a single DLATree that downsamples by 2.
                for i in range(len(levels)):
                    levels_i = levels[i]
                    out_channels = channels[i]
                    first_tree = (i == 0)
                    setattr(self.features, "stage{}".format(i + 1), DLATree(
                        levels=levels_i,
                        in_channels=in_channels,
                        out_channels=out_channels,
                        res_body_class=res_body_class,
                        stride=2,
                        root_residual=residual_root,
                        first_tree=first_tree))
                    in_channels = out_channels
                setattr(self.features, "final_pool", partial(
                    F.average_pooling_2d,
                    ksize=7,
                    stride=1))
            self.output = SimpleSequential()
            with self.output.init_scope():
                # Classification head: 1x1 convolution instead of a dense layer,
                # then a reshape to (batch, classes).
                setattr(self.output, "final_conv", conv1x1(
                    in_channels=in_channels,
                    out_channels=classes,
                    use_bias=True))
                setattr(self.output, "final_flatten", partial(
                    F.reshape,
                    shape=(-1, classes)))
    def __call__(self, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_dla(levels,
            channels,
            res_body_class,
            residual_root=False,
            model_name=None,
            pretrained=False,
            root=os.path.join("~", ".chainer", "models"),
            **kwargs):
    """
    Create DLA model with specific parameters.
    Parameters:
    ----------
    levels : list of int
        Number of levels in each stage.
    channels : list of int
        Number of output channels for each stage.
    res_body_class : nn.Module
        Residual block body class.
    residual_root : bool, default False
        Whether use residual connection in the root blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # All DLA variants share the same stem width.
    init_block_channels = 32
    net = DLA(
        levels=levels,
        channels=channels,
        init_block_channels=init_block_channels,
        res_body_class=res_body_class,
        residual_root=residual_root,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        load_npz(
            file=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            obj=net)
    return net
def dla34(**kwargs):
    """
    DLA-34 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 2, 1],
        channels=[64, 128, 256, 512],
        res_body_class=ResBlock,
        model_name="dla34",
        **kwargs)
def dla46c(**kwargs):
    """
    DLA-46-C model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 2, 1],
        channels=[64, 64, 128, 256],
        res_body_class=DLABottleneck,
        model_name="dla46c",
        **kwargs)
def dla46xc(**kwargs):
    """
    DLA-X-46-C model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 2, 1],
        channels=[64, 64, 128, 256],
        res_body_class=DLABottleneckX,
        model_name="dla46xc",
        **kwargs)
def dla60(**kwargs):
    """
    DLA-60 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 3, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneck,
        model_name="dla60",
        **kwargs)
def dla60x(**kwargs):
    """
    DLA-X-60 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 3, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneckX,
        model_name="dla60x",
        **kwargs)
def dla60xc(**kwargs):
    """
    DLA-X-60-C model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 3, 1],
        channels=[64, 64, 128, 256],
        res_body_class=DLABottleneckX,
        model_name="dla60xc",
        **kwargs)
def dla102(**kwargs):
    """
    DLA-102 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 3, 4, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneck,
        residual_root=True,
        model_name="dla102",
        **kwargs)
def dla102x(**kwargs):
    """
    DLA-X-102 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 3, 4, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneckX,
        residual_root=True,
        model_name="dla102x",
        **kwargs)
def dla102x2(**kwargs):
    """
    DLA-X2-102 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    class DLABottleneckX64(DLABottleneckX):
        # DLA-X bottleneck with the cardinality widened to 64 groups.
        def __init__(self, in_channels, out_channels, stride):
            super(DLABottleneckX64, self).__init__(in_channels, out_channels, stride, cardinality=64)
    return get_dla(
        levels=[1, 3, 4, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneckX64,
        residual_root=True,
        model_name="dla102x2",
        **kwargs)
def dla169(**kwargs):
    """
    DLA-169 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[2, 3, 5, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneck,
        residual_root=True,
        model_name="dla169",
        **kwargs)
def _test():
    """Smoke-test every DLA variant: check parameter counts and output shape."""
    import numpy as np
    import chainer
    chainer.global_config.train = False
    pretrained = False
    models = [
        dla34,
        dla46c,
        dla46xc,
        dla60,
        dla60x,
        dla60xc,
        dla102,
        dla102x,
        dla102x2,
        dla169,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference parameter counts for each variant.
        assert (model != dla34 or weight_count == 15742104)
        assert (model != dla46c or weight_count == 1301400)
        assert (model != dla46xc or weight_count == 1068440)
        assert (model != dla60 or weight_count == 22036632)
        assert (model != dla60x or weight_count == 17352344)
        assert (model != dla60xc or weight_count == 1319832)
        assert (model != dla102 or weight_count == 33268888)
        assert (model != dla102x or weight_count == 26309272)
        assert (model != dla102x2 or weight_count == 41282200)
        assert (model != dla169 or weight_count == 53389720)
        x = np.zeros((1, 3, 224, 224), np.float32)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke test when this module is executed directly.
if __name__ == "__main__":
    _test()
| 20,401
| 30.729393
| 120
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/proxylessnas.py
|
"""
ProxylessNAS for ImageNet-1K, implemented in Chainer.
Original paper: 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
"""
__all__ = ['ProxylessNAS', 'proxylessnas_cpu', 'proxylessnas_gpu', 'proxylessnas_mobile', 'proxylessnas_mobile14',
'get_proxylessnas']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import ConvBlock, conv1x1_block, conv3x3_block, SimpleSequential
class ProxylessBlock(Chain):
    """
    ProxylessNAS block for residual path in ProxylessNAS unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    ksize : int
        Convolution window size.
    stride : int
        Stride of the convolution.
    bn_eps : float
        Small float added to variance in Batch norm.
    expansion : int
        Expansion ratio.
    """
    def __init__(self, in_channels, out_channels, ksize, stride, bn_eps, expansion):
        super(ProxylessBlock, self).__init__()
        # An expansion (bottleneck) convolution is only needed for ratios above 1.
        self.use_bc = expansion > 1
        mid_channels = in_channels * expansion
        with self.init_scope():
            if self.use_bc:
                self.bc_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    bn_eps=bn_eps,
                    activation="relu6")
            # Depthwise convolution (groups == channels) with "same" padding.
            self.dw_conv = ConvBlock(
                in_channels=mid_channels,
                out_channels=mid_channels,
                ksize=ksize,
                stride=stride,
                pad=(ksize - 1) // 2,
                groups=mid_channels,
                bn_eps=bn_eps,
                activation="relu6")
            # Linear pointwise projection (no activation).
            self.pw_conv = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_eps=bn_eps,
                activation=None)
    def __call__(self, x):
        out = self.bc_conv(x) if self.use_bc else x
        out = self.dw_conv(out)
        return self.pw_conv(out)
class ProxylessUnit(Chain):
    """
    ProxylessNAS unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    ksize : int
        Convolution window size for body block.
    stride : int
        Stride of the convolution.
    bn_eps : float
        Small float added to variance in Batch norm.
    expansion : int
        Expansion ratio for body block.
    residual : bool
        Whether to use residual branch.
    shortcut : bool
        Whether to use identity branch.
    """
    def __init__(self, in_channels, out_channels, ksize, stride, bn_eps, expansion, residual, shortcut):
        super(ProxylessUnit, self).__init__()
        # At least one of the two branches must exist.
        assert (residual or shortcut)
        self.residual = residual
        self.shortcut = shortcut
        with self.init_scope():
            if self.residual:
                self.body = ProxylessBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    ksize=ksize,
                    stride=stride,
                    bn_eps=bn_eps,
                    expansion=expansion)
    def __call__(self, x):
        # Identity-only unit: pass the input through unchanged.
        if not self.residual:
            return x
        # Body-only unit: no skip connection.
        if not self.shortcut:
            return self.body(x)
        # Both branches: inverted-residual addition.
        return x + self.body(x)
class ProxylessNAS(Chain):
    """
    ProxylessNAS model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
    https://arxiv.org/abs/1812.00332.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final unit.
    residuals : list of list of int
        Whether to use residual branch in units.
    shortcuts : list of list of int
        Whether to use identity branch in units.
    ksizes : list of list of int
        Convolution window size for each units.
    expansions : list of list of int
        Expansion ratio for each units.
    bn_eps : float, default 1e-3
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 residuals,
                 shortcuts,
                 ksizes,
                 expansions,
                 bn_eps=1e-3,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000):
        super(ProxylessNAS, self).__init__()
        self.in_size = in_size
        self.classes = classes
        with self.init_scope():
            self.features = SimpleSequential()
            with self.features.init_scope():
                setattr(self.features, "init_block", conv3x3_block(
                    in_channels=in_channels,
                    out_channels=init_block_channels,
                    stride=2,
                    bn_eps=bn_eps,
                    activation="relu6"))
                in_channels = init_block_channels
                for i, channels_per_stage in enumerate(channels):
                    stage = SimpleSequential()
                    residuals_per_stage = residuals[i]
                    shortcuts_per_stage = shortcuts[i]
                    ksizes_per_stage = ksizes[i]
                    expansions_per_stage = expansions[i]
                    with stage.init_scope():
                        for j, out_channels in enumerate(channels_per_stage):
                            residual = (residuals_per_stage[j] == 1)
                            shortcut = (shortcuts_per_stage[j] == 1)
                            ksize = ksizes_per_stage[j]
                            expansion = expansions_per_stage[j]
                            # Downsample at the first unit of every stage except the first stage.
                            stride = 2 if (j == 0) and (i != 0) else 1
                            setattr(stage, "unit{}".format(j + 1), ProxylessUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                ksize=ksize,
                                stride=stride,
                                bn_eps=bn_eps,
                                expansion=expansion,
                                residual=residual,
                                shortcut=shortcut))
                            in_channels = out_channels
                    setattr(self.features, "stage{}".format(i + 1), stage)
                setattr(self.features, "final_block", conv1x1_block(
                    in_channels=in_channels,
                    out_channels=final_block_channels,
                    bn_eps=bn_eps,
                    activation="relu6"))
                in_channels = final_block_channels
                setattr(self.features, "final_pool", partial(
                    F.average_pooling_2d,
                    ksize=7,
                    stride=1))
            self.output = SimpleSequential()
            with self.output.init_scope():
                setattr(self.output, "flatten", partial(
                    F.reshape,
                    shape=(-1, in_channels)))
                setattr(self.output, "fc", L.Linear(
                    in_size=in_channels,
                    out_size=classes))
    def __call__(self, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_proxylessnas(version,
                     model_name=None,
                     pretrained=False,
                     root=os.path.join("~", ".chainer", "models"),
                     **kwargs):
    """
    Create ProxylessNAS model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of ProxylessNAS ('cpu', 'gpu', 'mobile' or 'mobile14').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Per-stage architecture tables: `residuals` flags which units have a body
    # branch, and the kernel sizes / expansions parameterize each unit.
    if version == "cpu":
        residuals = [[1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
        channels = [[24], [32, 32, 32, 32], [48, 48, 48, 48], [88, 88, 88, 88, 104, 104, 104, 104],
                    [216, 216, 216, 216, 360]]
        kernel_sizes = [[3], [3, 3, 3, 3], [3, 3, 3, 5], [3, 3, 3, 3, 5, 3, 3, 3], [5, 5, 5, 3, 5]]
        expansions = [[1], [6, 3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 3, 3, 3, 6]]
        init_block_channels = 40
        final_block_channels = 1432
    elif version == "gpu":
        residuals = [[1], [1, 0, 0, 0], [1, 0, 0, 1], [1, 0, 0, 1, 1, 0, 1, 1], [1, 1, 1, 1, 1]]
        channels = [[24], [32, 32, 32, 32], [56, 56, 56, 56], [112, 112, 112, 112, 128, 128, 128, 128],
                    [256, 256, 256, 256, 432]]
        kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 3, 3], [7, 5, 5, 5, 5, 3, 3, 5], [7, 7, 7, 5, 7]]
        expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 6, 6, 6]]
        init_block_channels = 40
        final_block_channels = 1728
    elif version == "mobile":
        residuals = [[1], [1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
        channels = [[16], [32, 32, 32, 32], [40, 40, 40, 40], [80, 80, 80, 80, 96, 96, 96, 96],
                    [192, 192, 192, 192, 320]]
        kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 5, 5], [7, 5, 5, 5, 5, 5, 5, 5], [7, 7, 7, 7, 7]]
        expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 3, 3, 6]]
        init_block_channels = 32
        final_block_channels = 1280
    elif version == "mobile14":
        residuals = [[1], [1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
        channels = [[24], [40, 40, 40, 40], [56, 56, 56, 56], [112, 112, 112, 112, 136, 136, 136, 136],
                    [256, 256, 256, 256, 448]]
        kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 5, 5], [7, 5, 5, 5, 5, 5, 5, 5], [7, 7, 7, 7, 7]]
        expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 3, 3, 6]]
        init_block_channels = 48
        final_block_channels = 1792
    else:
        raise ValueError("Unsupported ProxylessNAS version: {}".format(version))
    # Identity-branch layout is shared across all versions.
    shortcuts = [[0], [0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1, 0, 1, 1, 1], [0, 1, 1, 1, 0]]
    net = ProxylessNAS(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        residuals=residuals,
        shortcuts=shortcuts,
        ksizes=kernel_sizes,
        expansions=expansions,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        load_npz(
            file=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            obj=net)
    return net
def proxylessnas_cpu(**kwargs):
    """
    ProxylessNAS (CPU) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
    https://arxiv.org/abs/1812.00332.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        version="cpu",
        model_name="proxylessnas_cpu",
        **kwargs)
def proxylessnas_gpu(**kwargs):
    """
    ProxylessNAS (GPU) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
    https://arxiv.org/abs/1812.00332.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        version="gpu",
        model_name="proxylessnas_gpu",
        **kwargs)
def proxylessnas_mobile(**kwargs):
    """
    ProxylessNAS (Mobile) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
    https://arxiv.org/abs/1812.00332.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        version="mobile",
        model_name="proxylessnas_mobile",
        **kwargs)
def proxylessnas_mobile14(**kwargs):
    """
    ProxylessNAS (Mobile-14) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
    https://arxiv.org/abs/1812.00332.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        version="mobile14",
        model_name="proxylessnas_mobile14",
        **kwargs)
def _test():
    """Smoke-test every ProxylessNAS variant: check parameter counts and output shape."""
    import numpy as np
    import chainer
    chainer.global_config.train = False
    pretrained = False
    models = [
        proxylessnas_cpu,
        proxylessnas_gpu,
        proxylessnas_mobile,
        proxylessnas_mobile14,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference parameter counts for each variant.
        assert (model != proxylessnas_cpu or weight_count == 4361648)
        assert (model != proxylessnas_gpu or weight_count == 7119848)
        assert (model != proxylessnas_mobile or weight_count == 4080512)
        assert (model != proxylessnas_mobile14 or weight_count == 6857568)
        x = np.zeros((14, 3, 224, 224), np.float32)
        y = net(x)
        assert (y.shape == (14, 1000))
if __name__ == "__main__":
_test()
| 14,977
| 34.918465
| 118
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/shufflenetv2.py
|
"""
ShuffleNet V2 for ImageNet-1K, implemented in Chainer.
Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
"""
__all__ = ['ShuffleNetV2', 'shufflenetv2_wd2', 'shufflenetv2_w1', 'shufflenetv2_w3d2', 'shufflenetv2_w2']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv1x1, depthwise_conv3x3, conv1x1_block, conv3x3_block, ChannelShuffle, SEBlock, SimpleSequential
class ShuffleUnit(Chain):
    """
    ShuffleNetV2 unit: two parallel branches whose outputs are concatenated and
    channel-shuffled.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    downsample : bool
        Whether to downsample spatially (stride-2 depthwise convolutions in both branches).
    use_se : bool
        Whether to use SE block.
    use_residual : bool
        Whether to use residual connection.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 downsample,
                 use_se,
                 use_residual):
        super(ShuffleUnit, self).__init__()
        self.downsample = downsample
        self.use_se = use_se
        self.use_residual = use_residual
        # Each of the two parallel branches carries half of the output channels.
        mid_channels = out_channels // 2
        with self.init_scope():
            # Main branch: 1x1 compress -> 3x3 depthwise -> 1x1 expand.
            self.compress_conv1 = conv1x1(
                in_channels=(in_channels if self.downsample else mid_channels),
                out_channels=mid_channels)
            self.compress_bn1 = L.BatchNormalization(
                size=mid_channels,
                eps=1e-5)
            self.dw_conv2 = depthwise_conv3x3(
                channels=mid_channels,
                stride=(2 if self.downsample else 1))
            self.dw_bn2 = L.BatchNormalization(
                size=mid_channels,
                eps=1e-5)
            self.expand_conv3 = conv1x1(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.expand_bn3 = L.BatchNormalization(
                size=mid_channels,
                eps=1e-5)
            if self.use_se:
                self.se = SEBlock(channels=mid_channels)
            if downsample:
                # Shortcut branch used only when downsampling: stride-2 depthwise
                # conv followed by a 1x1 projection.
                self.dw_conv4 = depthwise_conv3x3(
                    channels=in_channels,
                    stride=2)
                self.dw_bn4 = L.BatchNormalization(
                    size=in_channels,
                    eps=1e-5)
                self.expand_conv5 = conv1x1(
                    in_channels=in_channels,
                    out_channels=mid_channels)
                self.expand_bn5 = L.BatchNormalization(
                    size=mid_channels,
                    eps=1e-5)
        self.activ = F.relu
        self.c_shuffle = ChannelShuffle(
            channels=out_channels,
            groups=2)
    def __call__(self, x):
        # y1 is the shortcut branch output, y2 the main branch output.
        if self.downsample:
            y1 = self.dw_conv4(x)
            y1 = self.dw_bn4(y1)
            y1 = self.expand_conv5(y1)
            y1 = self.expand_bn5(y1)
            y1 = self.activ(y1)
            x2 = x
        else:
            # Without downsampling, split the input channels evenly between branches.
            y1, x2 = F.split_axis(x, indices_or_sections=2, axis=1)
        y2 = self.compress_conv1(x2)
        y2 = self.compress_bn1(y2)
        y2 = self.activ(y2)
        y2 = self.dw_conv2(y2)
        y2 = self.dw_bn2(y2)
        y2 = self.expand_conv3(y2)
        y2 = self.expand_bn3(y2)
        y2 = self.activ(y2)
        if self.use_se:
            y2 = self.se(y2)
        if self.use_residual and not self.downsample:
            y2 = y2 + x2
        # Concatenate the branches and shuffle channels to mix information between groups.
        x = F.concat((y1, y2), axis=1)
        x = self.c_shuffle(x)
        return x
class ShuffleInitBlock(Chain):
    """
    ShuffleNetV2 specific initial block: stride-2 3x3 convolution followed by
    stride-2 max pooling (4x total spatial reduction).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(ShuffleInitBlock, self).__init__()
        with self.init_scope():
            self.conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=2)
            # Parameter-free pooling; stored as a partial so it reads like a layer.
            self.pool = partial(
                F.max_pooling_2d,
                ksize=3,
                stride=2,
                pad=0,
                cover_all=False)
    def __call__(self, x):
        x = self.conv(x)
        x = self.pool(x)
        return x
class ShuffleNetV2(Chain):
    """
    ShuffleNetV2 model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    use_se : bool, default False
        Whether to use SE block.
    use_residual : bool, default False
        Whether to use residual connections.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 use_se=False,
                 use_residual=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000):
        super(ShuffleNetV2, self).__init__()
        self.in_size = in_size
        self.classes = classes
        with self.init_scope():
            self.features = SimpleSequential()
            with self.features.init_scope():
                setattr(self.features, "init_block", ShuffleInitBlock(
                    in_channels=in_channels,
                    out_channels=init_block_channels))
                # `in_channels` is reused as a running channel-count accumulator below.
                in_channels = init_block_channels
                for i, channels_per_stage in enumerate(channels):
                    stage = SimpleSequential()
                    with stage.init_scope():
                        for j, out_channels in enumerate(channels_per_stage):
                            # Only the first unit of each stage downsamples.
                            downsample = (j == 0)
                            setattr(stage, "unit{}".format(j + 1), ShuffleUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                downsample=downsample,
                                use_se=use_se,
                                use_residual=use_residual))
                            in_channels = out_channels
                    setattr(self.features, "stage{}".format(i + 1), stage)
                setattr(self.features, "final_block", conv1x1_block(
                    in_channels=in_channels,
                    out_channels=final_block_channels))
                in_channels = final_block_channels
                # Global 7x7 average pooling (assumes 224x224 input => 7x7 feature map).
                setattr(self.features, "final_pool", partial(
                    F.average_pooling_2d,
                    ksize=7,
                    stride=1))
            self.output = SimpleSequential()
            with self.output.init_scope():
                setattr(self.output, "flatten", partial(
                    F.reshape,
                    shape=(-1, in_channels)))
                setattr(self.output, "fc", L.Linear(
                    in_size=in_channels,
                    out_size=classes))
    def __call__(self, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_shufflenetv2(width_scale,
                     model_name=None,
                     pretrained=False,
                     root=os.path.join("~", ".chainer", "models"),
                     **kwargs):
    """
    Create ShuffleNetV2 model with specific parameters.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 24
    final_block_channels = 1024
    layers = [4, 8, 4]
    channels_per_layers = [116, 232, 464]
    # Repeat each stage's channel count once per unit in that stage.
    channels = [[base] * count for (base, count) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        channels = [[int(c * width_scale) for c in stage] for stage in channels]
        # Only very wide models also widen the final feature block.
        if width_scale > 1.5:
            final_block_channels = int(final_block_channels * width_scale)
    net = ShuffleNetV2(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Download (or reuse cached) weights and load them into the network.
        load_npz(
            file=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            obj=net)
    return net
def shufflenetv2_wd2(**kwargs):
    """
    Build the ShuffleNetV2 0.5x network from 'ShuffleNet V2: Practical Guidelines for Efficient CNN
    Architecture Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # 12/29 approximates the canonical 0.5x width multiplier for this channel base.
    return get_shufflenetv2(width_scale=(12.0 / 29.0), model_name="shufflenetv2_wd2", **kwargs)
def shufflenetv2_w1(**kwargs):
    """
    Build the ShuffleNetV2 1x network from 'ShuffleNet V2: Practical Guidelines for Efficient CNN
    Architecture Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Unit width scale: the reference channel configuration is used unchanged.
    return get_shufflenetv2(width_scale=1.0, model_name="shufflenetv2_w1", **kwargs)
def shufflenetv2_w3d2(**kwargs):
    """
    Build the ShuffleNetV2 1.5x network from 'ShuffleNet V2: Practical Guidelines for Efficient CNN
    Architecture Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # 44/29 approximates the canonical 1.5x width multiplier for this channel base.
    return get_shufflenetv2(width_scale=(44.0 / 29.0), model_name="shufflenetv2_w3d2", **kwargs)
def shufflenetv2_w2(**kwargs):
    """
    Build the ShuffleNetV2 2x network from 'ShuffleNet V2: Practical Guidelines for Efficient CNN
    Architecture Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # 61/29 approximates the canonical 2x width multiplier for this channel base.
    return get_shufflenetv2(width_scale=(61.0 / 29.0), model_name="shufflenetv2_w2", **kwargs)
def _test():
    """Smoke-test each ShuffleNetV2 variant: parameter count and output shape."""
    import numpy as np
    import chainer
    chainer.global_config.train = False
    pretrained = False
    # Expected trainable-parameter counts per constructor.
    expected_weight_counts = {
        shufflenetv2_wd2: 1366792,
        shufflenetv2_w1: 2278604,
        shufflenetv2_w3d2: 4406098,
        shufflenetv2_w2: 7601686,
    }
    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        # A forward pass on a dummy image must yield ImageNet-1K logits.
        x = np.zeros((1, 3, 224, 224), np.float32)
        y = net(x)
        assert (y.shape == (1, 1000))
| 12,350
| 32.291105
| 119
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/fishnet.py
|
"""
FishNet for ImageNet-1K, implemented in Chainer.
Original paper: 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
"""
__all__ = ['FishNet', 'fishnet99', 'fishnet150']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import pre_conv1x1_block, pre_conv3x3_block, conv1x1, SesquialteralHourglass, SimpleSequential
from .preresnet import PreResActivation
from .senet import SEInitBlock
def channel_squeeze(x,
                    groups):
    """
    Channel squeeze operation: sums together the channels belonging to the same
    group, reducing the channel count by a factor of `groups`.

    Parameters:
    ----------
    x : chainer.Variable or numpy.ndarray or cupy.ndarray
        Input variable.
    groups : int
        Number of groups.

    Returns:
    -------
    chainer.Variable or numpy.ndarray or cupy.ndarray
        Resulted variable.
    """
    batch, channels, height, width = x.shape
    # Expose the group axis explicitly, then collapse it by summation.
    x = F.reshape(x, shape=(batch, channels // groups, groups, height, width))
    return F.sum(x, axis=2)
class ChannelSqueeze(Chain):
    """
    Channel squeeze layer. This is a wrapper over the same operation. It is designed to save the number of groups.

    Parameters:
    ----------
    channels : int
        Number of channels.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 channels,
                 groups):
        super(ChannelSqueeze, self).__init__()
        # The channel count must split evenly into the requested groups.
        assert (channels % groups == 0)
        self.groups = groups
    def __call__(self, x):
        # Delegate to the functional implementation.
        return channel_squeeze(x, self.groups)
class InterpolationBlock(Chain):
    """
    Interpolation block (nearest-neighbor upsampling via unpooling).

    Parameters:
    ----------
    scale_factor : int
        Multiplier for spatial size.
    """
    def __init__(self,
                 scale_factor):
        super(InterpolationBlock, self).__init__()
        self.scale_factor = scale_factor
    def __call__(self, x):
        # cover_all=False keeps the output size an exact multiple of the input size.
        return F.unpooling_2d(
            x=x,
            ksize=self.scale_factor,
            cover_all=False)
class PreSEAttBlock(Chain):
    """
    FishNet specific Squeeze-and-Excitation attention block with pre-activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    reduction : int, default 16
        Squeeze reduction value.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 reduction=16):
        super(PreSEAttBlock, self).__init__()
        # Bottleneck width of the excitation MLP.
        mid_channels = out_channels // reduction
        with self.init_scope():
            self.bn = L.BatchNormalization(
                size=in_channels,
                eps=1e-5)
            self.conv1 = conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels,
                use_bias=True)
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True)
    def __call__(self, x):
        w = self.bn(x)
        w = F.relu(w)
        # Global average pooling squeezes each channel to a single value.
        w = F.average_pooling_2d(w, ksize=w.shape[2:])
        # Two 1x1 convolutions act as the excitation MLP.
        w = self.conv1(w)
        w = F.relu(w)
        w = self.conv2(w)
        return F.sigmoid(w)
class FishBottleneck(Chain):
    """
    FishNet bottleneck block for residual unit (pre-activation 1x1 -> 3x3 -> 1x1).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    dilate : int or tuple/list of 2 int
        Dilation value for convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 dilate):
        super(FishBottleneck, self).__init__()
        # Standard bottleneck reduction factor of 4.
        mid_channels = out_channels // 4
        with self.init_scope():
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            # Padding equals dilation so the 3x3 conv preserves spatial size at stride 1.
            self.conv2 = pre_conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                stride=stride,
                pad=dilate,
                dilate=dilate)
            self.conv3 = pre_conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels)
    def __call__(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class FishBlock(Chain):
    """
    FishNet residual block: a bottleneck body plus an identity path that may be
    channel-squeezed or 1x1-projected.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    dilate : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    squeeze : bool, default False
        Whether to use a channel squeeze operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 dilate=1,
                 squeeze=False):
        super(FishBlock, self).__init__()
        self.squeeze = squeeze
        # The identity path needs a projection whenever channel count or spatial size changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        with self.init_scope():
            self.body = FishBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                dilate=dilate)
            if self.squeeze:
                # Channel squeeze halves the channels, so it only applies when out == in/2.
                assert (in_channels // 2 == out_channels)
                self.c_squeeze = ChannelSqueeze(
                    channels=in_channels,
                    groups=2)
            elif self.resize_identity:
                self.identity_conv = pre_conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride)
    def __call__(self, x):
        if self.squeeze:
            identity = self.c_squeeze(x)
        elif self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        return x
class DownUnit(Chain):
    """
    FishNet down unit: a run of FishBlocks followed by 2x max pooling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list):
        super(DownUnit, self).__init__()
        with self.init_scope():
            self.blocks = SimpleSequential()
            with self.blocks.init_scope():
                for i, out_channels in enumerate(out_channels_list):
                    setattr(self.blocks, "block{}".format(i + 1), FishBlock(
                        in_channels=in_channels,
                        out_channels=out_channels))
                    in_channels = out_channels
            # Parameter-free 2x spatial downsampling.
            self.pool = partial(
                F.max_pooling_2d,
                ksize=2,
                stride=2,
                cover_all=False)
    def __call__(self, x):
        x = self.blocks(x)
        x = self.pool(x)
        return x
class UpUnit(Chain):
    """
    FishNet up unit: a run of FishBlocks followed by 2x upsampling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    dilate : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 dilate=1):
        super(UpUnit, self).__init__()
        with self.init_scope():
            self.blocks = SimpleSequential()
            with self.blocks.init_scope():
                for i, out_channels in enumerate(out_channels_list):
                    # Only the first block of a dilated unit squeezes channels.
                    squeeze = (dilate > 1) and (i == 0)
                    setattr(self.blocks, "block{}".format(i + 1), FishBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        dilate=dilate,
                        squeeze=squeeze))
                    in_channels = out_channels
            self.upsample = InterpolationBlock(scale_factor=2)
    def __call__(self, x):
        x = self.blocks(x)
        x = self.upsample(x)
        return x
class SkipUnit(Chain):
    """
    FishNet skip connection unit: a plain run of FishBlocks with no resampling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list):
        super(SkipUnit, self).__init__()
        with self.init_scope():
            self.blocks = SimpleSequential()
            with self.blocks.init_scope():
                for i, out_channels in enumerate(out_channels_list):
                    setattr(self.blocks, "block{}".format(i + 1), FishBlock(
                        in_channels=in_channels,
                        out_channels=out_channels))
                    in_channels = out_channels
    def __call__(self, x):
        x = self.blocks(x)
        return x
class SkipAttUnit(Chain):
    """
    FishNet skip connection unit with attention block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list):
        super(SkipAttUnit, self).__init__()
        mid_channels1 = in_channels // 2
        mid_channels2 = 2 * in_channels
        with self.init_scope():
            # 1x1 squeeze then 1x1 expand before the attention branch.
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels1)
            self.conv2 = pre_conv1x1_block(
                in_channels=mid_channels1,
                out_channels=mid_channels2,
                use_bias=True)
            in_channels = mid_channels2
            # SE-style attention producing per-channel weights for the final output.
            self.se = PreSEAttBlock(
                in_channels=mid_channels2,
                out_channels=out_channels_list[-1])
            self.blocks = SimpleSequential()
            with self.blocks.init_scope():
                for i, out_channels in enumerate(out_channels_list):
                    setattr(self.blocks, "block{}".format(i + 1), FishBlock(
                        in_channels=in_channels,
                        out_channels=out_channels))
                    in_channels = out_channels
    def __call__(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        w = self.se(x)
        x = self.blocks(x)
        # Scale by the attention weights and add them back (x * w + w).
        x = x * w + w
        return x
class FishFinalBlock(Chain):
    """
    FishNet final block: a 1x1 channel-halving convolution followed by
    pre-activation (BN + ReLU).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    """
    def __init__(self,
                 in_channels):
        super(FishFinalBlock, self).__init__()
        mid_channels = in_channels // 2
        with self.init_scope():
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.preactiv = PreResActivation(
                in_channels=mid_channels)
    def __call__(self, x):
        x = self.conv1(x)
        x = self.preactiv(x)
        return x
class FishNet(Chain):
    """
    FishNet model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
    http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.

    Parameters:
    ----------
    direct_channels : list of list of list of int
        Number of output channels for each unit along the straight path.
    skip_channels : list of list of list of int
        Number of output channels for each skip connection unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 direct_channels,
                 skip_channels,
                 init_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000):
        super(FishNet, self).__init__()
        self.in_size = in_size
        self.classes = classes
        # The three straight-path segments (tail/body/head of the "fish") share a depth.
        depth = len(direct_channels[0])
        down1_channels = direct_channels[0]
        up_channels = direct_channels[1]
        down2_channels = direct_channels[2]
        skip1_channels = skip_channels[0]
        skip2_channels = skip_channels[1]
        with self.init_scope():
            self.features = SimpleSequential()
            with self.features.init_scope():
                setattr(self.features, "init_block", SEInitBlock(
                    in_channels=in_channels,
                    out_channels=init_block_channels))
                # `in_channels` is reused as a running channel-count accumulator below.
                in_channels = init_block_channels
                # First downward path plus its skip connections; the last skip uses attention.
                down1_seq = SimpleSequential()
                skip1_seq = SimpleSequential()
                for i in range(depth + 1):
                    skip1_channels_list = skip1_channels[i]
                    if i < depth:
                        with skip1_seq.init_scope():
                            setattr(skip1_seq, "unit{}".format(i + 1), SkipUnit(
                                in_channels=in_channels,
                                out_channels_list=skip1_channels_list))
                        down1_channels_list = down1_channels[i]
                        with down1_seq.init_scope():
                            setattr(down1_seq, "unit{}".format(i + 1), DownUnit(
                                in_channels=in_channels,
                                out_channels_list=down1_channels_list))
                        in_channels = down1_channels_list[-1]
                    else:
                        with skip1_seq.init_scope():
                            setattr(skip1_seq, "unit{}".format(i + 1), SkipAttUnit(
                                in_channels=in_channels,
                                out_channels_list=skip1_channels_list))
                        in_channels = skip1_channels_list[-1]
                # Upward path; each level concatenates the corresponding skip1 output.
                up_seq = SimpleSequential()
                skip2_seq = SimpleSequential()
                for i in range(depth + 1):
                    skip2_channels_list = skip2_channels[i]
                    if i > 0:
                        # Account for the channels concatenated from the skip1 path.
                        in_channels += skip1_channels[depth - i][-1]
                    if i < depth:
                        with skip2_seq.init_scope():
                            setattr(skip2_seq, "unit{}".format(i + 1), SkipUnit(
                                in_channels=in_channels,
                                out_channels_list=skip2_channels_list))
                        up_channels_list = up_channels[i]
                        dilate = 2 ** i
                        with up_seq.init_scope():
                            setattr(up_seq, "unit{}".format(i + 1), UpUnit(
                                in_channels=in_channels,
                                out_channels_list=up_channels_list,
                                dilate=dilate))
                        in_channels = up_channels_list[-1]
                    else:
                        with skip2_seq.init_scope():
                            setattr(skip2_seq, "unit{}".format(i + 1), F.identity)
                # Second downward path; each level concatenates the corresponding skip2 output.
                down2_seq = SimpleSequential()
                with down2_seq.init_scope():
                    for i in range(depth):
                        down2_channels_list = down2_channels[i]
                        setattr(down2_seq, "unit{}".format(i + 1), DownUnit(
                            in_channels=in_channels,
                            out_channels_list=down2_channels_list))
                        in_channels = down2_channels_list[-1] + skip2_channels[depth - 1 - i][-1]
                setattr(self.features, "hg", SesquialteralHourglass(
                    down1_seq=down1_seq,
                    skip1_seq=skip1_seq,
                    up_seq=up_seq,
                    skip2_seq=skip2_seq,
                    down2_seq=down2_seq))
                setattr(self.features, "final_block", FishFinalBlock(in_channels=in_channels))
                # FishFinalBlock halves the channel count.
                in_channels = in_channels // 2
                setattr(self.features, "final_pool", partial(
                    F.average_pooling_2d,
                    ksize=7,
                    stride=1))
            self.output = SimpleSequential()
            with self.output.init_scope():
                # 1x1 convolution acts as the classifier head.
                setattr(self.output, "final_conv", conv1x1(
                    in_channels=in_channels,
                    out_channels=classes,
                    use_bias=True))
                setattr(self.output, "final_flatten", partial(
                    F.reshape,
                    shape=(-1, classes)))
    def __call__(self, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_fishnet(blocks,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".chainer", "models"),
                **kwargs):
    """
    Create FishNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    if blocks == 99:
        direct_layers = [[2, 2, 6], [1, 1, 1], [1, 2, 2]]
        skip_layers = [[1, 1, 1, 2], [4, 1, 1, 0]]
    elif blocks == 150:
        direct_layers = [[2, 4, 8], [2, 2, 2], [2, 2, 4]]
        skip_layers = [[2, 2, 2, 4], [4, 2, 2, 0]]
    else:
        raise ValueError("Unsupported FishNet with number of blocks: {}".format(blocks))
    direct_channels_per_layers = [[128, 256, 512], [512, 384, 256], [320, 832, 1600]]
    skip_channels_per_layers = [[64, 128, 256, 512], [512, 768, 512, 0]]

    def expand(channels_per_layers, layers):
        # Repeat each level's channel count once per block at that level.
        return [[[c] * n for (c, n) in zip(ci, li)]
                for (ci, li) in zip(channels_per_layers, layers)]

    direct_channels = expand(direct_channels_per_layers, direct_layers)
    skip_channels = expand(skip_channels_per_layers, skip_layers)
    init_block_channels = 64
    net = FishNet(
        direct_channels=direct_channels,
        skip_channels=skip_channels,
        init_block_channels=init_block_channels,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Download (or reuse cached) weights and load them into the network.
        load_npz(
            file=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            obj=net)
    return net
def fishnet99(**kwargs):
    """
    Build the FishNet-99 network from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
    http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Delegate to the generic factory with the 99-block configuration.
    return get_fishnet(blocks=99, model_name="fishnet99", **kwargs)
def fishnet150(**kwargs):
    """
    Build the FishNet-150 network from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
    http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Delegate to the generic factory with the 150-block configuration.
    return get_fishnet(blocks=150, model_name="fishnet150", **kwargs)
def _test():
    """Smoke-test each FishNet variant: parameter count and output shape."""
    import numpy as np
    import chainer
    chainer.global_config.train = False
    pretrained = False
    # Expected trainable-parameter counts per constructor.
    expected_weight_counts = {
        fishnet99: 16628904,
        fishnet150: 24959400,
    }
    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        # A forward pass on a dummy image must yield ImageNet-1K logits.
        x = np.zeros((1, 3, 224, 224), np.float32)
        y = net(x)
        assert (y.shape == (1, 1000))
| 21,336
| 31.625382
| 115
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/hrnet.py
|
"""
HRNet for ImageNet-1K, implemented in Chainer.
Original paper: 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
"""
__all__ = ['hrnet_w18_small_v1', 'hrnet_w18_small_v2', 'hrnetv2_w18', 'hrnetv2_w30', 'hrnetv2_w32', 'hrnetv2_w40',
'hrnetv2_w44', 'hrnetv2_w48', 'hrnetv2_w64']
import os
from inspect import isfunction
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv1x1_block, conv3x3_block, SimpleSequential
from .resnet import ResUnit
class UpSamplingBlock(Chain):
    """
    HRNet specific upsampling block: 1x1 channel projection followed by
    nearest-neighbor upsampling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    scale_factor : int
        Multiplier for spatial size.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor):
        super(UpSamplingBlock, self).__init__()
        self.scale_factor = scale_factor
        with self.init_scope():
            # No activation: the fusing HRBlock applies ReLU after summation.
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=1,
                activation=None)
    def __call__(self, x):
        x = self.conv(x)
        return F.unpooling_2d(
            x=x,
            ksize=self.scale_factor,
            cover_all=False)
class HRBlock(Chain):
    """
    HRNet block: per-resolution residual branches followed by an all-to-all
    fusion of the branches.

    Parameters:
    ----------
    in_channels_list : list of int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels.
    num_branches : int
        Number of branches.
    num_subblocks : list of int
        Number of subblocks per branch.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list,
                 num_branches,
                 num_subblocks):
        super(HRBlock, self).__init__()
        # NOTE: the passed-in list is mutated below so the caller (HRStage) can
        # read back the updated channel counts via `block.in_channels_list`.
        self.in_channels_list = in_channels_list
        self.num_branches = num_branches
        with self.init_scope():
            self.branches = SimpleSequential()
            with self.branches.init_scope():
                for i in range(num_branches):
                    layers = SimpleSequential()
                    with layers.init_scope():
                        in_channels_i = self.in_channels_list[i]
                        out_channels_i = out_channels_list[i]
                        for j in range(num_subblocks[i]):
                            setattr(layers, "unit{}".format(j + 1), ResUnit(
                                in_channels=in_channels_i,
                                out_channels=out_channels_i,
                                stride=1,
                                bottleneck=False))
                            in_channels_i = out_channels_i
                        self.in_channels_list[i] = out_channels_i
                    setattr(self.branches, "branch{}".format(i + 1), layers)
            if num_branches > 1:
                # fuse_layers[i][j] maps branch j's output to branch i's resolution:
                # upsampling for j > i, identity for j == i, strided convs for j < i.
                self.fuse_layers = SimpleSequential()
                with self.fuse_layers.init_scope():
                    for i in range(num_branches):
                        fuse_layer = SimpleSequential()
                        with fuse_layer.init_scope():
                            for j in range(num_branches):
                                if j > i:
                                    setattr(fuse_layer, "block{}".format(j + 1), UpSamplingBlock(
                                        in_channels=in_channels_list[j],
                                        out_channels=in_channels_list[i],
                                        scale_factor=2 ** (j - i)))
                                elif j == i:
                                    setattr(fuse_layer, "block{}".format(j + 1), F.identity)
                                else:
                                    # Chain (i - j) stride-2 convs; only the last one
                                    # changes the channel count (and has no activation).
                                    conv3x3_seq = SimpleSequential()
                                    with conv3x3_seq.init_scope():
                                        for k in range(i - j):
                                            if k == i - j - 1:
                                                setattr(conv3x3_seq, "subblock{}".format(k + 1), conv3x3_block(
                                                    in_channels=in_channels_list[j],
                                                    out_channels=in_channels_list[i],
                                                    stride=2,
                                                    activation=None))
                                            else:
                                                setattr(conv3x3_seq, "subblock{}".format(k + 1), conv3x3_block(
                                                    in_channels=in_channels_list[j],
                                                    out_channels=in_channels_list[j],
                                                    stride=2))
                                    setattr(fuse_layer, "block{}".format(j + 1), conv3x3_seq)
                        setattr(self.fuse_layers, "layer{}".format(i + 1), fuse_layer)
            self.activ = F.relu
    def __call__(self, x):
        # Run each branch on its own resolution stream.
        for i in range(self.num_branches):
            x[i] = self.branches.el(i)(x[i])
        if self.num_branches == 1:
            return x
        # Fuse: each output i is the ReLU of the sum of all branches mapped to
        # resolution i.
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers.el(i).el(0)(x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                else:
                    y = y + self.fuse_layers.el(i).el(j)(x[j])
            x_fuse.append(self.activ(y))
        return x_fuse
class HRStage(Chain):
    """
    HRNet stage block: a transition layer adjusting branch count/channels,
    followed by a sequence of HRBlock modules.

    Parameters:
    ----------
    in_channels_list : list of int
        Number of output channels from the previous layer.
    out_channels_list : list of int
        Number of output channels in the current layer.
    num_modules : int
        Number of modules.
    num_branches : int
        Number of branches.
    num_subblocks : list of int
        Number of subblocks.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list,
                 num_modules,
                 num_branches,
                 num_subblocks):
        super(HRStage, self).__init__()
        self.branches = num_branches
        self.in_channels_list = out_channels_list
        in_branches = len(in_channels_list)
        out_branches = len(out_channels_list)
        with self.init_scope():
            # Transition: adapt existing branches (1x1-like 3x3 conv or identity)
            # and create new, lower-resolution branches with strided convs.
            self.transition = SimpleSequential()
            with self.transition.init_scope():
                for i in range(out_branches):
                    if i < in_branches:
                        if out_channels_list[i] != in_channels_list[i]:
                            setattr(self.transition, "block{}".format(i + 1), conv3x3_block(
                                in_channels=in_channels_list[i],
                                out_channels=out_channels_list[i],
                                stride=1))
                        else:
                            setattr(self.transition, "block{}".format(i + 1), F.identity)
                    else:
                        # New branch i is derived from the last input branch via
                        # (i + 1 - in_branches) stride-2 convolutions.
                        conv3x3_seq = SimpleSequential()
                        with conv3x3_seq.init_scope():
                            for j in range(i + 1 - in_branches):
                                in_channels_i = in_channels_list[-1]
                                out_channels_i = out_channels_list[i] if j == i - in_branches else in_channels_i
                                setattr(conv3x3_seq, "subblock{}".format(j + 1), conv3x3_block(
                                    in_channels=in_channels_i,
                                    out_channels=out_channels_i,
                                    stride=2))
                        setattr(self.transition, "block{}".format(i + 1), conv3x3_seq)
            self.layers = SimpleSequential()
            with self.layers.init_scope():
                for i in range(num_modules):
                    block = HRBlock(
                        in_channels_list=self.in_channels_list,
                        out_channels_list=out_channels_list,
                        num_branches=num_branches,
                        num_subblocks=num_subblocks)
                    setattr(self.layers, "block{}".format(i + 1), block)
                    # HRBlock updates its channel list in place; carry it forward.
                    self.in_channels_list = block.in_channels_list
    def __call__(self, x):
        # Apply the transition: real links get the last (or matching) stream,
        # identity entries pass their stream through unchanged.
        x_list = []
        for j in range(self.branches):
            if not isfunction(self.transition.el(j)):
                x_list.append(self.transition.el(j)(x[-1] if type(x) is list else x))
            else:
                x_list_j = x[j] if type(x) is list else x
                x_list.append(x_list_j)
        y_list = self.layers(x_list)
        return y_list
class HRInitBlock(Chain):
    """
    HRNet specific initial block: two stride-2 3x3 convolutions (4x spatial
    reduction) followed by a run of bottleneck residual units.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    num_subblocks : int
        Number of subblocks.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 num_subblocks):
        super(HRInitBlock, self).__init__()
        with self.init_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                stride=2)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                stride=2)
            in_channels = mid_channels
            self.subblocks = SimpleSequential()
            with self.subblocks.init_scope():
                for i in range(num_subblocks):
                    setattr(self.subblocks, "block{}".format(i + 1), ResUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        stride=1,
                        bottleneck=True))
                    in_channels = out_channels
    def __call__(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.subblocks(x)
        return x
class HRFinalBlock(Chain):
    """
    HRNet specific final block: fuses the per-branch multi-resolution outputs top-down into a single
    feature map for classification.
    Parameters:
    ----------
    in_channels_list : list of int
        Number of input channels per stage.
    out_channels_list : list of int
        Number of output channels per stage.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list):
        super(HRFinalBlock, self).__init__()
        with self.init_scope():
            # One bottleneck unit per branch, bringing each resolution to its target channel count.
            self.inc_blocks = SimpleSequential()
            with self.inc_blocks.init_scope():
                for i, in_channels_i in enumerate(in_channels_list):
                    setattr(self.inc_blocks, "block{}".format(i + 1), ResUnit(
                        in_channels=in_channels_i,
                        out_channels=out_channels_list[i],
                        stride=1,
                        bottleneck=True))
            # Stride-2 convolutions that carry the running sum down to the next (coarser) branch.
            self.down_blocks = SimpleSequential()
            with self.down_blocks.init_scope():
                for i in range(len(in_channels_list) - 1):
                    setattr(self.down_blocks, "block{}".format(i + 1), conv3x3_block(
                        in_channels=out_channels_list[i],
                        out_channels=out_channels_list[i + 1],
                        stride=2,
                        use_bias=True))
            self.final_layer = conv1x1_block(
                in_channels=1024,
                out_channels=2048,
                stride=1,
                use_bias=True)
    def __call__(self, x):
        # `x` is a list of per-branch feature maps, finest resolution first.
        y = self.inc_blocks.el(0)(x[0])
        for i in range(len(self.down_blocks)):
            # Downsample the accumulated features and add the next branch's transformed output.
            y = self.inc_blocks.el(i + 1)(x[i + 1]) + self.down_blocks.el(i)(y)
        y = self.final_layer(y)
        return y
class HRNet(Chain):
    """
    HRNet model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
    https://arxiv.org/abs/1908.07919.
    Parameters:
    ----------
    channels : list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    init_num_subblocks : int
        Number of subblocks in the initial unit.
    num_modules : int
        Number of modules per stage.
    num_subblocks : list of int
        Number of subblocks per stage.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 init_num_subblocks,
                 num_modules,
                 num_subblocks,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000):
        super(HRNet, self).__init__()
        self.in_size = in_size
        self.classes = classes
        # Number of parallel resolution branches in stages 1..3.
        self.branches = [2, 3, 4]
        with self.init_scope():
            self.features = SimpleSequential()
            with self.features.init_scope():
                setattr(self.features, "init_block", HRInitBlock(
                    in_channels=in_channels,
                    out_channels=init_block_channels,
                    mid_channels=64,
                    num_subblocks=init_num_subblocks))
                in_channels_list = [init_block_channels]
                for i in range(len(self.branches)):
                    # Each stage may update the per-branch channel list it passes on.
                    stage = HRStage(
                        in_channels_list=in_channels_list,
                        out_channels_list=channels[i],
                        num_modules=num_modules[i],
                        num_branches=self.branches[i],
                        num_subblocks=num_subblocks[i])
                    setattr(self.features, "stage{}".format(i + 1), stage)
                    in_channels_list = stage.in_channels_list
                setattr(self.features, "final_block", HRFinalBlock(
                    in_channels_list=in_channels_list,
                    out_channels_list=[128, 256, 512, 1024]))
                # 7x7 average pooling matches the final 7x7 map of a 224x224 input.
                setattr(self.features, "final_pool", partial(
                    F.average_pooling_2d,
                    ksize=7,
                    stride=1))
            in_channels = 2048
            self.output = SimpleSequential()
            with self.output.init_scope():
                setattr(self.output, "flatten", partial(
                    F.reshape,
                    shape=(-1, in_channels)))
                setattr(self.output, "fc", L.Linear(
                    in_size=in_channels,
                    out_size=classes))
    def __call__(self, x):
        """Return class logits of shape (batch, classes)."""
        x = self.features(x)
        x = self.output(x)
        return x
def get_hrnet(version,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".chainer", "models"),
              **kwargs):
    """
    Create HRNet model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of HRNet ('w18s1', 'w18s2', 'w18', 'w30', 'w32', 'w40', 'w44', 'w48' or 'w64').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Per-version configuration: stem width/depth, per-stage branch widths, modules per stage.
    if version == "w18s1":
        init_block_channels = 128
        init_num_subblocks = 1
        channels = [[16, 32], [16, 32, 64], [16, 32, 64, 128]]
        num_modules = [1, 1, 1]
    elif version == "w18s2":
        init_block_channels = 256
        init_num_subblocks = 2
        channels = [[18, 36], [18, 36, 72], [18, 36, 72, 144]]
        num_modules = [1, 3, 2]
    elif version == "w18":
        init_block_channels = 256
        init_num_subblocks = 4
        channels = [[18, 36], [18, 36, 72], [18, 36, 72, 144]]
        num_modules = [1, 4, 3]
    elif version == "w30":
        init_block_channels = 256
        init_num_subblocks = 4
        channels = [[30, 60], [30, 60, 120], [30, 60, 120, 240]]
        num_modules = [1, 4, 3]
    elif version == "w32":
        init_block_channels = 256
        init_num_subblocks = 4
        channels = [[32, 64], [32, 64, 128], [32, 64, 128, 256]]
        num_modules = [1, 4, 3]
    elif version == "w40":
        init_block_channels = 256
        init_num_subblocks = 4
        channels = [[40, 80], [40, 80, 160], [40, 80, 160, 320]]
        num_modules = [1, 4, 3]
    elif version == "w44":
        init_block_channels = 256
        init_num_subblocks = 4
        channels = [[44, 88], [44, 88, 176], [44, 88, 176, 352]]
        num_modules = [1, 4, 3]
    elif version == "w48":
        init_block_channels = 256
        init_num_subblocks = 4
        channels = [[48, 96], [48, 96, 192], [48, 96, 192, 384]]
        num_modules = [1, 4, 3]
    elif version == "w64":
        init_block_channels = 256
        init_num_subblocks = 4
        channels = [[64, 128], [64, 128, 256], [64, 128, 256, 512]]
        num_modules = [1, 4, 3]
    else:
        raise ValueError("Unsupported HRNet version {}".format(version))
    # Every branch of every stage uses the same subblock count (at least 2).
    num_subblocks = [[max(2, init_num_subblocks)] * len(ci) for ci in channels]
    net = HRNet(
        channels=channels,
        init_block_channels=init_block_channels,
        init_num_subblocks=init_num_subblocks,
        num_modules=num_modules,
        num_subblocks=num_subblocks,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        load_npz(
            file=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            obj=net)
    return net
def hrnet_w18_small_v1(**kwargs):
    """
    Construct the HRNet-W18 Small V1 network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).
    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnet_w18_small_v1", version="w18s1", **kwargs)
def hrnet_w18_small_v2(**kwargs):
    """
    Construct the HRNet-W18 Small V2 network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).
    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnet_w18_small_v2", version="w18s2", **kwargs)
def hrnetv2_w18(**kwargs):
    """
    Construct the HRNetV2-W18 network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).
    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnetv2_w18", version="w18", **kwargs)
def hrnetv2_w30(**kwargs):
    """
    Construct the HRNetV2-W30 network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).
    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnetv2_w30", version="w30", **kwargs)
def hrnetv2_w32(**kwargs):
    """
    Construct the HRNetV2-W32 network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).
    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnetv2_w32", version="w32", **kwargs)
def hrnetv2_w40(**kwargs):
    """
    Construct the HRNetV2-W40 network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).
    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnetv2_w40", version="w40", **kwargs)
def hrnetv2_w44(**kwargs):
    """
    Construct the HRNetV2-W44 network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).
    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnetv2_w44", version="w44", **kwargs)
def hrnetv2_w48(**kwargs):
    """
    Construct the HRNetV2-W48 network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).
    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnetv2_w48", version="w48", **kwargs)
def hrnetv2_w64(**kwargs):
    """
    Construct the HRNetV2-W64 network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).
    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    return get_hrnet(model_name="hrnetv2_w64", version="w64", **kwargs)
def _test():
    """Smoke-test every HRNet variant: exact parameter count and (1, 1000) output shape."""
    import numpy as np
    import chainer
    chainer.global_config.train = False
    pretrained = False
    models = [
        hrnet_w18_small_v1,
        hrnet_w18_small_v2,
        hrnetv2_w18,
        hrnetv2_w30,
        hrnetv2_w32,
        hrnetv2_w40,
        hrnetv2_w44,
        hrnetv2_w48,
        hrnetv2_w64,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        # `model != X or count == N` checks the count only for the matching variant.
        assert (model != hrnet_w18_small_v1 or weight_count == 13187464)
        assert (model != hrnet_w18_small_v2 or weight_count == 15597464)
        assert (model != hrnetv2_w18 or weight_count == 21299004)
        assert (model != hrnetv2_w30 or weight_count == 37712220)
        assert (model != hrnetv2_w32 or weight_count == 41232680)
        assert (model != hrnetv2_w40 or weight_count == 57557160)
        assert (model != hrnetv2_w44 or weight_count == 67064984)
        assert (model != hrnetv2_w48 or weight_count == 77469864)
        assert (model != hrnetv2_w64 or weight_count == 128059944)
        x = np.zeros((1, 3, 224, 224), np.float32)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke tests when this module is executed directly.
if __name__ == "__main__":
    _test()
| 23,990
| 34.914671
| 115
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/fcn8sd.py
|
"""
FCN-8s(d) for image segmentation, implemented in Chainer.
Original paper: 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.
"""
__all__ = ['FCN8sd', 'fcn8sd_resnetd50b_voc', 'fcn8sd_resnetd101b_voc', 'fcn8sd_resnetd50b_coco',
'fcn8sd_resnetd101b_coco', 'fcn8sd_resnetd50b_ade20k', 'fcn8sd_resnetd101b_ade20k',
'fcn8sd_resnetd50b_cityscapes', 'fcn8sd_resnetd101b_cityscapes']
import os
import chainer.functions as F
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv1x1, conv3x3_block
from .resnetd import resnetd50b, resnetd101b
class FCNFinalBlock(Chain):
    """
    Classification head of FCN-8s(d): a 3x3 bottleneck convolution, dropout, a 1x1 class-score
    convolution, and a resize back to the requested spatial size.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4):
        super(FCNFinalBlock, self).__init__()
        assert (in_channels % bottleneck_factor == 0)
        mid_channels = in_channels // bottleneck_factor
        with self.init_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            # Light dropout (ratio 0.1) between the two convolutions.
            self.dropout = partial(
                F.dropout,
                ratio=0.1)
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True)
    def __call__(self, x, out_size):
        """Return per-class score maps resized to `out_size`."""
        scores = self.conv2(self.dropout(self.conv1(x)))
        return F.resize_images(scores, output_shape=out_size)
class FCN8sd(Chain):
    """
    FCN-8s(d) model from 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.
    It is an experimental model mixed FCN-8s and PSPNet.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int, default 2048
        Number of output channels form feature extractor.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    classes : int, default 21
        Number of segmentation classes.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels=2048,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 classes=21):
        super(FCN8sd, self).__init__()
        # NOTE(review): `in_channels` is only sanity-checked here, not otherwise used.
        assert (in_channels > 0)
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        with self.init_scope():
            self.backbone = backbone
            pool_out_channels = backbone_out_channels
            self.final_block = FCNFinalBlock(
                in_channels=pool_out_channels,
                out_channels=classes)
            if self.aux:
                # Auxiliary head reads the intermediate backbone output (half the channels).
                aux_out_channels = backbone_out_channels // 2
                self.aux_block = FCNFinalBlock(
                    in_channels=aux_out_channels,
                    out_channels=classes)
    def forward(self, x):
        # NOTE(review): named `forward` while sibling classes use `__call__`; presumably relies on
        # Chainer dispatching `__call__` to `forward` — confirm against the targeted Chainer version.
        in_size = self.in_size if self.fixed_size else x.shape[2:]
        # Backbone is expected to return (final_features, intermediate_features) — see bends=(3,)
        # in the factory functions below.
        x, y = self.backbone(x)
        x = self.final_block(x, in_size)
        if self.aux:
            y = self.aux_block(y, in_size)
            return x, y
        else:
            return x
def get_fcn8sd(backbone,
               classes,
               aux=False,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".chainer", "models"),
               **kwargs):
    """
    Build an FCN-8s(d) network around a given feature extractor and optionally load pretrained weights.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    net = FCN8sd(
        backbone=backbone,
        classes=classes,
        aux=aux,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        load_npz(file=weights_path, obj=net)
    return net
def fcn8sd_resnetd50b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    Construct FCN-8s(d) with a ResNet(D)-50b backbone for Pascal VOC ('Fully Convolutional Networks
    for Semantic Segmentation,' https://arxiv.org/abs/1411.4038).
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Load pretrained weights for the backbone feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Produce an auxiliary output alongside the main one.
    pretrained : bool, default False
        Load pretrained weights for the whole model.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    backbone_net = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del backbone_net.final_pool  # classification pooling is not wanted for dense prediction
    return get_fcn8sd(backbone=backbone_net, classes=classes, aux=aux, model_name="fcn8sd_resnetd50b_voc",
                      **kwargs)
def fcn8sd_resnetd101b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    Construct FCN-8s(d) with a ResNet(D)-101b backbone for Pascal VOC ('Fully Convolutional Networks
    for Semantic Segmentation,' https://arxiv.org/abs/1411.4038).
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Load pretrained weights for the backbone feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Produce an auxiliary output alongside the main one.
    pretrained : bool, default False
        Load pretrained weights for the whole model.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    backbone_net = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del backbone_net.final_pool  # classification pooling is not wanted for dense prediction
    return get_fcn8sd(backbone=backbone_net, classes=classes, aux=aux, model_name="fcn8sd_resnetd101b_voc",
                      **kwargs)
def fcn8sd_resnetd50b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    Construct FCN-8s(d) with a ResNet(D)-50b backbone for COCO ('Fully Convolutional Networks for
    Semantic Segmentation,' https://arxiv.org/abs/1411.4038).
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Load pretrained weights for the backbone feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Produce an auxiliary output alongside the main one.
    pretrained : bool, default False
        Load pretrained weights for the whole model.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    backbone_net = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del backbone_net.final_pool  # classification pooling is not wanted for dense prediction
    return get_fcn8sd(backbone=backbone_net, classes=classes, aux=aux, model_name="fcn8sd_resnetd50b_coco",
                      **kwargs)
def fcn8sd_resnetd101b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    Construct FCN-8s(d) with a ResNet(D)-101b backbone for COCO ('Fully Convolutional Networks for
    Semantic Segmentation,' https://arxiv.org/abs/1411.4038).
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Load pretrained weights for the backbone feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Produce an auxiliary output alongside the main one.
    pretrained : bool, default False
        Load pretrained weights for the whole model.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    backbone_net = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del backbone_net.final_pool  # classification pooling is not wanted for dense prediction
    return get_fcn8sd(backbone=backbone_net, classes=classes, aux=aux, model_name="fcn8sd_resnetd101b_coco",
                      **kwargs)
def fcn8sd_resnetd50b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
    """
    Construct FCN-8s(d) with a ResNet(D)-50b backbone for ADE20K ('Fully Convolutional Networks for
    Semantic Segmentation,' https://arxiv.org/abs/1411.4038).
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Load pretrained weights for the backbone feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Produce an auxiliary output alongside the main one.
    pretrained : bool, default False
        Load pretrained weights for the whole model.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    backbone_net = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del backbone_net.final_pool  # classification pooling is not wanted for dense prediction
    return get_fcn8sd(backbone=backbone_net, classes=classes, aux=aux, model_name="fcn8sd_resnetd50b_ade20k",
                      **kwargs)
def fcn8sd_resnetd101b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
    """
    Construct FCN-8s(d) with a ResNet(D)-101b backbone for ADE20K ('Fully Convolutional Networks for
    Semantic Segmentation,' https://arxiv.org/abs/1411.4038).
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Load pretrained weights for the backbone feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Produce an auxiliary output alongside the main one.
    pretrained : bool, default False
        Load pretrained weights for the whole model.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    backbone_net = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del backbone_net.final_pool  # classification pooling is not wanted for dense prediction
    return get_fcn8sd(backbone=backbone_net, classes=classes, aux=aux, model_name="fcn8sd_resnetd101b_ade20k",
                      **kwargs)
def fcn8sd_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    Construct FCN-8s(d) with a ResNet(D)-50b backbone for Cityscapes ('Fully Convolutional Networks
    for Semantic Segmentation,' https://arxiv.org/abs/1411.4038).
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Load pretrained weights for the backbone feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Produce an auxiliary output alongside the main one.
    pretrained : bool, default False
        Load pretrained weights for the whole model.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    backbone_net = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del backbone_net.final_pool  # classification pooling is not wanted for dense prediction
    return get_fcn8sd(backbone=backbone_net, classes=classes, aux=aux, model_name="fcn8sd_resnetd50b_cityscapes",
                      **kwargs)
def fcn8sd_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    Construct FCN-8s(d) with a ResNet(D)-101b backbone for Cityscapes ('Fully Convolutional Networks
    for Semantic Segmentation,' https://arxiv.org/abs/1411.4038).
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Load pretrained weights for the backbone feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Produce an auxiliary output alongside the main one.
    pretrained : bool, default False
        Load pretrained weights for the whole model.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    backbone_net = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    del backbone_net.final_pool  # classification pooling is not wanted for dense prediction
    return get_fcn8sd(backbone=backbone_net, classes=classes, aux=aux, model_name="fcn8sd_resnetd101b_cityscapes",
                      **kwargs)
def _test():
    """Smoke-test every FCN-8s(d) variant: exact parameter count and output-map shape."""
    import numpy as np
    import chainer
    chainer.global_config.train = False
    in_size = (480, 480)
    aux = False
    pretrained = False
    models = [
        (fcn8sd_resnetd50b_voc, 21),
        (fcn8sd_resnetd101b_voc, 21),
        (fcn8sd_resnetd50b_coco, 21),
        (fcn8sd_resnetd101b_coco, 21),
        (fcn8sd_resnetd50b_ade20k, 150),
        (fcn8sd_resnetd101b_ade20k, 150),
        (fcn8sd_resnetd50b_cityscapes, 19),
        (fcn8sd_resnetd101b_cityscapes, 19),
    ]
    for model, classes in models:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        # Expected counts differ with/without the auxiliary head.
        if aux:
            assert (model != fcn8sd_resnetd50b_voc or weight_count == 35445994)
            assert (model != fcn8sd_resnetd101b_voc or weight_count == 54438122)
            assert (model != fcn8sd_resnetd50b_coco or weight_count == 35445994)
            assert (model != fcn8sd_resnetd101b_coco or weight_count == 54438122)
            assert (model != fcn8sd_resnetd50b_ade20k or weight_count == 35545324)
            assert (model != fcn8sd_resnetd101b_ade20k or weight_count == 54537452)
            assert (model != fcn8sd_resnetd50b_cityscapes or weight_count == 35444454)
            assert (model != fcn8sd_resnetd101b_cityscapes or weight_count == 54436582)
        else:
            assert (model != fcn8sd_resnetd50b_voc or weight_count == 33080789)
            assert (model != fcn8sd_resnetd101b_voc or weight_count == 52072917)
            assert (model != fcn8sd_resnetd50b_coco or weight_count == 33080789)
            assert (model != fcn8sd_resnetd101b_coco or weight_count == 52072917)
            assert (model != fcn8sd_resnetd50b_ade20k or weight_count == 33146966)
            assert (model != fcn8sd_resnetd101b_ade20k or weight_count == 52139094)
            assert (model != fcn8sd_resnetd50b_cityscapes or weight_count == 33079763)
            assert (model != fcn8sd_resnetd101b_cityscapes or weight_count == 52071891)
        x = np.zeros((1, 3, in_size[0], in_size[1]), np.float32)
        ys = net(x)
        y = ys[0] if aux else ys
        # Output map must match the input spatial size with `classes` channels.
        assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and
                (y.shape[3] == x.shape[3]))
# Run the smoke tests when this module is executed directly.
if __name__ == "__main__":
    _test()
| 15,722
| 37.34878
| 115
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/selecsls.py
|
"""
SelecSLS for ImageNet-1K, implemented in Chainer.
Original paper: 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
https://arxiv.org/abs/1907.00837.
"""
__all__ = ['SelecSLS', 'selecsls42', 'selecsls42b', 'selecsls60', 'selecsls60b', 'selecsls84']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv1x1_block, conv3x3_block, DualPathSequential, SimpleSequential
class SelecSLSBlock(Chain):
    """
    SelecSLS block: a 1x1 expansion convolution (doubling the output width) followed by a 3x3
    convolution back down to `out_channels`.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(SelecSLSBlock, self).__init__()
        mid_channels = 2 * out_channels
        with self.init_scope():
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels)
    def __call__(self, x):
        return self.conv2(self.conv1(x))
class SelecSLSUnit(Chain):
    """
    SelecSLS unit. Takes (x, skip) and returns (y, skip'): three chained branches are concatenated
    (together with the incoming skip feature when stride is 1) and fused by a 1x1 convolution.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    skip_channels : int
        Number of skipped channels.
    mid_channels : int
        Number of middle channels.
    stride : int or tuple/list of 2 int
        Stride of the branch convolution layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 skip_channels,
                 mid_channels,
                 stride):
        super(SelecSLSUnit, self).__init__()
        # A stride-2 unit starts a new resolution and resets the skip connection.
        self.resize = (stride == 2)
        mid2_channels = mid_channels // 2
        # Concat width: branch1 (mid) + branch2 (mid/2) + branch3 (mid/2) [+ skip at stride 1].
        last_channels = 2 * mid_channels + (skip_channels if stride == 1 else 0)
        with self.init_scope():
            self.branch1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                stride=stride)
            self.branch2 = SelecSLSBlock(
                in_channels=mid_channels,
                out_channels=mid2_channels)
            self.branch3 = SelecSLSBlock(
                in_channels=mid2_channels,
                out_channels=mid2_channels)
            self.last_conv = conv1x1_block(
                in_channels=last_channels,
                out_channels=out_channels)
    def __call__(self, x, x0=None):
        x1 = self.branch1(x)
        x2 = self.branch2(x1)
        x3 = self.branch3(x2)
        if self.resize:
            y = F.concat((x1, x2, x3), axis=1)
            y = self.last_conv(y)
            # The fused output also becomes the skip feature for subsequent units.
            return y, y
        else:
            y = F.concat((x1, x2, x3, x0), axis=1)
            y = self.last_conv(y)
            # Skip feature is propagated unchanged at stride 1.
            return y, x0
class SelecSLS(Chain):
    """
    SelecSLS model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    skip_channels : list of list of int
        Number of skipped channels for each unit.
    mid_channels : list of list of int
        Number of middle channels for each unit.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 1x1) kernel for each head unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 skip_channels,
                 mid_channels,
                 kernels3,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000):
        super(SelecSLS, self).__init__()
        self.in_size = in_size
        self.classes = classes
        init_block_channels = 32
        with self.init_scope():
            # Dual-path container: SelecSLS units pass (features, skip) pairs between each other.
            self.features = DualPathSequential(
                return_two=False,
                first_ordinals=1,
                last_ordinals=(1 + len(kernels3)))
            with self.features.init_scope():
                setattr(self.features, "init_block", conv3x3_block(
                    in_channels=in_channels,
                    out_channels=init_block_channels,
                    stride=2))
                in_channels = init_block_channels
                for i, channels_per_stage in enumerate(channels):
                    # k < 0: SelecSLS body stage; k >= 0: plain conv head stage.
                    k = i - len(skip_channels)
                    stage = DualPathSequential() if k < 0 else SimpleSequential()
                    with stage.init_scope():
                        for j, out_channels in enumerate(channels_per_stage):
                            stride = 2 if j == 0 else 1
                            if k < 0:
                                unit = SelecSLSUnit(
                                    in_channels=in_channels,
                                    out_channels=out_channels,
                                    skip_channels=skip_channels[i][j],
                                    mid_channels=mid_channels[i][j],
                                    stride=stride)
                            else:
                                conv_block_class = conv3x3_block if kernels3[k][j] == 1 else conv1x1_block
                                unit = conv_block_class(
                                    in_channels=in_channels,
                                    out_channels=out_channels,
                                    stride=stride)
                            setattr(stage, "unit{}".format(j + 1), unit)
                            in_channels = out_channels
                    setattr(self.features, "stage{}".format(i + 1), stage)
                setattr(self.features, "final_pool", partial(
                    F.average_pooling_2d,
                    ksize=4,
                    stride=1))
            self.output = SimpleSequential()
            with self.output.init_scope():
                setattr(self.output, "flatten", partial(
                    F.reshape,
                    shape=(-1, in_channels)))
                setattr(self.output, "fc", L.Linear(
                    in_size=in_channels,
                    out_size=classes))
    def __call__(self, x):
        """Return class logits of shape (batch, classes)."""
        x = self.features(x)
        x = self.output(x)
        return x
def get_selecsls(version,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".chainer", "models"),
                 **kwargs):
    """
    Create SelecSLS model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of SelecSLS ('42', '42b', '60', '60b' or '84').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Per-version configuration tables; the 'b' variants differ only in the head channels.
    if version in ("42", "42b"):
        channels = [[64, 128], [144, 288], [304, 480]]
        skip_channels = [[0, 64], [0, 144], [0, 304]]
        mid_channels = [[64, 64], [144, 144], [304, 304]]
        kernels3 = [[1, 1], [1, 0]]
        if version == "42":
            head_channels = [[960, 1024], [1024, 1280]]
        else:
            head_channels = [[960, 1024], [1280, 1024]]
    elif version in ("60", "60b"):
        channels = [[64, 128], [128, 128, 288], [288, 288, 288, 416]]
        skip_channels = [[0, 64], [0, 128, 128], [0, 288, 288, 288]]
        mid_channels = [[64, 64], [128, 128, 128], [288, 288, 288, 288]]
        kernels3 = [[1, 1], [1, 0]]
        if version == "60":
            head_channels = [[756, 1024], [1024, 1280]]
        else:
            head_channels = [[756, 1024], [1280, 1024]]
    elif version == "84":
        channels = [[64, 144], [144, 144, 144, 144, 304], [304, 304, 304, 304, 304, 512]]
        skip_channels = [[0, 64], [0, 144, 144, 144, 144], [0, 304, 304, 304, 304, 304]]
        mid_channels = [[64, 64], [144, 144, 144, 144, 144], [304, 304, 304, 304, 304, 304]]
        kernels3 = [[1, 1], [1, 1]]
        head_channels = [[960, 1024], [1024, 1280]]
    else:
        raise ValueError("Unsupported SelecSLS version {}".format(version))
    # Append the plain-convolution head stages to the SelecSLS body stages.
    channels += head_channels
    net = SelecSLS(
        channels=channels,
        skip_channels=skip_channels,
        mid_channels=mid_channels,
        kernels3=kernels3,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        load_npz(
            file=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            obj=net)
    return net
def selecsls42(**kwargs):
    """
    Construct the SelecSLS-42 network ('XNect: Real-time Multi-person 3D Human Pose Estimation with a
    Single RGB Camera,' https://arxiv.org/abs/1907.00837).
    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    return get_selecsls(model_name="selecsls42", version="42", **kwargs)
def selecsls42b(**kwargs):
    """
    Construct the SelecSLS-42b network ('XNect: Real-time Multi-person 3D Human Pose Estimation with a
    Single RGB Camera,' https://arxiv.org/abs/1907.00837).
    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    return get_selecsls(model_name="selecsls42b", version="42b", **kwargs)
def selecsls60(**kwargs):
    """
    Construct the SelecSLS-60 network ('XNect: Real-time Multi-person 3D Human Pose Estimation with a
    Single RGB Camera,' https://arxiv.org/abs/1907.00837).
    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights.
    root : str, default '~/.chainer/models'
        Directory for keeping the model parameters.
    """
    return get_selecsls(model_name="selecsls60", version="60", **kwargs)
def selecsls60b(**kwargs):
    """
    Construct the SelecSLS-60b network from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single
    RGB Camera,' https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    version = "60b"
    return get_selecsls(version=version, model_name="selecsls" + version, **kwargs)
def selecsls84(**kwargs):
    """
    Construct the SelecSLS-84 network from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single
    RGB Camera,' https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    version = "84"
    return get_selecsls(version=version, model_name="selecsls" + version, **kwargs)
def _test():
    """Smoke-test every SelecSLS variant: parameter count and output shape."""
    import numpy as np
    import chainer

    chainer.global_config.train = False
    pretrained = False

    # Expected trainable-parameter counts per model constructor.
    expected_counts = {
        selecsls42: 30354952,
        selecsls42b: 32458248,
        selecsls60: 30670768,
        selecsls60b: 32774064,
        selecsls84: 50954600,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        # Forward a dummy ImageNet-sized batch and check the logits shape.
        x = np.zeros((1, 3, 224, 224), np.float32)
        y = net(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 12,601
| 32.967655
| 115
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/inceptionv4.py
|
"""
InceptionV4 for ImageNet-1K, implemented in Chainer.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionV4', 'inceptionv4']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import ConvBlock, conv3x3_block, SimpleSequential, Concurrent
from .inceptionv3 import MaxPoolBranch, AvgPoolBranch, Conv1x1Branch, ConvSeqBranch
class Conv3x3Branch(Chain):
    """
    InceptionV4 specific branch consisting of a single strided 3x3 convolution
    (stride 2, no padding), i.e. a downsampling branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self, in_channels, out_channels, bn_eps):
        super(Conv3x3Branch, self).__init__()
        with self.init_scope():
            self.conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=2,
                pad=0,
                bn_eps=bn_eps)

    def __call__(self, x):
        return self.conv(x)
class ConvSeq3x3Branch(Chain):
    """
    InceptionV4 specific convolutional sequence branch block with splitting by 3x3.

    The input passes through a sequence of ConvBlock layers; the final feature
    map is then fed to two parallel asymmetric convolutions (1x3 and 3x1) whose
    outputs are concatenated, so the block emits 2 * `out_channels` channels.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels of each of the two parallel output
        convolutions (the concatenated result has twice this many channels).
    mid_channels_list : list of tuple of int
        List of numbers of output channels for middle layers.
    kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
        List of convolution window sizes.
    strides_list : list of tuple of int or tuple of tuple/list of 2 int
        List of strides of the convolution.
    padding_list : list of tuple of int or tuple of tuple/list of 2 int
        List of padding values for convolution layers.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels_list,
                 kernel_size_list,
                 strides_list,
                 padding_list,
                 bn_eps):
        super(ConvSeq3x3Branch, self).__init__()
        with self.init_scope():
            self.conv_list = SimpleSequential()
            with self.conv_list.init_scope():
                # One ConvBlock per entry of the zipped per-layer parameter
                # lists; each layer consumes the previous layer's channels.
                for i, (mid_channels, kernel_size, strides, padding) in enumerate(zip(
                        mid_channels_list, kernel_size_list, strides_list, padding_list)):
                    setattr(self.conv_list, "conv{}".format(i + 1), ConvBlock(
                        in_channels=in_channels,
                        out_channels=mid_channels,
                        ksize=kernel_size,
                        stride=strides,
                        pad=padding,
                        bn_eps=bn_eps))
                    in_channels = mid_channels  # chain the channel count forward
            # Two parallel asymmetric convolutions applied to the same input.
            self.conv1x3 = ConvBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                ksize=(1, 3),
                stride=1,
                pad=(0, 1),
                bn_eps=bn_eps)
            self.conv3x1 = ConvBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                ksize=(3, 1),
                stride=1,
                pad=(1, 0),
                bn_eps=bn_eps)

    def __call__(self, x):
        x = self.conv_list(x)
        y1 = self.conv1x3(x)
        y2 = self.conv3x1(x)
        # Channel-wise concatenation -> 2 * out_channels output channels.
        x = F.concat((y1, y2), axis=1)
        return x
class InceptionAUnit(Chain):
    """
    InceptionV4 type Inception-A unit.

    Four parallel branches over a fixed 384-channel input: 1x1 conv, 1x1->3x3,
    1x1->3x3->3x3, and avg-pool->1x1; each branch produces 96 channels.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 bn_eps):
        super(InceptionAUnit, self).__init__()
        # Fixed by the architecture: stage 1 feature maps have 384 channels.
        in_channels = 384
        with self.init_scope():
            # NOTE(review): `Concurrent` (from .common) appears to run branches in
            # parallel and concatenate outputs along the channel axis — confirm in
            # .common. Under that reading the output has 4 * 96 = 384 channels.
            self.branches = Concurrent()
            with self.branches.init_scope():
                setattr(self.branches, "branch1", Conv1x1Branch(
                    in_channels=in_channels,
                    out_channels=96,
                    bn_eps=bn_eps))
                setattr(self.branches, "branch2", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=(64, 96),
                    kernel_size_list=(1, 3),
                    strides_list=(1, 1),
                    padding_list=(0, 1),
                    bn_eps=bn_eps))
                setattr(self.branches, "branch3", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=(64, 96, 96),
                    kernel_size_list=(1, 3, 3),
                    strides_list=(1, 1, 1),
                    padding_list=(0, 1, 1),
                    bn_eps=bn_eps))
                setattr(self.branches, "branch4", AvgPoolBranch(
                    in_channels=in_channels,
                    out_channels=96,
                    bn_eps=bn_eps,
                    count_include_pad=False))

    def __call__(self, x):
        x = self.branches(x)
        return x
class ReductionAUnit(Chain):
    """
    InceptionV4 type Reduction-A unit.

    Downsampling unit: two strided convolutional branches plus a max-pool
    branch run in parallel on the 384-channel stage-1 output.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 bn_eps):
        super(ReductionAUnit, self).__init__()
        # Fixed by the architecture: follows the Inception-A stage (384 channels).
        in_channels = 384
        with self.init_scope():
            # NOTE(review): `Concurrent` appears to concatenate branch outputs on
            # the channel axis — confirm in .common; then the output would be
            # 384 + 256 + 384 (pass-through pool) = 1024 channels.
            self.branches = Concurrent()
            with self.branches.init_scope():
                # Strided 3x3 conv (spatial /2).
                setattr(self.branches, "branch1", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=(384,),
                    kernel_size_list=(3,),
                    strides_list=(2,),
                    padding_list=(0,),
                    bn_eps=bn_eps))
                # 1x1 -> 3x3 -> strided 3x3 (spatial /2).
                setattr(self.branches, "branch2", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=(192, 224, 256),
                    kernel_size_list=(1, 3, 3),
                    strides_list=(1, 1, 2),
                    padding_list=(0, 1, 0),
                    bn_eps=bn_eps))
                setattr(self.branches, "branch3", MaxPoolBranch())

    def __call__(self, x):
        x = self.branches(x)
        return x
class InceptionBUnit(Chain):
    """
    InceptionV4 type Inception-B unit.

    Four parallel branches over a fixed 1024-channel input, using factorized
    1x7/7x1 convolutions in the middle branches.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 bn_eps):
        super(InceptionBUnit, self).__init__()
        # Fixed by the architecture: stage 2 feature maps have 1024 channels.
        in_channels = 1024
        with self.init_scope():
            # NOTE(review): `Concurrent` appears to concatenate branch outputs on
            # the channel axis — confirm in .common; then the output would be
            # 384 + 256 + 256 + 128 = 1024 channels (shape-preserving).
            self.branches = Concurrent()
            with self.branches.init_scope():
                setattr(self.branches, "branch1", Conv1x1Branch(
                    in_channels=in_channels,
                    out_channels=384,
                    bn_eps=bn_eps))
                setattr(self.branches, "branch2", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=(192, 224, 256),
                    kernel_size_list=(1, (1, 7), (7, 1)),
                    strides_list=(1, 1, 1),
                    padding_list=(0, (0, 3), (3, 0)),
                    bn_eps=bn_eps))
                setattr(self.branches, "branch3", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=(192, 192, 224, 224, 256),
                    kernel_size_list=(1, (7, 1), (1, 7), (7, 1), (1, 7)),
                    strides_list=(1, 1, 1, 1, 1),
                    padding_list=(0, (3, 0), (0, 3), (3, 0), (0, 3)),
                    bn_eps=bn_eps))
                setattr(self.branches, "branch4", AvgPoolBranch(
                    in_channels=in_channels,
                    out_channels=128,
                    bn_eps=bn_eps,
                    count_include_pad=False))

    def __call__(self, x):
        x = self.branches(x)
        return x
class ReductionBUnit(Chain):
    """
    InceptionV4 type Reduction-B unit.

    Downsampling unit following the Inception-B stage: two strided
    convolutional branches plus a max-pool branch.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 bn_eps):
        super(ReductionBUnit, self).__init__()
        # Fixed by the architecture: follows the Inception-B stage (1024 channels).
        in_channels = 1024
        with self.init_scope():
            # NOTE(review): `Concurrent` appears to concatenate branch outputs on
            # the channel axis — confirm in .common; then the output would be
            # 192 + 320 + 1024 (pass-through pool) = 1536 channels.
            self.branches = Concurrent()
            with self.branches.init_scope():
                # 1x1 -> strided 3x3 (spatial /2).
                setattr(self.branches, "branch1", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=(192, 192),
                    kernel_size_list=(1, 3),
                    strides_list=(1, 2),
                    padding_list=(0, 0),
                    bn_eps=bn_eps))
                # 1x1 -> 1x7 -> 7x1 -> strided 3x3 (spatial /2).
                setattr(self.branches, "branch2", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=(256, 256, 320, 320),
                    kernel_size_list=(1, (1, 7), (7, 1), 3),
                    strides_list=(1, 1, 1, 2),
                    padding_list=(0, (0, 3), (3, 0), 0),
                    bn_eps=bn_eps))
                setattr(self.branches, "branch3", MaxPoolBranch())

    def __call__(self, x):
        x = self.branches(x)
        return x
class InceptionCUnit(Chain):
    """
    InceptionV4 type Inception-C unit.

    Four parallel branches over a fixed 1536-channel input; branches 2 and 3
    use ConvSeq3x3Branch, which splits into parallel 1x3/3x1 convolutions and
    therefore emits 2 * out_channels = 512 channels each.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 bn_eps):
        super(InceptionCUnit, self).__init__()
        # Fixed by the architecture: stage 3 feature maps have 1536 channels.
        in_channels = 1536
        with self.init_scope():
            # NOTE(review): `Concurrent` appears to concatenate branch outputs on
            # the channel axis — confirm in .common; then the output would be
            # 256 + 512 + 512 + 256 = 1536 channels (shape-preserving).
            self.branches = Concurrent()
            with self.branches.init_scope():
                setattr(self.branches, "branch1", Conv1x1Branch(
                    in_channels=in_channels,
                    out_channels=256,
                    bn_eps=bn_eps))
                setattr(self.branches, "branch2", ConvSeq3x3Branch(
                    in_channels=in_channels,
                    out_channels=256,
                    mid_channels_list=(384,),
                    kernel_size_list=(1,),
                    strides_list=(1,),
                    padding_list=(0,),
                    bn_eps=bn_eps))
                setattr(self.branches, "branch3", ConvSeq3x3Branch(
                    in_channels=in_channels,
                    out_channels=256,
                    mid_channels_list=(384, 448, 512),
                    kernel_size_list=(1, (3, 1), (1, 3)),
                    strides_list=(1, 1, 1),
                    padding_list=(0, (1, 0), (0, 1)),
                    bn_eps=bn_eps))
                setattr(self.branches, "branch4", AvgPoolBranch(
                    in_channels=in_channels,
                    out_channels=256,
                    bn_eps=bn_eps,
                    count_include_pad=False))

    def __call__(self, x):
        x = self.branches(x)
        return x
class InceptBlock3a(Chain):
    """
    InceptionV4 Mixed-3a stem block: a max-pool branch and a strided 3x3 conv
    branch (64 -> 96 channels) evaluated in parallel.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self, bn_eps):
        super(InceptBlock3a, self).__init__()
        with self.init_scope():
            self.branches = Concurrent()
            with self.branches.init_scope():
                self.branches.branch1 = MaxPoolBranch()
                self.branches.branch2 = Conv3x3Branch(
                    in_channels=64,
                    out_channels=96,
                    bn_eps=bn_eps)

    def __call__(self, x):
        return self.branches(x)
class InceptBlock4a(Chain):
    """
    InceptionV4 type Mixed-4a block.

    Two parallel convolutional branches over the 160-channel stem output, each
    ending in an unpadded 3x3 conv producing 96 channels.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 bn_eps):
        super(InceptBlock4a, self).__init__()
        with self.init_scope():
            # NOTE(review): `Concurrent` appears to concatenate branch outputs on
            # the channel axis — confirm in .common; then the output would be
            # 96 + 96 = 192 channels.
            self.branches = Concurrent()
            with self.branches.init_scope():
                # Short branch: 1x1 -> 3x3 (no padding).
                setattr(self.branches, "branch1", ConvSeqBranch(
                    in_channels=160,
                    out_channels_list=(64, 96),
                    kernel_size_list=(1, 3),
                    strides_list=(1, 1),
                    padding_list=(0, 0),
                    bn_eps=bn_eps))
                # Long branch with factorized 1x7/7x1 convolutions.
                setattr(self.branches, "branch2", ConvSeqBranch(
                    in_channels=160,
                    out_channels_list=(64, 64, 64, 96),
                    kernel_size_list=(1, (1, 7), (7, 1), 3),
                    strides_list=(1, 1, 1, 1),
                    padding_list=(0, (0, 3), (3, 0), 0),
                    bn_eps=bn_eps))

    def __call__(self, x):
        x = self.branches(x)
        return x
class InceptBlock5a(Chain):
    """
    InceptionV4 Mixed-5a stem block: a strided 3x3 conv branch (192 -> 192
    channels) and a max-pool branch evaluated in parallel.

    Parameters:
    ----------
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self, bn_eps):
        super(InceptBlock5a, self).__init__()
        with self.init_scope():
            self.branches = Concurrent()
            with self.branches.init_scope():
                self.branches.branch1 = Conv3x3Branch(
                    in_channels=192,
                    out_channels=192,
                    bn_eps=bn_eps)
                self.branches.branch2 = MaxPoolBranch()

    def __call__(self, x):
        return self.branches(x)
class InceptInitBlock(Chain):
    """
    InceptionV4 stem: three 3x3 convolutions followed by the Mixed-3a,
    Mixed-4a and Mixed-5a blocks.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self, in_channels, bn_eps):
        super(InceptInitBlock, self).__init__()
        with self.init_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels, out_channels=32, stride=2, pad=0, bn_eps=bn_eps)
            self.conv2 = conv3x3_block(
                in_channels=32, out_channels=32, stride=1, pad=0, bn_eps=bn_eps)
            self.conv3 = conv3x3_block(
                in_channels=32, out_channels=64, stride=1, pad=1, bn_eps=bn_eps)
            self.block1 = InceptBlock3a(bn_eps=bn_eps)
            self.block2 = InceptBlock4a(bn_eps=bn_eps)
            self.block3 = InceptBlock5a(bn_eps=bn_eps)

    def __call__(self, x):
        # Apply the stem layers strictly in registration order.
        for blk in (self.conv1, self.conv2, self.conv3,
                    self.block1, self.block2, self.block3):
            x = blk(x)
        return x
class InceptionV4(Chain):
    """
    InceptionV4 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.

    Parameters:
    ----------
    dropout_rate : float, default 0.0
        Fraction of the input units to drop. Must be a number between 0 and 1.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (299, 299)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 dropout_rate=0.0,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(299, 299),
                 classes=1000):
        super(InceptionV4, self).__init__()
        self.in_size = in_size
        self.classes = classes
        # Unit counts per stage; the first unit of stages 2 and 3 is replaced
        # by the corresponding reduction (downsampling) unit below.
        layers = [4, 8, 4]
        normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]
        with self.init_scope():
            self.features = SimpleSequential()
            with self.features.init_scope():
                setattr(self.features, "init_block", InceptInitBlock(
                    in_channels=in_channels,
                    bn_eps=bn_eps))
                for i, layers_per_stage in enumerate(layers):
                    stage = SimpleSequential()
                    with stage.init_scope():
                        for j in range(layers_per_stage):
                            # Stages after the first open with a reduction unit.
                            if (j == 0) and (i != 0):
                                unit = reduction_units[i - 1]
                            else:
                                unit = normal_units[i]
                            setattr(stage, "unit{}".format(j + 1), unit(bn_eps=bn_eps))
                    setattr(self.features, "stage{}".format(i + 1), stage)
                # NOTE(review): ksize=8 assumes the default 299x299 input — a
                # different `in_size` would need a different pooling size.
                setattr(self.features, "final_pool", partial(
                    F.average_pooling_2d,
                    ksize=8,
                    stride=1))
            # Channel count of the final Inception-C stage output.
            in_channels = 1536
            self.output = SimpleSequential()
            with self.output.init_scope():
                setattr(self.output, "flatten", partial(
                    F.reshape,
                    shape=(-1, in_channels)))
                # Dropout is only registered when a positive rate is requested.
                if dropout_rate > 0.0:
                    setattr(self.output, "dropout", partial(
                        F.dropout,
                        ratio=dropout_rate))
                setattr(self.output, "fc", L.Linear(
                    in_size=in_channels,
                    out_size=classes))

    def __call__(self, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_inceptionv4(model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".chainer", "models"),
                    **kwargs):
    """
    Build an InceptionV4 network, optionally loading pretrained weights.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    net = InceptionV4(**kwargs)
    if not pretrained:
        return net
    # A usable model name is mandatory when pretrained weights are requested.
    if not model_name:
        raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
    from .model_store import get_model_file
    weights_file = get_model_file(
        model_name=model_name,
        local_model_store_dir_path=root)
    load_npz(file=weights_file, obj=net)
    return net
def inceptionv4(**kwargs):
    """
    Construct the InceptionV4 network from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections
    on Learning,' https://arxiv.org/abs/1602.07261.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # The reference implementation trains this model with a larger BN epsilon.
    cfg = dict(model_name="inceptionv4", bn_eps=1e-3)
    return get_inceptionv4(**cfg, **kwargs)
def _test():
    """Smoke-test InceptionV4: parameter count and output shape."""
    import numpy as np
    import chainer

    chainer.global_config.train = False
    pretrained = False

    net = inceptionv4(pretrained=pretrained)
    weight_count = net.count_params()
    print("m={}, {}".format(inceptionv4.__name__, weight_count))
    assert (weight_count == 42679816)
    # Forward a dummy 299x299 batch and check the logits shape.
    x = np.zeros((1, 3, 299, 299), np.float32)
    y = net(x)
    assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 20,057
| 31.721044
| 115
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/regnet.py
|
"""
RegNet for ImageNet-1K, implemented in Chainer.
Original paper: 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
"""
__all__ = ['RegNet', 'regnetx002', 'regnetx004', 'regnetx006', 'regnetx008', 'regnetx016', 'regnetx032', 'regnetx040',
'regnetx064', 'regnetx080', 'regnetx120', 'regnetx160', 'regnetx320', 'regnety002', 'regnety004',
'regnety006', 'regnety008', 'regnety016', 'regnety032', 'regnety040', 'regnety064', 'regnety080',
'regnety120', 'regnety160', 'regnety320']
import os
import numpy as np
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv1x1_block, conv3x3_block, SEBlock, SimpleSequential
class RegNetBottleneck(Chain):
    """
    Bottleneck branch of a RegNet unit: 1x1 conv, grouped 3x3 conv, optional
    SE attention, then a 1x1 projection without activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    groups : int
        Number of groups.
    use_se : bool
        Whether to use SE-module.
    bottleneck_factor : int, default 1
        Bottleneck factor.
    """
    def __init__(self, in_channels, out_channels, stride, groups, use_se,
                 bottleneck_factor=1):
        super(RegNetBottleneck, self).__init__()
        self.use_se = use_se
        mid_channels = out_channels // bottleneck_factor
        with self.init_scope():
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                stride=stride,
                groups=(mid_channels // groups))
            if use_se:
                self.se = SEBlock(
                    channels=mid_channels,
                    mid_channels=(in_channels // 4))
            self.conv3 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                activation=None)

    def __call__(self, x):
        x = self.conv2(self.conv1(x))
        if self.use_se:
            x = self.se(x)
        return self.conv3(x)
class RegNetUnit(Chain):
    """
    RegNet residual unit: bottleneck body plus identity shortcut, with a 1x1
    projection on the shortcut when shape changes.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    groups : int
        Number of groups.
    use_se : bool
        Whether to use SE-module.
    """
    def __init__(self, in_channels, out_channels, stride, groups, use_se):
        super(RegNetUnit, self).__init__()
        # The shortcut needs a projection whenever shape or stride changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        with self.init_scope():
            self.body = RegNetBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                groups=groups,
                use_se=use_se)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    activation=None)
            self.activ = F.relu

    def __call__(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        return self.activ(self.body(x) + identity)
class RegNet(Chain):
    """
    RegNet model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    groups : list of int
        Number of groups for each stage.
    use_se : bool
        Whether to use SE-module.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 groups,
                 use_se,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000):
        super(RegNet, self).__init__()
        self.in_size = in_size
        self.classes = classes
        with self.init_scope():
            self.features = SimpleSequential()
            with self.features.init_scope():
                # Strided stem conv halves the spatial resolution.
                setattr(self.features, "init_block", conv3x3_block(
                    in_channels=in_channels,
                    out_channels=init_block_channels,
                    stride=2,
                    pad=1))
                in_channels = init_block_channels
                for i, (channels_per_stage, groups_per_stage) in enumerate(zip(channels, groups)):
                    stage = SimpleSequential()
                    with stage.init_scope():
                        for j, out_channels in enumerate(channels_per_stage):
                            # Only the first unit of each stage downsamples.
                            stride = 2 if (j == 0) else 1
                            setattr(stage, "unit{}".format(j + 1), RegNetUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                stride=stride,
                                groups=groups_per_stage,
                                use_se=use_se))
                            in_channels = out_channels
                    setattr(self.features, "stage{}".format(i + 1), stage)
                # NOTE(review): the //32 pooling size assumes five stride-2
                # reductions in total (stem + 4 stages) — confirm `channels`
                # always holds exactly 4 stages.
                setattr(self.features, "final_pool", partial(
                    F.average_pooling_2d,
                    ksize=(in_size[0] // 32, in_size[1] // 32)))
            self.output = SimpleSequential()
            with self.output.init_scope():
                setattr(self.output, "flatten", partial(
                    F.reshape,
                    shape=(-1, in_channels)))
                setattr(self.output, "fc", L.Linear(
                    in_size=in_channels,
                    out_size=classes))

    def __call__(self, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_regnet(channels_init,
               channels_slope,
               channels_mult,
               depth,
               groups,
               use_se=False,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".chainer", "models"),
               **kwargs):
    """
    Create RegNet model with specific parameters.

    Parameters:
    ----------
    channels_init : float
        Initial value for channels/widths.
    channels_slope : float
        Slope value for channels/widths.
    channels_mult : float
        Width multiplier value.
    depth : int
        Depth value (total number of units in the network).
    groups : int
        Number of groups.
    use_se : bool, default False
        Whether to use SE-module.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.

    Raises:
    ------
    ValueError
        If the width-generation parameters are inconsistent, or if
        `pretrained` is requested without a proper `model_name`.
    """
    divisor = 8
    # Validate with an explicit exception instead of `assert`, which is
    # silently stripped under `python -O`.
    if not ((channels_slope >= 0) and (channels_init > 0) and
            (channels_mult > 1) and (channels_init % divisor == 0)):
        raise ValueError("Invalid RegNet width-generation parameters")
    # Generate continuous per-block channels/widths:
    channels_cont = np.arange(depth) * channels_slope + channels_init
    # Generate quantized per-block channels/widths:
    channels_exps = np.round(np.log(channels_cont / channels_init) / np.log(channels_mult))
    channels = channels_init * np.power(channels_mult, channels_exps)
    # Use builtin `int` instead of the `np.int` alias, which was deprecated in
    # NumPy 1.20 and removed in NumPy 1.24.
    channels = (np.round(channels / divisor) * divisor).astype(int)
    # Generate per stage channels/widths and layers/depths:
    channels_per_stage, layers = np.unique(channels, return_counts=True)
    # Adjust the channels/widths so they are divisible by the group count:
    groups_per_stage = [min(groups, c) for c in channels_per_stage]
    channels_per_stage = [int(round(c / g) * g) for c, g in zip(channels_per_stage, groups_per_stage)]
    channels = [[ci] * li for (ci, li) in zip(channels_per_stage, layers)]
    init_block_channels = 32
    net = RegNet(
        channels=channels,
        init_block_channels=init_block_channels,
        groups=groups_per_stage,
        use_se=use_se,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        load_npz(
            file=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            obj=net)
    return net
def regnetx002(**kwargs):
    """
    Construct the RegNetX-200MF network from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=24, channels_slope=36.44, channels_mult=2.49, depth=13, groups=8)
    return get_regnet(model_name="regnetx002", **cfg, **kwargs)
def regnetx004(**kwargs):
    """
    Construct the RegNetX-400MF network from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=24, channels_slope=24.48, channels_mult=2.54, depth=22, groups=16)
    return get_regnet(model_name="regnetx004", **cfg, **kwargs)
def regnetx006(**kwargs):
    """
    Construct the RegNetX-600MF network from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=48, channels_slope=36.97, channels_mult=2.24, depth=16, groups=24)
    return get_regnet(model_name="regnetx006", **cfg, **kwargs)
def regnetx008(**kwargs):
    """
    Construct the RegNetX-800MF network from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=56, channels_slope=35.73, channels_mult=2.28, depth=16, groups=16)
    return get_regnet(model_name="regnetx008", **cfg, **kwargs)
def regnetx016(**kwargs):
    """
    Construct the RegNetX-1.6GF network from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=80, channels_slope=34.01, channels_mult=2.25, depth=18, groups=24)
    return get_regnet(model_name="regnetx016", **cfg, **kwargs)
def regnetx032(**kwargs):
    """
    Construct the RegNetX-3.2GF network from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=88, channels_slope=26.31, channels_mult=2.25, depth=25, groups=48)
    return get_regnet(model_name="regnetx032", **cfg, **kwargs)
def regnetx040(**kwargs):
    """
    Construct the RegNetX-4.0GF network from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=96, channels_slope=38.65, channels_mult=2.43, depth=23, groups=40)
    return get_regnet(model_name="regnetx040", **cfg, **kwargs)
def regnetx064(**kwargs):
    """
    Construct the RegNetX-6.4GF network from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=184, channels_slope=60.83, channels_mult=2.07, depth=17, groups=56)
    return get_regnet(model_name="regnetx064", **cfg, **kwargs)
def regnetx080(**kwargs):
    """
    Construct the RegNetX-8.0GF network from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=80, channels_slope=49.56, channels_mult=2.88, depth=23, groups=120)
    return get_regnet(model_name="regnetx080", **cfg, **kwargs)
def regnetx120(**kwargs):
    """
    Construct the RegNetX-12GF network from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=168, channels_slope=73.36, channels_mult=2.37, depth=19, groups=112)
    return get_regnet(model_name="regnetx120", **cfg, **kwargs)
def regnetx160(**kwargs):
    """
    Construct the RegNetX-16GF network from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=216, channels_slope=55.59, channels_mult=2.1, depth=22, groups=128)
    return get_regnet(model_name="regnetx160", **cfg, **kwargs)
def regnetx320(**kwargs):
    """
    Construct the RegNetX-32GF network from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=320, channels_slope=69.86, channels_mult=2.0, depth=23, groups=168)
    return get_regnet(model_name="regnetx320", **cfg, **kwargs)
def regnety002(**kwargs):
    """
    Construct the RegNetY-200MF network (RegNetX + SE blocks) from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=24, channels_slope=36.44, channels_mult=2.49, depth=13, groups=8, use_se=True)
    return get_regnet(model_name="regnety002", **cfg, **kwargs)
def regnety004(**kwargs):
    """
    Construct the RegNetY-400MF network (RegNetX + SE blocks) from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=48, channels_slope=27.89, channels_mult=2.09, depth=16, groups=8, use_se=True)
    return get_regnet(model_name="regnety004", **cfg, **kwargs)
def regnety006(**kwargs):
    """
    Construct the RegNetY-600MF network (RegNetX + SE blocks) from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=48, channels_slope=32.54, channels_mult=2.32, depth=15, groups=16, use_se=True)
    return get_regnet(model_name="regnety006", **cfg, **kwargs)
def regnety008(**kwargs):
    """
    Construct the RegNetY-800MF network (RegNetX + SE blocks) from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=56, channels_slope=38.84, channels_mult=2.4, depth=14, groups=16, use_se=True)
    return get_regnet(model_name="regnety008", **cfg, **kwargs)
def regnety016(**kwargs):
    """
    Construct the RegNetY-1.6GF network (RegNetX + SE blocks) from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=48, channels_slope=20.71, channels_mult=2.65, depth=27, groups=24, use_se=True)
    return get_regnet(model_name="regnety016", **cfg, **kwargs)
def regnety032(**kwargs):
    """
    Construct the RegNetY-3.2GF network (RegNetX + SE blocks) from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=80, channels_slope=42.63, channels_mult=2.66, depth=21, groups=24, use_se=True)
    return get_regnet(model_name="regnety032", **cfg, **kwargs)
def regnety040(**kwargs):
    """
    Construct the RegNetY-4.0GF network (RegNetX + SE blocks) from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=96, channels_slope=31.41, channels_mult=2.24, depth=22, groups=64, use_se=True)
    return get_regnet(model_name="regnety040", **cfg, **kwargs)
def regnety064(**kwargs):
    """
    Construct the RegNetY-6.4GF network (RegNetX + SE blocks) from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=112, channels_slope=33.22, channels_mult=2.27, depth=25, groups=72, use_se=True)
    return get_regnet(model_name="regnety064", **cfg, **kwargs)
def regnety080(**kwargs):
    """
    Construct the RegNetY-8.0GF network (RegNetX + SE blocks) from 'Designing Network Design Spaces,'
    https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(channels_init=192, channels_slope=76.82, channels_mult=2.19, depth=17, groups=56, use_se=True)
    return get_regnet(model_name="regnety080", **cfg, **kwargs)
def regnety120(**kwargs):
    """
    RegNetY-12GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Width/depth configuration for the RegNetY-12GF operating point.
    cfg = dict(channels_init=168, channels_slope=73.36, channels_mult=2.37, depth=19, groups=112, use_se=True)
    return get_regnet(model_name="regnety120", **cfg, **kwargs)
def regnety160(**kwargs):
    """
    RegNetY-16GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Width/depth configuration for the RegNetY-16GF operating point.
    cfg = dict(channels_init=200, channels_slope=106.23, channels_mult=2.48, depth=18, groups=112, use_se=True)
    return get_regnet(model_name="regnety160", **cfg, **kwargs)
def regnety320(**kwargs):
    """
    RegNetY-32GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Width/depth configuration for the RegNetY-32GF operating point.
    cfg = dict(channels_init=232, channels_slope=115.89, channels_mult=2.53, depth=20, groups=232, use_se=True)
    return get_regnet(model_name="regnety320", **cfg, **kwargs)
def _test():
    """Smoke-test every RegNet factory: parameter count and output shape."""
    import numpy as np
    import chainer

    chainer.global_config.train = False
    pretrained = False

    # Model factory -> expected trainable-parameter count (regression check).
    expected_params = {
        regnetx002: 2684792,
        regnetx004: 5157512,
        regnetx006: 6196040,
        regnetx008: 7259656,
        regnetx016: 9190136,
        regnetx032: 15296552,
        regnetx040: 22118248,
        regnetx064: 26209256,
        regnetx080: 39572648,
        regnetx120: 46106056,
        regnetx160: 54278536,
        regnetx320: 107811560,
        regnety002: 3162996,
        regnety004: 4344144,
        regnety006: 6055160,
        regnety008: 6263168,
        regnety016: 11202430,
        regnety032: 19436338,
        regnety040: 20646656,
        regnety064: 30583252,
        regnety080: 39180068,
        regnety120: 51822544,
        regnety160: 83590140,
        regnety320: 145046770,
    }

    for model, expected in expected_params.items():
        net = model(pretrained=pretrained)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected
        # Forward a dummy batch and verify the ImageNet-1K logits shape.
        batch = 14
        size = 224
        x = np.zeros((batch, 3, size, size), np.float32)
        y = net(x)
        assert y.shape == (batch, 1000)


if __name__ == "__main__":
    _test()
| 24,754
| 33.622378
| 118
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/icnet.py
|
"""
ICNet for image segmentation, implemented in Chainer.
Original paper: 'ICNet for Real-Time Semantic Segmentation on High-Resolution Images,'
https://arxiv.org/abs/1704.08545.
"""
__all__ = ['ICNet', 'icnet_resnetd50b_cityscapes']
import os
import chainer.functions as F
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv1x1, conv1x1_block, conv3x3_block, InterpolationBlock, MultiOutputSequential
from .pspnet import PyramidPooling
from .resnetd import resnetd50b
class ICInitBlock(Chain):
    """
    ICNet specific initial block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(ICInitBlock, self).__init__()
        mid_channels = out_channels // 2
        with self.init_scope():
            # Three stride-2 3x3 convolutions: overall spatial downscale of x8.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                stride=2)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                stride=2)
            self.conv3 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                stride=2)
    def __call__(self, x):
        # Sequential application of the three downsampling conv blocks.
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class PSPBlock(Chain):
    """
    ICNet specific PSPNet reduced head block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    upscale_out_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation.
    bottleneck_factor : int
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 upscale_out_size,
                 bottleneck_factor):
        super(PSPBlock, self).__init__()
        assert (in_channels % bottleneck_factor == 0)
        mid_channels = in_channels // bottleneck_factor
        with self.init_scope():
            self.pool = PyramidPooling(
                in_channels=in_channels,
                upscale_out_size=upscale_out_size)
            # NOTE(review): 4096 is hard-coded rather than derived from
            # `in_channels`; it presumably matches PyramidPooling's output
            # width for in_channels == 2048 -- confirm before reusing this
            # block with other backbone widths.
            self.conv = conv3x3_block(
                in_channels=4096,
                out_channels=mid_channels)
            self.dropout = partial(
                F.dropout,
                ratio=0.1)
    def __call__(self, x):
        # Pyramid pooling, channel reduction, then light dropout (ratio 0.1).
        x = self.pool(x)
        x = self.conv(x)
        x = self.dropout(x)
        return x
class CFFBlock(Chain):
    """
    Cascade Feature Fusion block.
    Fuses an upsampled low-resolution feature map with a high-resolution one
    and also emits an auxiliary class map from the low branch.
    Parameters:
    ----------
    in_channels_low : int
        Number of input channels (low-resolution input).
    in_channels_high : int
        Number of input channels (high-resolution input).
    out_channels : int
        Number of output channels.
    classes : int
        Number of classification classes.
    """
    def __init__(self,
                 in_channels_low,
                 in_channels_high,
                 out_channels,
                 classes):
        super(CFFBlock, self).__init__()
        with self.init_scope():
            self.up = InterpolationBlock(scale_factor=2)
            # Dilated 3x3 conv on the (upsampled) low-resolution branch.
            self.conv_low = conv3x3_block(
                in_channels=in_channels_low,
                out_channels=out_channels,
                pad=2,
                dilate=2,
                activation=None)
            # NOTE: attribute name `conv_hign` (sic) is kept; renaming would
            # break loading of existing pretrained weight files.
            self.conv_hign = conv1x1_block(
                in_channels=in_channels_high,
                out_channels=out_channels,
                activation=None)
            self.activ = F.relu
            self.conv_cls = conv1x1(
                in_channels=out_channels,
                out_channels=classes)
    def __call__(self, xl, xh):
        xl = self.up(xl)
        xl = self.conv_low(xl)
        xh = self.conv_hign(xh)
        x = xl + xh
        x = self.activ(x)
        # Auxiliary class map is computed from the low branch only (pre-fusion).
        x_cls = self.conv_cls(xl)
        return x, x_cls
class ICHeadBlock(Chain):
    """
    ICNet head block.
    Cascades two CFF fusions over the three branch outputs and returns class
    maps at several scales (see __call__).
    Parameters:
    ----------
    classes : int
        Number of classification classes.
    """
    def __init__(self,
                 classes):
        super(ICHeadBlock, self).__init__()
        with self.init_scope():
            self.cff_12 = CFFBlock(
                in_channels_low=128,
                in_channels_high=64,
                out_channels=128,
                classes=classes)
            self.cff_24 = CFFBlock(
                in_channels_low=256,
                in_channels_high=256,
                out_channels=128,
                classes=classes)
            self.up_x2 = InterpolationBlock(scale_factor=2)
            # NOTE(review): named `up_x8` but uses scale_factor=4 -- confirm
            # whether the remaining x2 is intentional or handled elsewhere.
            self.up_x8 = InterpolationBlock(scale_factor=4)
            self.conv_cls = conv1x1(
                in_channels=128,
                out_channels=classes)
    def __call__(self, x1, x2, x4):
        # x1/x2/x4: branch features at progressively coarser resolutions.
        outputs = []
        x_cff_24, x_24_cls = self.cff_24(x4, x2)
        outputs.append(x_24_cls)
        x_cff_12, x_12_cls = self.cff_12(x_cff_24, x1)
        outputs.append(x_12_cls)
        up_x2 = self.up_x2(x_cff_12)
        up_x2 = self.conv_cls(up_x2)
        outputs.append(up_x2)
        up_x8 = self.up_x8(up_x2)
        outputs.append(up_x8)
        # 1 -> 1/4 -> 1/8 -> 1/16
        outputs.reverse()
        return tuple(outputs)
class ICNet(Chain):
    """
    ICNet model from 'ICNet for Real-Time Semantic Segmentation on High-Resolution Images,'
    https://arxiv.org/abs/1704.08545.
    Parameters:
    ----------
    backbones : tuple of nn.Sequential
        Feature extractors.
    backbones_out_channels : tuple of int
        Number of output channels from each feature extractor.
    channels : tuple of int
        Number of output channels for each branch.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    classes : int, default 21
        Number of segmentation classes.
    """
    def __init__(self,
                 backbones,
                 backbones_out_channels,
                 channels,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 classes=21):
        super(ICNet, self).__init__()
        assert (in_channels > 0)
        # Input must be divisible by 8 (branch2 downsamples twice, plus the
        # backbone strides).
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        # Fixed pooling size only when the input resolution is known up front.
        psp_pool_out_size = (self.in_size[0] // 32, self.in_size[1] // 32) if fixed_size else None
        psp_head_out_channels = 512
        with self.init_scope():
            # branch1: full-resolution path (light init block).
            self.branch1 = ICInitBlock(
                in_channels=in_channels,
                out_channels=channels[0])
            # branch2: downsampled path through the two backbone halves + PSP.
            self.branch2 = MultiOutputSequential()
            with self.branch2.init_scope():
                setattr(self.branch2, "down1", InterpolationBlock(
                    scale_factor=2,
                    up=False))
                # Tap the first backbone's output as an extra branch output.
                backbones[0].do_output = True
                setattr(self.branch2, "backbones1", backbones[0])
                setattr(self.branch2, "down2", InterpolationBlock(
                    scale_factor=2,
                    up=False))
                setattr(self.branch2, "backbones2", backbones[1])
                setattr(self.branch2, "psp", PSPBlock(
                    in_channels=backbones_out_channels[1],
                    upscale_out_size=psp_pool_out_size,
                    bottleneck_factor=4))
                setattr(self.branch2, "final_block", conv1x1_block(
                    in_channels=psp_head_out_channels,
                    out_channels=channels[2]))
            self.conv_y2 = conv1x1_block(
                in_channels=backbones_out_channels[0],
                out_channels=channels[1])
            self.final_block = ICHeadBlock(classes=classes)
    def __call__(self, x):
        y1 = self.branch1(x)
        # branch2 yields (final output, tapped intermediate output).
        y3, y2 = self.branch2(x)
        y2 = self.conv_y2(y2)
        x = self.final_block(y1, y2, y3)
        if self.aux:
            # Tuple of multi-scale class maps (see ICHeadBlock.__call__).
            return x
        else:
            return x[0]
def get_icnet(backbones,
              backbones_out_channels,
              classes,
              aux=False,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".chainer", "models"),
              **kwargs):
    """
    Create ICNet model with specific parameters.

    Parameters:
    ----------
    backbones : tuple of nn.Sequential
        Feature extractors.
    backbones_out_channels : tuple of int
        Number of output channels from each feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Per-branch output widths used by all ICNet variants.
    channels = (64, 256, 256)

    # The branches consume plain tensors, so disable multi-output mode.
    for backbone in backbones[:2]:
        backbone.multi_output = False

    net = ICNet(
        backbones=backbones,
        backbones_out_channels=backbones_out_channels,
        channels=channels,
        classes=classes,
        aux=aux,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        load_npz(file=weights_path, obj=net)
    return net
def icnet_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    ICNet model on the base of ResNet(D)-50b for Cityscapes from 'ICNet for Real-Time Semantic Segmentation on
    High-Resolution Images,' https://arxiv.org/abs/1704.08545.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # First backbone half: keep only the first three feature stages.
    backbone1 = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=None).features
    for _ in range(len(backbone1) - 3):
        delattr(backbone1, backbone1.layer_names[-1])

    # Second backbone half: drop the final pool and the first three stages,
    # so it continues where backbone1 left off.
    backbone2 = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=None).features
    del backbone2.final_pool
    for _ in range(3):
        delattr(backbone2, backbone2.layer_names[0])

    return get_icnet(
        backbones=(backbone1, backbone2),
        backbones_out_channels=(512, 2048),
        classes=classes,
        aux=aux,
        model_name="icnet_resnetd50b_cityscapes",
        **kwargs)
def _test():
    """Smoke-test the ICNet factory: parameter count and output geometry."""
    import numpy as np
    import chainer

    chainer.global_config.train = False

    in_size = (480, 480)
    aux = True
    fixed_size = False
    pretrained = False

    # Model factory -> (number of classes, expected parameter count).
    cases = {
        icnet_resnetd50b_cityscapes: (19, 47489184),
    }
    for model, (classes, expected) in cases.items():
        net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size, aux=aux)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected
        # The primary output must match the input spatial size and class count.
        x = np.zeros((1, 3, in_size[0], in_size[1]), np.float32)
        ys = net(x)
        y = ys[0] if aux else ys
        assert y.shape[0] == x.shape[0] and y.shape[1] == classes
        assert y.shape[2] == x.shape[2] and y.shape[3] == x.shape[3]


if __name__ == "__main__":
    _test()
| 12,461
| 30.549367
| 115
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/mobilenetb.py
|
"""
MobileNet(B) with simplified depthwise separable convolution block for ImageNet-1K, implemented in Chainer.
Original paper: 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
"""
__all__ = ['mobilenetb_w1', 'mobilenetb_w3d4', 'mobilenetb_wd2', 'mobilenetb_wd4']
from .mobilenet import get_mobilenet
def mobilenetb_w1(**kwargs):
    """
    1.0 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = {"width_scale": 1.0, "dws_simplified": True, "model_name": "mobilenetb_w1"}
    return get_mobilenet(**cfg, **kwargs)
def mobilenetb_w3d4(**kwargs):
    """
    0.75 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = {"width_scale": 0.75, "dws_simplified": True, "model_name": "mobilenetb_w3d4"}
    return get_mobilenet(**cfg, **kwargs)
def mobilenetb_wd2(**kwargs):
    """
    0.5 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = {"width_scale": 0.5, "dws_simplified": True, "model_name": "mobilenetb_wd2"}
    return get_mobilenet(**cfg, **kwargs)
def mobilenetb_wd4(**kwargs):
    """
    0.25 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = {"width_scale": 0.25, "dws_simplified": True, "model_name": "mobilenetb_wd4"}
    return get_mobilenet(**cfg, **kwargs)
def _test():
    """Smoke-test every MobileNet(B) factory: parameter count and output shape."""
    import numpy as np
    import chainer

    chainer.global_config.train = False
    pretrained = False

    # Model factory -> expected trainable-parameter count (regression check).
    expected_params = {
        mobilenetb_w1: 4222056,
        mobilenetb_w3d4: 2578120,
        mobilenetb_wd2: 1326632,
        mobilenetb_wd4: 467592,
    }
    for model, expected in expected_params.items():
        net = model(pretrained=pretrained)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected
        # Forward a dummy image and verify the ImageNet-1K logits shape.
        x = np.zeros((1, 3, 224, 224), np.float32)
        y = net(x)
        assert y.shape == (1, 1000)


if __name__ == "__main__":
    _test()
| 3,571
| 33.019048
| 113
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/shakedropresnet_cifar.py
|
"""
ShakeDrop-ResNet for CIFAR/SVHN, implemented in Chainer.
Original paper: 'ShakeDrop Regularization for Deep Residual Learning,' https://arxiv.org/abs/1802.02375.
"""
__all__ = ['CIFARShakeDropResNet', 'shakedropresnet20_cifar10', 'shakedropresnet20_cifar100', 'shakedropresnet20_svhn']
import os
import chainer
from chainer import backend
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv1x1_block, conv3x3_block, SimpleSequential
from .resnet import ResBlock, ResBottleneck
class ShakeDrop(chainer.function.Function):
    """
    ShakeDrop function.
    In training mode the residual branch is either kept as-is (Bernoulli draw
    b == 1) or scaled by a per-sample random alpha (b == 0); at test time the
    input is simply scaled by the expected survival probability `p`.
    Parameters:
    ----------
    p : float
        ShakeDrop specific probability (of life) for Bernoulli random variable.
    """
    def __init__(self, p):
        super(ShakeDrop, self).__init__()
        self.p = p
        # Bernoulli draw from the last forward pass; reused in backward().
        self.b = None
    def forward(self, inputs):
        x, = inputs
        if chainer.config.train:
            xp = backend.get_array_module(x)
            b = xp.random.binomial(n=1, p=self.p)
            # Per-sample alpha in [-1, 1), broadcast over C/H/W.
            alpha = xp.empty((x.shape[0], 1, 1, 1), dtype=x.dtype)
            for i in range(len(alpha)):
                alpha[i] = xp.random.uniform(low=-1.0, high=1.0)
            # b == 1 -> y = x (branch survives); b == 0 -> y = alpha * x.
            y = (b + alpha - b * alpha) * x
            self.b = b
        else:
            y = self.p * x
        return y,
    def backward(self, inputs, grad_outputs):
        dy, = grad_outputs
        b = self.b
        xp = backend.get_array_module(dy)
        # Per-sample beta in [0, 1), drawn independently of forward's alpha.
        beta = xp.empty((dy.shape[0], 1, 1, 1), dtype=dy.dtype)
        for i in range(len(beta)):
            beta[i] = xp.random.uniform(low=0.0, high=1.0)
        return (b + beta - b * beta) * dy,
class ShakeDropResUnit(Chain):
    """
    ShakeDrop-ResNet unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    life_prob : float
        Residual branch life probability.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck,
                 life_prob):
        super(ShakeDropResUnit, self).__init__()
        self.life_prob = life_prob
        # A projection shortcut is needed whenever the shape changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        body_class = ResBottleneck if bottleneck else ResBlock
        with self.init_scope():
            self.body = body_class(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    activation=None)
            self.activ = F.relu
    def __call__(self, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        # A fresh ShakeDrop instance per call: the Function stores per-call
        # state (self.b) that must not be shared between invocations.
        x = ShakeDrop(self.life_prob)(x) + identity
        x = self.activ(x)
        return x
class CIFARShakeDropResNet(Chain):
    """
    ShakeDrop-ResNet model for CIFAR from 'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    life_probs : list of float
        Residual branch life probability for each unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 life_probs,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10):
        super(CIFARShakeDropResNet, self).__init__()
        self.in_size = in_size
        self.classes = classes
        with self.init_scope():
            self.features = SimpleSequential()
            with self.features.init_scope():
                setattr(self.features, "init_block", conv3x3_block(
                    in_channels=in_channels,
                    out_channels=init_block_channels))
                # `in_channels` is reused below as the running channel count.
                in_channels = init_block_channels
                # k indexes the flat per-unit `life_probs` list.
                k = 0
                for i, channels_per_stage in enumerate(channels):
                    stage = SimpleSequential()
                    with stage.init_scope():
                        for j, out_channels in enumerate(channels_per_stage):
                            # Downsample at the start of every stage but the first.
                            stride = 2 if (j == 0) and (i != 0) else 1
                            setattr(stage, "unit{}".format(j + 1), ShakeDropResUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                stride=stride,
                                bottleneck=bottleneck,
                                life_prob=life_probs[k]))
                            in_channels = out_channels
                            k += 1
                    setattr(self.features, "stage{}".format(i + 1), stage)
                # 32x32 input downscaled x4 by the two strided stages -> 8x8.
                setattr(self.features, "final_pool", partial(
                    F.average_pooling_2d,
                    ksize=8,
                    stride=1))
            self.output = SimpleSequential()
            with self.output.init_scope():
                setattr(self.output, "flatten", partial(
                    F.reshape,
                    shape=(-1, in_channels)))
                setattr(self.output, "fc", L.Linear(
                    in_size=in_channels,
                    out_size=classes))
    def __call__(self, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_shakedropresnet_cifar(classes,
                              blocks,
                              bottleneck,
                              model_name=None,
                              pretrained=False,
                              root=os.path.join("~", ".chainer", "models"),
                              **kwargs):
    """
    Create ShakeDrop-ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    assert classes in [10, 100]

    # Derive the per-stage unit count from the total block count.
    divisor = 9 if bottleneck else 6
    assert (blocks - 2) % divisor == 0
    layers = [(blocks - 2) // divisor] * 3

    init_block_channels = 16
    channels_per_layers = [16, 32, 64]
    channels = [[width] * depth for (width, depth) in zip(channels_per_layers, layers)]
    if bottleneck:
        # Bottleneck units expand their output width by 4x.
        channels = [[4 * width for width in stage] for stage in channels]

    # Survival probability decays linearly with depth, down to 0.5 at the end.
    total_layers = sum(layers)
    final_death_prob = 0.5
    life_probs = [1.0 - float(i + 1) / float(total_layers) * final_death_prob
                  for i in range(total_layers)]

    net = CIFARShakeDropResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        life_probs=life_probs,
        classes=classes,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        load_npz(file=weights_path, obj=net)
    return net
def shakedropresnet20_cifar10(classes=10, **kwargs):
    """
    ShakeDrop-ResNet-20 model for CIFAR-10 from 'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(classes=classes, blocks=20, bottleneck=False, model_name="shakedropresnet20_cifar10")
    return get_shakedropresnet_cifar(**cfg, **kwargs)
def shakedropresnet20_cifar100(classes=100, **kwargs):
    """
    ShakeDrop-ResNet-20 model for CIFAR-100 from 'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(classes=classes, blocks=20, bottleneck=False, model_name="shakedropresnet20_cifar100")
    return get_shakedropresnet_cifar(**cfg, **kwargs)
def shakedropresnet20_svhn(classes=10, **kwargs):
    """
    ShakeDrop-ResNet-20 model for SVHN from 'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    cfg = dict(classes=classes, blocks=20, bottleneck=False, model_name="shakedropresnet20_svhn")
    return get_shakedropresnet_cifar(**cfg, **kwargs)
def _test():
    """Smoke-test every ShakeDrop-ResNet factory: parameter count and shape."""
    import numpy as np
    import chainer

    chainer.global_config.train = False
    pretrained = False

    # Model factory -> (number of classes, expected parameter count).
    cases = {
        shakedropresnet20_cifar10: (10, 272474),
        shakedropresnet20_cifar100: (100, 278324),
        shakedropresnet20_svhn: (10, 272474),
    }
    for model, (classes, expected) in cases.items():
        net = model(pretrained=pretrained)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected
        # Forward a dummy CIFAR-sized batch and verify the logits shape.
        x = np.zeros((14, 3, 32, 32), np.float32)
        y = net(x)
        assert y.shape == (14, classes)


if __name__ == "__main__":
    _test()
| 11,397
| 32.721893
| 119
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/inceptionresnetv1.py
|
"""
InceptionResNetV1 for ImageNet-1K, implemented in Chainer.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionResNetV1', 'inceptionresnetv1', 'InceptionAUnit', 'InceptionBUnit', 'InceptionCUnit',
'ReductionAUnit', 'ReductionBUnit']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv1x1, conv1x1_block, conv3x3_block, SimpleSequential, Concurrent
from .inceptionv3 import MaxPoolBranch, Conv1x1Branch, ConvSeqBranch
class InceptionAUnit(Chain):
    """
    InceptionResNetV1 type Inception-A unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_eps):
        super(InceptionAUnit, self).__init__()
        # Residual scaling factor for A units.
        self.scale = 0.17
        with self.init_scope():
            self.branches = Concurrent()
            with self.branches.init_scope():
                setattr(self.branches, "branch1", Conv1x1Branch(
                    in_channels=in_channels,
                    out_channels=out_channels_list[0],
                    bn_eps=bn_eps))
                setattr(self.branches, "branch2", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=out_channels_list[1:3],
                    kernel_size_list=(1, 3),
                    strides_list=(1, 1),
                    padding_list=(0, 1),
                    bn_eps=bn_eps))
                setattr(self.branches, "branch3", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=out_channels_list[3:6],
                    kernel_size_list=(1, 3, 3),
                    strides_list=(1, 1, 1),
                    padding_list=(0, 1, 1),
                    bn_eps=bn_eps))
            # Concatenated width = sum of each branch's final output width.
            conv_in_channels = out_channels_list[0] + out_channels_list[2] + out_channels_list[5]
            # 1x1 projection back to `in_channels` so the residual add works.
            self.conv = conv1x1(
                in_channels=conv_in_channels,
                out_channels=in_channels,
                use_bias=True)
            self.activ = F.relu
    def __call__(self, x):
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        # Scaled residual connection.
        x = self.scale * x + identity
        x = self.activ(x)
        return x
class InceptionBUnit(Chain):
    """
    InceptionResNetV1 type Inception-B unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_eps):
        super(InceptionBUnit, self).__init__()
        # Residual scaling factor for B units.
        self.scale = 0.10
        with self.init_scope():
            self.branches = Concurrent()
            with self.branches.init_scope():
                setattr(self.branches, "branch1", Conv1x1Branch(
                    in_channels=in_channels,
                    out_channels=out_channels_list[0],
                    bn_eps=bn_eps))
                # Factorized 7x7 conv as a 1x7 followed by a 7x1.
                setattr(self.branches, "branch2", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=out_channels_list[1:4],
                    kernel_size_list=(1, (1, 7), (7, 1)),
                    strides_list=(1, 1, 1),
                    padding_list=(0, (0, 3), (3, 0)),
                    bn_eps=bn_eps))
            conv_in_channels = out_channels_list[0] + out_channels_list[3]
            # 1x1 projection back to `in_channels` so the residual add works.
            self.conv = conv1x1(
                in_channels=conv_in_channels,
                out_channels=in_channels,
                use_bias=True)
            self.activ = F.relu
    def __call__(self, x):
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        # Scaled residual connection.
        x = self.scale * x + identity
        x = self.activ(x)
        return x
class InceptionCUnit(Chain):
    """
    InceptionResNetV1 type Inception-C unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    scale : float, default 0.2
        Scale value for residual branch.
    activate : bool, default True
        Whether activate the convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_eps,
                 scale=0.2,
                 activate=True):
        super(InceptionCUnit, self).__init__()
        self.activate = activate
        self.scale = scale
        with self.init_scope():
            self.branches = Concurrent()
            with self.branches.init_scope():
                setattr(self.branches, "branch1", Conv1x1Branch(
                    in_channels=in_channels,
                    out_channels=out_channels_list[0],
                    bn_eps=bn_eps))
                # Factorized 3x3 conv as a 1x3 followed by a 3x1.
                setattr(self.branches, "branch2", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=out_channels_list[1:4],
                    kernel_size_list=(1, (1, 3), (3, 1)),
                    strides_list=(1, 1, 1),
                    padding_list=(0, (0, 1), (1, 0)),
                    bn_eps=bn_eps))
            conv_in_channels = out_channels_list[0] + out_channels_list[3]
            # 1x1 projection back to `in_channels` so the residual add works.
            self.conv = conv1x1(
                in_channels=conv_in_channels,
                out_channels=in_channels,
                use_bias=True)
            if self.activate:
                self.activ = F.relu
    def __call__(self, x):
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        # Scaled residual connection; the last unit may skip the activation.
        x = self.scale * x + identity
        if self.activate:
            x = self.activ(x)
        return x
class ReductionAUnit(Chain):
    """
    InceptionResNetV1 type Reduction-A unit.
    Downsampling unit: both conv branches end in a stride-2 convolution and a
    max-pool branch is concatenated alongside them.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_eps):
        super(ReductionAUnit, self).__init__()
        with self.init_scope():
            self.branches = Concurrent()
            with self.branches.init_scope():
                setattr(self.branches, "branch1", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=out_channels_list[0:1],
                    kernel_size_list=(3,),
                    strides_list=(2,),
                    padding_list=(0,),
                    bn_eps=bn_eps))
                setattr(self.branches, "branch2", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=out_channels_list[1:4],
                    kernel_size_list=(1, 3, 3),
                    strides_list=(1, 1, 2),
                    padding_list=(0, 1, 0),
                    bn_eps=bn_eps))
                setattr(self.branches, "branch3", MaxPoolBranch())
    def __call__(self, x):
        x = self.branches(x)
        return x
class ReductionBUnit(Chain):
    """
    InceptionResNetV1 type Reduction-B unit.
    Downsampling unit: all three conv branches end in a stride-2 convolution
    and a max-pool branch is concatenated alongside them.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_eps):
        super(ReductionBUnit, self).__init__()
        with self.init_scope():
            self.branches = Concurrent()
            with self.branches.init_scope():
                setattr(self.branches, "branch1", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=out_channels_list[0:2],
                    kernel_size_list=(1, 3),
                    strides_list=(1, 2),
                    padding_list=(0, 0),
                    bn_eps=bn_eps))
                setattr(self.branches, "branch2", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=out_channels_list[2:4],
                    kernel_size_list=(1, 3),
                    strides_list=(1, 2),
                    padding_list=(0, 0),
                    bn_eps=bn_eps))
                setattr(self.branches, "branch3", ConvSeqBranch(
                    in_channels=in_channels,
                    out_channels_list=out_channels_list[4:7],
                    kernel_size_list=(1, 3, 3),
                    strides_list=(1, 1, 2),
                    padding_list=(0, 1, 0),
                    bn_eps=bn_eps))
                setattr(self.branches, "branch4", MaxPoolBranch())
    def __call__(self, x):
        x = self.branches(x)
        return x
class InceptInitBlock(Chain):
    """
    InceptionResNetV1 specific initial block (stem).
    Three stride-2 stages (conv1, pool, conv6) give an overall spatial
    downscale of x8 while growing channels from `in_channels` to 256.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 bn_eps):
        super(InceptInitBlock, self).__init__()
        with self.init_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=32,
                stride=2,
                pad=0,
                bn_eps=bn_eps)
            self.conv2 = conv3x3_block(
                in_channels=32,
                out_channels=32,
                stride=1,
                pad=0,
                bn_eps=bn_eps)
            self.conv3 = conv3x3_block(
                in_channels=32,
                out_channels=64,
                stride=1,
                pad=1,
                bn_eps=bn_eps)
            self.pool = partial(
                F.max_pooling_2d,
                ksize=3,
                stride=2,
                pad=0,
                cover_all=False)
            self.conv4 = conv1x1_block(
                in_channels=64,
                out_channels=80,
                stride=1,
                pad=0,
                bn_eps=bn_eps)
            self.conv5 = conv3x3_block(
                in_channels=80,
                out_channels=192,
                stride=1,
                pad=0,
                bn_eps=bn_eps)
            self.conv6 = conv3x3_block(
                in_channels=192,
                out_channels=256,
                stride=2,
                pad=0,
                bn_eps=bn_eps)
    def __call__(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.pool(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.conv6(x)
        return x
class InceptHead(Chain):
    """
    InceptionResNetV1 specific classification block: flatten, optional dropout,
    a 512-wide bottleneck FC layer with batch norm, then the class logits layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_eps : float
        Small float added to variance in Batch norm.
    dropout_rate : float
        Fraction of the input units to drop. Must be a number between 0 and 1.
    classes : int
        Number of classification classes.
    """
    def __init__(self, in_channels, bn_eps, dropout_rate, classes):
        super(InceptHead, self).__init__()
        # Dropout is only instantiated when the rate is non-zero.
        self.use_dropout = (dropout_rate != 0.0)
        with self.init_scope():
            self.flatten = partial(
                F.reshape,
                shape=(-1, in_channels))
            if self.use_dropout:
                self.dropout = partial(
                    F.dropout,
                    ratio=dropout_rate)
            # fc1 has no bias since the following batch norm supplies the shift.
            self.fc1 = L.Linear(
                in_size=in_channels,
                out_size=512,
                nobias=True)
            self.bn = L.BatchNormalization(
                size=512,
                eps=bn_eps)
            self.fc2 = L.Linear(
                in_size=512,
                out_size=classes)

    def __call__(self, x):
        """Return class logits for the flattened feature input."""
        out = self.flatten(x)
        if self.use_dropout:
            out = self.dropout(out)
        return self.fc2(self.bn(self.fc1(out)))
class InceptionResNetV1(Chain):
    """
    InceptionResNetV1 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.
    Parameters:
    ----------
    dropout_rate : float, default 0.0
        Fraction of the input units to drop. Must be a number between 0 and 1.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (299, 299)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 dropout_rate=0.0,
                 bn_eps=1e-5,
                 in_channels=3,
                 in_size=(299, 299),
                 classes=1000):
        super(InceptionResNetV1, self).__init__()
        self.in_size = in_size
        self.classes = classes
        # Units per stage; the first unit of stages 2 and 3 is a reduction unit.
        layers = [5, 11, 7]
        # Channel count entering each stage.
        in_channels_list = [256, 896, 1792]
        # Branch output channels for the normal (A/B/C) units of each stage.
        normal_out_channels_list = [[32, 32, 32, 32, 32, 32], [128, 128, 128, 128], [192, 192, 192, 192]]
        # Branch output channels for the reduction (A/B) units.
        reduction_out_channels_list = [[384, 192, 192, 256], [256, 384, 256, 256, 256, 256, 256]]
        normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]
        with self.init_scope():
            self.features = SimpleSequential()
            with self.features.init_scope():
                setattr(self.features, "init_block", InceptInitBlock(
                    in_channels=in_channels,
                    bn_eps=bn_eps))
                in_channels = in_channels_list[0]
                for i, layers_per_stage in enumerate(layers):
                    stage = SimpleSequential()
                    with stage.init_scope():
                        for j in range(layers_per_stage):
                            # First unit of every stage after the first does the
                            # spatial reduction.
                            if (j == 0) and (i != 0):
                                unit = reduction_units[i - 1]
                                out_channels_list_per_stage = reduction_out_channels_list[i - 1]
                            else:
                                unit = normal_units[i]
                                out_channels_list_per_stage = normal_out_channels_list[i]
                            # The very last unit gets scale=1.0/activate=False --
                            # presumably disables residual scaling and the final
                            # activation in InceptionCUnit (defined earlier in the
                            # file); confirm against that unit's signature.
                            if (i == len(layers) - 1) and (j == layers_per_stage - 1):
                                unit_kwargs = {"scale": 1.0, "activate": False}
                            else:
                                unit_kwargs = {}
                            setattr(stage, "unit{}".format(j + 1), unit(
                                in_channels=in_channels,
                                out_channels_list=out_channels_list_per_stage,
                                bn_eps=bn_eps,
                                **unit_kwargs))
                            # A reduction unit changes the channel count.
                            if (j == 0) and (i != 0):
                                in_channels = in_channels_list[i]
                    setattr(self.features, "stage{}".format(i + 1), stage)
                # NOTE: pooling window hard-coded for the default 299x299 input.
                setattr(self.features, "final_pool", partial(
                    F.average_pooling_2d,
                    ksize=8,
                    stride=1))
            self.output = InceptHead(
                in_channels=in_channels,
                bn_eps=bn_eps,
                dropout_rate=dropout_rate,
                classes=classes)
    def __call__(self, x):
        # Features -> classification head; returns (batch, classes) logits.
        x = self.features(x)
        x = self.output(x)
        return x
def get_inceptionresnetv1(model_name=None,
                          pretrained=False,
                          root=os.path.join("~", ".chainer", "models"),
                          **kwargs):
    """
    Create InceptionResNetV1 model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    net = InceptionResNetV1(**kwargs)
    if pretrained:
        # A model name is mandatory to locate the weight file.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        load_npz(file=weights_file, obj=net)
    return net
def inceptionresnetv1(**kwargs):
    """
    Build the InceptionResNetV1 model ('Inception-v4, Inception-ResNet and the
    Impact of Residual Connections on Learning,' https://arxiv.org/abs/1602.07261).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    net = get_inceptionresnetv1(model_name="inceptionresnetv1", bn_eps=1e-3, **kwargs)
    return net
def _test():
    """Smoke-test: build the model, check its parameter count and output shape."""
    import numpy as np
    import chainer

    chainer.global_config.train = False
    pretrained = False

    # Expected trainable-parameter count per constructor.
    expected_counts = {inceptionresnetv1: 23995624}
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = np.zeros((1, 3, 299, 299), np.float32)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 18,521
| 32.554348
| 117
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/scnet.py
|
"""
SCNet for ImageNet-1K, implemented in Chainer.
Original paper: 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
"""
__all__ = ['SCNet', 'scnet50', 'scnet101', 'scneta50', 'scneta101']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv1x1_block, conv3x3_block, InterpolationBlock, SimpleSequential
from .resnet import ResInitBlock
from .senet import SEInitBlock
from .resnesta import ResNeStADownBlock
class ScDownBlock(Chain):
    """
    SCNet specific convolutional downscale block: average pooling followed by a
    3x3 convolution without activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    pool_size: int or list/tuple of 2 ints, default 2
        Size of the average pooling windows.
    """
    def __init__(self, in_channels, out_channels, pool_size=2, **kwargs):
        super(ScDownBlock, self).__init__(**kwargs)
        with self.init_scope():
            # Pooling with stride == ksize: non-overlapping windows.
            self.pool = partial(
                F.average_pooling_nd,
                ksize=pool_size,
                stride=pool_size,
                pad_value=None)
            self.conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                activation=None)

    def __call__(self, x):
        """Downscale the input, then convolve."""
        return self.conv(self.pool(x))
class ScConv(Chain):
    """
    Self-calibrated convolutional block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    scale_factor : int
        Scale factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 scale_factor,
                 **kwargs):
        super(ScConv, self).__init__(**kwargs)
        with self.init_scope():
            # Downscale path: average-pool by `scale_factor`, then 3x3 conv.
            self.down = ScDownBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                pool_size=scale_factor)
            # Nearest-neighbour upsampling back to the input resolution.
            self.up = InterpolationBlock(
                scale_factor=scale_factor,
                mode="nearest",
                align_corners=False)
            self.sigmoid = F.sigmoid
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                activation=None)
            self.conv2 = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride)
    def __call__(self, x):
        # Calibration gate: w = sigmoid(x + upsample(down(x))).
        # NOTE(review): the addition requires out_channels == in_channels; this
        # holds for the ScBottleneck call sites below -- confirm for new callers.
        w = self.sigmoid(x + self.up(self.down(x), size=x.shape[2:]))
        # Gated 3x3 conv followed by the (possibly strided) output conv.
        x = self.conv1(x) * w
        x = self.conv2(x)
        return x
class ScBottleneck(Chain):
    """
    SCNet specific bottleneck block for residual path in SCNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    scale_factor : int, default 4
        Scale factor.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 bottleneck_factor=4,
                 scale_factor=4,
                 avg_downsample=False,
                 **kwargs):
        super(ScBottleneck, self).__init__(**kwargs)
        # With avg_downsample, the stride-2 reduction is deferred from the convs
        # to an explicit average-pooling step after both branches.
        self.avg_resize = (stride > 1) and avg_downsample
        # Two half-width branches, concatenated before the expansion conv.
        mid_channels = out_channels // bottleneck_factor // 2
        with self.init_scope():
            # Branch a: plain 1x1 -> 3x3 convolutions.
            self.conv1a = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.conv2a = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                stride=(1 if self.avg_resize else stride))
            # Branch b: 1x1 conv followed by a self-calibrated convolution.
            self.conv1b = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.conv2b = ScConv(
                in_channels=mid_channels,
                out_channels=mid_channels,
                stride=(1 if self.avg_resize else stride),
                scale_factor=scale_factor)
            if self.avg_resize:
                self.pool = partial(
                    F.average_pooling_nd,
                    ksize=3,
                    stride=stride,
                    pad=1)
            # Expansion back to out_channels; no activation here (ScUnit applies
            # ReLU after the residual addition).
            self.conv3 = conv1x1_block(
                in_channels=(2 * mid_channels),
                out_channels=out_channels,
                activation=None)
    def __call__(self, x):
        # y: plain branch, z: self-calibrated branch.
        y = self.conv1a(x)
        y = self.conv2a(y)
        z = self.conv1b(x)
        z = self.conv2b(z)
        if self.avg_resize:
            y = self.pool(y)
            z = self.pool(z)
        # Concatenate along channels, then expand.
        x = F.concat((y, z), axis=1)
        x = self.conv3(x)
        return x
class ScUnit(Chain):
    """
    SCNet unit with residual connection: ScBottleneck body plus an identity
    shortcut (projected when shape changes), followed by ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    """
    def __init__(self, in_channels, out_channels, stride, avg_downsample=False, **kwargs):
        super(ScUnit, self).__init__(**kwargs)
        # A projection shortcut is needed whenever the shape changes.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        with self.init_scope():
            self.body = ScBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                avg_downsample=avg_downsample)
            if self.resize_identity:
                if avg_downsample:
                    self.identity_block = ResNeStADownBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        stride=stride)
                else:
                    self.identity_block = conv1x1_block(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        stride=stride,
                        activation=None)
            self.activ = F.relu

    def __call__(self, x):
        shortcut = self.identity_block(x) if self.resize_identity else x
        return self.activ(self.body(x) + shortcut)
class SCNet(Chain):
    """
    SCNet model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
    http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    se_init_block : bool, default False
        SENet-like initial block.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 se_init_block=False,
                 avg_downsample=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(SCNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.init_scope():
            self.features = SimpleSequential()
            with self.features.init_scope():
                # Stem: SENet-style or plain ResNet-style initial block.
                init_block_class = SEInitBlock if se_init_block else ResInitBlock
                setattr(self.features, "init_block", init_block_class(
                    in_channels=in_channels,
                    out_channels=init_block_channels))
                in_channels = init_block_channels
                for i, channels_per_stage in enumerate(channels):
                    stage = SimpleSequential()
                    with stage.init_scope():
                        for j, out_channels in enumerate(channels_per_stage):
                            # First unit of every stage except the first downsamples.
                            stride = 2 if (j == 0) and (i != 0) else 1
                            setattr(stage, "unit{}".format(j + 1), ScUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                stride=stride,
                                avg_downsample=avg_downsample))
                            in_channels = out_channels
                    setattr(self.features, "stage{}".format(i + 1), stage)
                # Pool over an (in_size // 32) window -- the backbone is expected
                # to downsample the input by a factor of 32 overall.
                setattr(self.features, "final_pool", partial(
                    F.average_pooling_2d,
                    ksize=(in_size[0] // 32, in_size[1] // 32)))
            self.output = SimpleSequential()
            with self.output.init_scope():
                setattr(self.output, "flatten", partial(
                    F.reshape,
                    shape=(-1, in_channels)))
                setattr(self.output, "fc", L.Linear(
                    in_size=in_channels,
                    out_size=classes))
    def __call__(self, x):
        # Features -> classifier; returns (batch, classes) logits.
        x = self.features(x)
        x = self.output(x)
        return x
def get_scnet(blocks,
              width_scale=1.0,
              se_init_block=False,
              avg_downsample=False,
              init_block_channels_scale=1,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".chainer", "models"),
              **kwargs):
    """
    Create SCNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    se_init_block : bool, default False
        SENet-like initial block.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    init_block_channels_scale : int, default 1
        Scale factor for number of output channels in the initial unit.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Per-stage unit counts for each supported depth.
    blocks_to_layers = {
        14: [1, 1, 1, 1],
        26: [2, 2, 2, 2],
        38: [3, 3, 3, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks not in blocks_to_layers:
        raise ValueError("Unsupported SCNet with number of blocks: {}".format(blocks))
    layers = blocks_to_layers[blocks]
    # Sanity check: 3 convs per unit plus stem and classifier.
    assert (sum(layers) * 3 + 2 == blocks)

    init_block_channels = 64 * init_block_channels_scale
    # Per-stage output widths; the x4 bottleneck expansion is already applied.
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit width except the very last one of the last stage.
        channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)

    net = SCNet(
        channels=channels,
        init_block_channels=init_block_channels,
        se_init_block=se_init_block,
        avg_downsample=avg_downsample,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        load_npz(
            file=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            obj=net)
    return net
def scnet50(**kwargs):
    """
    Build the SCNet-50 model ('Improving Convolutional Networks with
    Self-Calibrated Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    net = get_scnet(blocks=50, model_name="scnet50", **kwargs)
    return net
def scnet101(**kwargs):
    """
    Build the SCNet-101 model ('Improving Convolutional Networks with
    Self-Calibrated Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    net = get_scnet(blocks=101, model_name="scnet101", **kwargs)
    return net
def scneta50(**kwargs):
    """
    Build the SCNet(A)-50 model with average downsampling ('Improving
    Convolutional Networks with Self-Calibrated Convolutions,'
    http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    net = get_scnet(blocks=50, se_init_block=True, avg_downsample=True, model_name="scneta50", **kwargs)
    return net
def scneta101(**kwargs):
    """
    Build the SCNet(A)-101 model with average downsampling and a doubled-width
    stem ('Improving Convolutional Networks with Self-Calibrated Convolutions,'
    http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    net = get_scnet(blocks=101, se_init_block=True, avg_downsample=True, init_block_channels_scale=2,
                    model_name="scneta101", **kwargs)
    return net
def _test():
    """Smoke-test: build each model, check parameter counts and output shapes."""
    import numpy as np
    import chainer

    chainer.global_config.train = False
    pretrained = False

    # Expected trainable-parameter count per constructor.
    expected_counts = {
        scnet50: 25564584,
        scnet101: 44565416,
        scneta50: 25583816,
        scneta101: 44689192,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = np.zeros((1, 3, 224, 224), np.float32)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 15,771
| 31.187755
| 115
|
py
|
imgclsmob
|
imgclsmob-master/chainer_/chainercv2/models/igcv3.py
|
"""
IGCV3 for ImageNet-1K, implemented in Chainer.
Original paper: 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
https://arxiv.org/abs/1806.00178.
"""
__all__ = ['IGCV3', 'igcv3_w1', 'igcv3_w3d4', 'igcv3_wd2', 'igcv3_wd4']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, ChannelShuffle, SimpleSequential
class InvResUnit(Chain):
    """
    So-called 'Inverted Residual Unit' layer, with grouped pointwise convolutions
    and channel shuffle as used by IGCV3.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Stride of the second convolution layer.
    expansion : bool
        Whether do expansion of channels.
    """
    def __init__(self, in_channels, out_channels, stride, expansion):
        super(InvResUnit, self).__init__()
        # The skip connection is only valid when shape is preserved.
        self.residual = (in_channels == out_channels) and (stride == 1)
        mid_channels = in_channels * 6 if expansion else in_channels
        groups = 2
        with self.init_scope():
            # Grouped 1x1 expansion, then shuffle so information crosses groups.
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                groups=groups,
                activation=None)
            self.c_shuffle = ChannelShuffle(
                channels=mid_channels,
                groups=groups)
            self.conv2 = dwconv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                stride=stride,
                activation="relu6")
            # Grouped 1x1 projection, no activation (linear bottleneck).
            self.conv3 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                groups=groups,
                activation=None)

    def __call__(self, x):
        identity = x if self.residual else None
        out = self.conv1(x)
        out = self.c_shuffle(out)
        out = self.conv2(out)
        out = self.conv3(out)
        if self.residual:
            out = out + identity
        return out
class IGCV3(Chain):
    """
    IGCV3 model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
    https://arxiv.org/abs/1806.00178.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image. Each dimension should be
        divisible by 32 (the backbone's overall downsampling factor).
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000):
        super(IGCV3, self).__init__()
        self.in_size = in_size
        self.classes = classes
        with self.init_scope():
            self.features = SimpleSequential()
            with self.features.init_scope():
                setattr(self.features, "init_block", conv3x3_block(
                    in_channels=in_channels,
                    out_channels=init_block_channels,
                    stride=2,
                    activation="relu6"))
                in_channels = init_block_channels
                for i, channels_per_stage in enumerate(channels):
                    stage = SimpleSequential()
                    with stage.init_scope():
                        for j, out_channels in enumerate(channels_per_stage):
                            # First unit of every stage except the first downsamples.
                            stride = 2 if (j == 0) and (i != 0) else 1
                            # The very first unit skips channel expansion.
                            expansion = (i != 0) or (j != 0)
                            setattr(stage, "unit{}".format(j + 1), InvResUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                stride=stride,
                                expansion=expansion))
                            in_channels = out_channels
                    setattr(self.features, "stage{}".format(i + 1), stage)
                setattr(self.features, "final_block", conv1x1_block(
                    in_channels=in_channels,
                    out_channels=final_block_channels,
                    activation="relu6"))
                in_channels = final_block_channels
                # FIX: the pooling window was hard-coded to ksize=7, which is only
                # correct for 224x224 input. Derive it from in_size (/32, matching
                # the stride-2 init block plus four stride-2 stages), consistent
                # with how SCNet does it in this repository. For the default
                # in_size=(224, 224) this still yields ksize=(7, 7).
                setattr(self.features, "final_pool", partial(
                    F.average_pooling_2d,
                    ksize=(in_size[0] // 32, in_size[1] // 32),
                    stride=1))
            self.output = SimpleSequential()
            with self.output.init_scope():
                setattr(self.output, "flatten", partial(
                    F.reshape,
                    shape=(-1, in_channels)))
                setattr(self.output, "fc", L.Linear(
                    in_size=in_channels,
                    out_size=classes))

    def __call__(self, x):
        """Return class logits of shape (batch, classes)."""
        x = self.features(x)
        x = self.output(x)
        return x
def get_mobilenetv2(width_scale,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".chainer", "models"),
                    **kwargs):
    """
    Create IGCV3-D model with specific parameters.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 32
    final_block_channels = 1280
    layers = [1, 4, 6, 8, 6, 6, 1]
    downsample = [0, 1, 1, 1, 0, 1, 0]
    channels_per_layers = [16, 24, 32, 64, 96, 160, 320]

    # Group per-layer channel counts into stages: a downsampling layer starts a
    # new stage, any other layer is merged into the current one.
    channels = [[]]
    for ch, layer_count, ds in zip(channels_per_layers, layers, downsample):
        if ds != 0:
            channels.append([ch] * layer_count)
        else:
            channels[-1] += [ch] * layer_count

    if width_scale != 1.0:
        def make_even(value):
            # Widths are kept even so the two-group convolutions divide cleanly.
            return value if (value % 2 == 0) else value + 1
        channels = [[make_even(int(cij * width_scale)) for cij in ci] for ci in channels]
        init_block_channels = make_even(int(init_block_channels * width_scale))
        if width_scale > 1.0:
            final_block_channels = make_even(int(final_block_channels * width_scale))

    net = IGCV3(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        load_npz(
            file=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            obj=net)
    return net
def igcv3_w1(**kwargs):
    """
    Build the IGCV3-D 1.0x model ('IGCV3: Interleaved Low-Rank Group Convolutions
    for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenetv2(width_scale=1.0, model_name="igcv3_w1", **kwargs)
    return net
def igcv3_w3d4(**kwargs):
    """
    Build the IGCV3-D 0.75x model ('IGCV3: Interleaved Low-Rank Group Convolutions
    for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenetv2(width_scale=0.75, model_name="igcv3_w3d4", **kwargs)
    return net
def igcv3_wd2(**kwargs):
    """
    Build the IGCV3-D 0.5x model ('IGCV3: Interleaved Low-Rank Group Convolutions
    for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenetv2(width_scale=0.5, model_name="igcv3_wd2", **kwargs)
    return net
def igcv3_wd4(**kwargs):
    """
    Build the IGCV3-D 0.25x model ('IGCV3: Interleaved Low-Rank Group Convolutions
    for Efficient Deep Neural Networks,' https://arxiv.org/abs/1806.00178).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    net = get_mobilenetv2(width_scale=0.25, model_name="igcv3_wd4", **kwargs)
    return net
def _test():
    """Smoke-test: build each model, check parameter counts and output shapes."""
    import numpy as np
    import chainer

    chainer.global_config.train = False
    pretrained = False

    # Expected trainable-parameter count per constructor.
    expected_counts = {
        igcv3_w1: 3491688,
        igcv3_w3d4: 2638084,
        igcv3_wd2: 1985528,
        igcv3_wd4: 1534020,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        weight_count = net.count_params()
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = np.zeros((1, 3, 224, 224), np.float32)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 10,203
| 32.788079
| 115
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.