code stringlengths 101 5.91M |
|---|
def is_atoms_in_same_ring(i, j, ssr):
    """Return True if atoms `i` and `j` co-occur in at least one ring.

    `ssr` is the smallest-set-of-smallest-rings: an iterable of containers
    of atom indices, one container per ring.
    """
    return any((i in ring) and (j in ring) for ring in ssr)
def get_versions():
    """Resolve version information, trying several strategies in order.

    Strategies (versioneer-style): expanded git keywords, then the git
    working tree, then the parent-directory name. Each strategy signals
    "not applicable" by raising NotThisMethod. Returns a dict with keys
    'version', 'full-revisionid', 'dirty', 'error' and 'date'.
    """
    def unknown(message):
        # Uniform fallback payload used when no strategy succeeds.
        return {'version': '0+unknown', 'full-revisionid': None,
                'dirty': None, 'error': message, 'date': None}

    cfg = get_config()
    verbose = cfg.verbose

    # 1) version baked into the source via expanded git keywords
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass

    # Locate the project root by stripping one directory level per path
    # component of versionfile_source. __file__ is absent when running
    # from a frozen/zipped build, hence the NameError guard.
    try:
        root = os.path.realpath(__file__)
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return unknown('unable to find root of source tree')

    # 2) ask git directly about the working tree
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose), cfg.style)
    except NotThisMethod:
        pass

    # 3) fall back to parsing the parent directory name
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return unknown('unable to compute version')
class ComplementationModulationModule(nn.Module):
    """Two-stream encoder/decoder with channel re-weighting at the bottleneck.

    Two inputs are encoded by separate (identically shaped) encoder stacks,
    their bottleneck features are concatenated and re-weighted by a
    squeeze-and-excitation-style gate (global pool + two FC layers +
    sigmoid), then decoded with skip connections drawn from BOTH encoder
    streams at every scale.

    Args:
        c_img: number of channels of each input image (and of the output).
        norm: normalization type forwarded to Encode/DecodeBlock.
        act_en: activation name used on the encoder side.
        act_de: activation name used on the decoder side.
        cnum: base channel count; deeper stages use multiples of it.
    """
    def __init__(self, c_img=3, norm='batch', act_en='leaky_relu', act_de='relu', cnum=64):
        super().__init__()
        c_in = c_img
        # --- encoder stream 1 ---
        self.en_1_1 = nn.Conv2d(c_in, cnum, 3, 1, padding=1)
        self.en_2_1 = EncodeBlock(cnum, (cnum * 2), normalization=norm, activation=act_en)
        self.en_3_1 = EncodeBlock((cnum * 2), (cnum * 4), normalization=norm, activation=act_en)
        self.en_4_1 = EncodeBlock((cnum * 4), (cnum * 8), normalization=norm, activation=act_en)
        self.en_5_1 = EncodeBlock((cnum * 8), (cnum * 8), normalization=norm, activation=act_en)
        # final stride-2 conv halves spatial size once more
        self.en_6_1 = nn.Sequential(get_act(act_en), nn.Conv2d((cnum * 8), (cnum * 8), 4, 2, padding=1))
        # --- encoder stream 2 (same architecture, separate weights) ---
        self.en_1_2 = nn.Conv2d(c_in, cnum, 3, 1, padding=1)
        self.en_2_2 = EncodeBlock(cnum, (cnum * 2), normalization=norm, activation=act_en)
        self.en_3_2 = EncodeBlock((cnum * 2), (cnum * 4), normalization=norm, activation=act_en)
        self.en_4_2 = EncodeBlock((cnum * 4), (cnum * 8), normalization=norm, activation=act_en)
        self.en_5_2 = EncodeBlock((cnum * 8), (cnum * 8), normalization=norm, activation=act_en)
        self.en_6_2 = nn.Sequential(get_act(act_en), nn.Conv2d((cnum * 8), (cnum * 8), 4, 2, padding=1))
        # --- channel gate over the concatenated (16*cnum) bottleneck ---
        self.pooling = nn.AdaptiveAvgPool2d((1, 1))
        self.fc_1 = nn.Linear((16 * cnum), (4 * cnum))
        self.fc_2 = nn.Linear((4 * cnum), (16 * cnum))
        # --- decoder: each stage consumes its input plus skips from BOTH streams ---
        self.de_6 = nn.Sequential(get_act(act_de), nn.ConvTranspose2d((cnum * 16), (cnum * 8), 4, 2, padding=1), nn.BatchNorm2d((cnum * 8)))
        self.de_5 = DecodeBlock(((cnum * 8) * 3), (cnum * 8), normalization=norm, activation=act_de)
        self.de_4 = DecodeBlock(((cnum * 8) * 3), (cnum * 4), normalization=norm, activation=act_de)
        self.de_3 = DecodeBlock(((cnum * 4) * 3), (cnum * 2), normalization=norm, activation=act_de)
        self.de_2 = DecodeBlock(((cnum * 2) * 3), cnum, normalization=norm, activation=act_de)
        self.de_1 = nn.Sequential(get_act(act_de), nn.ConvTranspose2d((cnum * 3), c_img, 3, 1, padding=1))
    def forward(self, x1, x2):
        """Encode x1 and x2, gate the fused bottleneck, decode with dual skips.

        x1, x2: tensors of shape (N, c_img, H, W) — presumably the same
        spatial size; not validated here.
        Returns a tensor of shape (N, c_img, H, W).
        """
        # encoder stream 1
        out_1_1 = self.en_1_1(x1)
        out_2_1 = self.en_2_1(out_1_1)
        out_3_1 = self.en_3_1(out_2_1)
        out_4_1 = self.en_4_1(out_3_1)
        out_5_1 = self.en_5_1(out_4_1)
        out_6_1 = self.en_6_1(out_5_1)
        # encoder stream 2
        out_1_2 = self.en_1_2(x2)
        out_2_2 = self.en_2_2(out_1_2)
        out_3_2 = self.en_3_2(out_2_2)
        out_4_2 = self.en_4_2(out_3_2)
        out_5_2 = self.en_5_2(out_4_2)
        out_6_2 = self.en_6_2(out_5_2)
        # fuse the two bottlenecks along channels: (N, 16*cnum, h, w)
        out_6 = torch.cat([out_6_1, out_6_2], dim=1)
        residual = out_6
        # squeeze: global average pool to (N, C, 1, 1), then FC-ReLU-FC-sigmoid
        out_6 = self.pooling(out_6)
        (N, C, _, _) = out_6.size()
        out_6 = out_6.view(N, (- 1), C)
        out_6_fc_1 = self.fc_1(out_6)
        out_6_fc_1 = nn.ReLU(inplace=True)(out_6_fc_1)
        out_6_fc_2 = self.fc_2(out_6_fc_1)
        weight = nn.Sigmoid()(out_6_fc_2)
        weight = weight.view(N, C, 1, 1)
        # excite: re-weight the pre-pool features channel-wise, then add the
        # unweighted features back (i.e. out = residual * weight + residual)
        out_6 = residual
        out_6 = (out_6 * weight)
        out_6 = (out_6 + residual)
        # decoder with skip connections from both encoder streams
        d_out_6 = self.de_6(out_6)
        d_out_6_out_5 = torch.cat([d_out_6, out_5_1, out_5_2], dim=1)
        d_out_5 = self.de_5(d_out_6_out_5)
        d_out_5_out_4 = torch.cat([d_out_5, out_4_1, out_4_2], dim=1)
        d_out_4 = self.de_4(d_out_5_out_4)
        d_out_4_out_3 = torch.cat([d_out_4, out_3_1, out_3_2], dim=1)
        d_out_3 = self.de_3(d_out_4_out_3)
        d_out_3_out_2 = torch.cat([d_out_3, out_2_1, out_2_2], dim=1)
        d_out_2 = self.de_2(d_out_3_out_2)
        d_out_2_out_1 = torch.cat([d_out_2, out_1_1, out_1_2], dim=1)
        dout_1 = self.de_1(d_out_2_out_1)
        return dout_1
def build_model(column_info, hidden_units=[100, 50, 25]):
    """Build a wide-and-deep Keras model from `column_info`.

    The wide part is a linear layer over one-hot base and cross features;
    the deep part is an MLP (Dense + BatchNorm + ReLU + Dropout per hidden
    layer) over indicator, embedding and continuous features. Outputs a
    single sigmoid probability.

    NOTE: the mutable default for `hidden_units` is kept for interface
    compatibility; it is never mutated here.
    """
    def _int_input():
        # Scalar int32 placeholder for one categorical column.
        return tf.keras.layers.Input(shape=[], dtype='int32')

    def _merge(layers):
        # Concatenate when there is more than one tensor, else pass through.
        if len(layers) > 1:
            return tf.keras.layers.concatenate(layers, axis=1)
        return layers[0]

    # One-hot encodings; base and indicator dims are padded by one slot.
    wide_base_input_layers = [_int_input() for _ in column_info.wide_base_cols]
    wide_base_layers = [
        tf.keras.backend.one_hot(layer, column_info.wide_base_dims[i] + 1)
        for i, layer in enumerate(wide_base_input_layers)
    ]
    wide_cross_input_layers = [_int_input() for _ in column_info.wide_cross_cols]
    wide_cross_layers = [
        tf.keras.backend.one_hot(layer, column_info.wide_cross_dims[i])
        for i, layer in enumerate(wide_cross_input_layers)
    ]
    indicator_input_layers = [_int_input() for _ in column_info.indicator_cols]
    indicator_layers = [
        tf.keras.backend.one_hot(layer, column_info.indicator_dims[i] + 1)
        for i, layer in enumerate(indicator_input_layers)
    ]

    # Learned embeddings, flattened to rank-2 for concatenation.
    embed_input_layers = [_int_input() for _ in column_info.embed_in_dims]
    embed_layers = []
    for i, inp in enumerate(embed_input_layers):
        embedding = tf.keras.layers.Embedding(
            column_info.embed_in_dims[i] + 1,
            output_dim=column_info.embed_out_dims[i])
        embed_layers.append(tf.keras.layers.Flatten()(embedding(inp)))

    # Continuous features reshaped to (batch, 1).
    continuous_input_layers = [
        tf.keras.layers.Input(shape=[]) for _ in column_info.continuous_cols
    ]
    continuous_layers = [
        tf.keras.layers.Reshape(target_shape=(1,))(layer)
        for layer in continuous_input_layers
    ]

    # Wide branch: linear model over the sparse features.
    wide_out = tf.keras.layers.Dense(1)(_merge(wide_base_layers + wide_cross_layers))

    # Deep branch: MLP over indicator + embedding + continuous features.
    deep = _merge(indicator_layers + embed_layers + continuous_layers)
    for units in hidden_units:
        dense = tf.keras.layers.Dense(units)(deep)
        normed = tf.keras.layers.BatchNormalization()(dense)
        activated = tf.keras.layers.ReLU()(normed)
        deep = tf.keras.layers.Dropout(0.1)(activated)
    deep_out = tf.keras.layers.Dense(1)(deep)

    # Sum the two branches and squash to a probability.
    out = tf.keras.layers.Activation('sigmoid')(tf.keras.layers.add([wide_out, deep_out]))
    inputs = (wide_base_input_layers + wide_cross_input_layers
              + indicator_input_layers + embed_input_layers + continuous_input_layers)
    return tf.keras.models.Model(inputs, out)
class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin):
    """Base class for Flax/JAX diffusion pipelines.

    Holds all components of a diffusion pipeline (models, schedulers,
    processors) and provides methods to save/load them together with the
    pipeline-level config file (``model_index.json``).

    Fixes applied in review:
    - removed a truncated, syntactically invalid ``user_agent =`` statement
      that shadowed the valid assignment just above it;
    - restored the ``@classmethod`` decorators on ``from_pretrained`` and
      ``_get_signature_keys`` (their signatures take ``cls`` and cannot work
      as plain methods);
    - restored ``@staticmethod`` on ``numpy_to_pil`` (its signature takes no
      ``self``, so an instance call would otherwise bind ``self`` to
      ``images``).
    """

    # Name of the pipeline-level config file written by save_pretrained.
    config_name = 'model_index.json'

    def register_modules(self, **kwargs):
        """Record each sub-module in the pipeline config and attach it to self.

        For every ``name=module`` pair, stores ``(library, class_name)`` in
        the config — or ``(None, None)`` when the module is absent — and sets
        ``self.<name> = module``.
        """
        from diffusers import pipelines
        for (name, module) in kwargs.items():
            if (module is None):
                register_dict = {name: (None, None)}
            else:
                library = module.__module__.split('.')[0]
                # e.g. "diffusers.pipelines.stable_diffusion.x" -> "stable_diffusion"
                pipeline_dir = module.__module__.split('.')[(- 2)]
                path = module.__module__.split('.')
                is_pipeline_module = ((pipeline_dir in path) and hasattr(pipelines, pipeline_dir))
                # Modules defined inside a pipeline folder are recorded under
                # the pipeline folder name instead of the top-level library.
                if ((library not in LOADABLE_CLASSES) or is_pipeline_module):
                    library = pipeline_dir
                class_name = module.__class__.__name__
                register_dict = {name: (library, class_name)}
            self.register_to_config(**register_dict)
            setattr(self, name, module)

    def save_pretrained(self, save_directory: Union[(str, os.PathLike)], params: Union[(Dict, FrozenDict)], push_to_hub: bool=False, **kwargs):
        """Save the pipeline config and every sub-model to `save_directory`.

        Each sub-model is saved with its own library's save method (looked up
        via LOADABLE_CLASSES); models that accept a ``params`` argument get
        the matching entry from `params`. When `push_to_hub` is True the
        folder is uploaded to the Hub afterwards.
        """
        self.save_config(save_directory)
        model_index_dict = dict(self.config)
        # Internal bookkeeping keys are not sub-models; drop them.
        model_index_dict.pop('_class_name')
        model_index_dict.pop('_diffusers_version')
        model_index_dict.pop('_module', None)
        if push_to_hub:
            commit_message = kwargs.pop('commit_message', None)
            private = kwargs.pop('private', False)
            create_pr = kwargs.pop('create_pr', False)
            token = kwargs.pop('token', None)
            repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[(- 1)])
            repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id
        for pipeline_component_name in model_index_dict.keys():
            sub_model = getattr(self, pipeline_component_name)
            if (sub_model is None):
                # Optional module disabled by the user; nothing to save.
                continue
            model_cls = sub_model.__class__
            # Find the first registered base class this sub-model derives
            # from; its save method name is the first entry of the pair.
            save_method_name = None
            for (library_name, library_classes) in LOADABLE_CLASSES.items():
                library = importlib.import_module(library_name)
                for (base_class, save_load_methods) in library_classes.items():
                    class_candidate = getattr(library, base_class, None)
                    if ((class_candidate is not None) and issubclass(model_cls, class_candidate)):
                        save_method_name = save_load_methods[0]
                        break
                if (save_method_name is not None):
                    break
            # NOTE(review): if no candidate matched, save_method_name is still
            # None and getattr below raises TypeError — confirm all registered
            # modules are covered by LOADABLE_CLASSES.
            save_method = getattr(sub_model, save_method_name)
            # Flax models take their params explicitly; check the signature.
            expects_params = ('params' in set(inspect.signature(save_method).parameters.keys()))
            if expects_params:
                save_method(os.path.join(save_directory, pipeline_component_name), params=params[pipeline_component_name])
            else:
                save_method(os.path.join(save_directory, pipeline_component_name))
        if push_to_hub:
            self._upload_folder(save_directory, repo_id, token=token, commit_message=commit_message, create_pr=create_pr)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[(str, os.PathLike)]], **kwargs):
        """Instantiate a Flax pipeline from a local folder or the Hub.

        Downloads (when needed) and loads every sub-model listed in
        ``model_index.json``, honoring user-passed module overrides in
        `kwargs`. Returns a ``(pipeline, params)`` tuple where `params` maps
        module names to their Flax parameters/state.
        """
        cache_dir = kwargs.pop('cache_dir', DIFFUSERS_CACHE)
        resume_download = kwargs.pop('resume_download', False)
        proxies = kwargs.pop('proxies', None)
        local_files_only = kwargs.pop('local_files_only', False)
        use_auth_token = kwargs.pop('use_auth_token', None)
        revision = kwargs.pop('revision', None)
        from_pt = kwargs.pop('from_pt', False)
        use_memory_efficient_attention = kwargs.pop('use_memory_efficient_attention', False)
        split_head_dim = kwargs.pop('split_head_dim', False)
        dtype = kwargs.pop('dtype', None)
        if (not os.path.isdir(pretrained_model_name_or_path)):
            # Remote repo: fetch only the folders named in the config, plus
            # top-level config/weight files; skip PyTorch weights unless
            # from_pt was requested.
            config_dict = cls.load_config(pretrained_model_name_or_path, cache_dir=cache_dir, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision)
            folder_names = [k for k in config_dict.keys() if (not k.startswith('_'))]
            allow_patterns = [os.path.join(k, '*') for k in folder_names]
            allow_patterns += [FLAX_WEIGHTS_NAME, SCHEDULER_CONFIG_NAME, CONFIG_NAME, cls.config_name]
            ignore_patterns = (['*.bin', '*.safetensors'] if (not from_pt) else [])
            ignore_patterns += ['*.onnx', '*.onnx_data', '*.xml', '*.pb']
            if (cls != FlaxDiffusionPipeline):
                requested_pipeline_class = cls.__name__
            else:
                requested_pipeline_class = config_dict.get('_class_name', cls.__name__)
            requested_pipeline_class = (requested_pipeline_class if requested_pipeline_class.startswith('Flax') else ('Flax' + requested_pipeline_class))
            user_agent = {'pipeline_class': requested_pipeline_class}
            cached_folder = snapshot_download(pretrained_model_name_or_path, cache_dir=cache_dir, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, allow_patterns=allow_patterns, ignore_patterns=ignore_patterns, user_agent=user_agent)
        else:
            cached_folder = pretrained_model_name_or_path
        config_dict = cls.load_config(cached_folder)
        # Resolve the concrete pipeline class: either `cls` itself, or the
        # Flax-prefixed class named in the config.
        if (cls != FlaxDiffusionPipeline):
            pipeline_class = cls
        else:
            diffusers_module = importlib.import_module(cls.__module__.split('.')[0])
            class_name = (config_dict['_class_name'] if config_dict['_class_name'].startswith('Flax') else ('Flax' + config_dict['_class_name']))
            pipeline_class = getattr(diffusers_module, class_name)
        (expected_modules, optional_kwargs) = cls._get_signature_keys(pipeline_class)
        # Split user kwargs into module overrides and plain pipeline kwargs.
        passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if (k in kwargs)}
        passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if (k in kwargs)}
        (init_dict, unused_kwargs, _) = pipeline_class.extract_init_dict(config_dict, **kwargs)
        init_kwargs = {k: init_dict.pop(k) for k in optional_kwargs if (k in init_dict)}
        init_kwargs = {**init_kwargs, **passed_pipe_kwargs}

        def load_module(name, value):
            # Skip modules recorded as absent, or explicitly disabled by the
            # caller passing None.
            if (value[0] is None):
                return False
            if ((name in passed_class_obj) and (passed_class_obj[name] is None)):
                return False
            return True

        init_dict = {k: v for (k, v) in init_dict.items() if load_module(k, v)}
        if (len(unused_kwargs) > 0):
            logger.warning(f'Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored.')
        params = {}
        from diffusers import pipelines
        for (name, (library_name, class_name)) in init_dict.items():
            if (class_name is None):
                # Module registered as (lib, None): explicitly disabled.
                init_kwargs[name] = None
                continue
            is_pipeline_module = hasattr(pipelines, library_name)
            loaded_sub_model = None
            sub_model_should_be_defined = True
            if (name in passed_class_obj):
                # The caller supplied this module; type-check it when it
                # comes from a known library.
                if (not is_pipeline_module):
                    library = importlib.import_module(library_name)
                    class_obj = getattr(library, class_name)
                    importable_classes = LOADABLE_CLASSES[library_name]
                    class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()}
                    expected_class_obj = None
                    for (class_name, class_candidate) in class_candidates.items():
                        if ((class_candidate is not None) and issubclass(class_obj, class_candidate)):
                            expected_class_obj = class_candidate
                    if (not issubclass(passed_class_obj[name].__class__, expected_class_obj)):
                        raise ValueError(f'{passed_class_obj[name]} is of type: {type(passed_class_obj[name])}, but should be {expected_class_obj}')
                elif (passed_class_obj[name] is None):
                    logger.warning(f'You have passed `None` for {name} to disable its functionality in {pipeline_class}. Note that this might lead to problems when using {pipeline_class} and is not recommended.')
                    sub_model_should_be_defined = False
                else:
                    logger.warning(f'You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether it has the correct type')
                loaded_sub_model = passed_class_obj[name]
            elif is_pipeline_module:
                # Class lives inside a pipeline folder; any importable class
                # is an acceptable candidate.
                pipeline_module = getattr(pipelines, library_name)
                class_obj = import_flax_or_no_model(pipeline_module, class_name)
                importable_classes = ALL_IMPORTABLE_CLASSES
                class_candidates = {c: class_obj for c in importable_classes.keys()}
            else:
                library = importlib.import_module(library_name)
                class_obj = import_flax_or_no_model(library, class_name)
                importable_classes = LOADABLE_CLASSES[library_name]
                class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()}
            if ((loaded_sub_model is None) and sub_model_should_be_defined):
                # Resolve the load method for this class, then load from the
                # sub-folder (or the pipeline root as a fallback).
                load_method_name = None
                for (class_name, class_candidate) in class_candidates.items():
                    if ((class_candidate is not None) and issubclass(class_obj, class_candidate)):
                        load_method_name = importable_classes[class_name][1]
                load_method = getattr(class_obj, load_method_name)
                if os.path.isdir(os.path.join(cached_folder, name)):
                    loadable_folder = os.path.join(cached_folder, name)
                else:
                    # NOTE(review): in this branch `loadable_folder` stays
                    # undefined and the calls below would raise NameError —
                    # upstream relies on the sub-folder always existing;
                    # confirm intended.
                    loaded_sub_model = cached_folder
                if issubclass(class_obj, FlaxModelMixin):
                    (loaded_sub_model, loaded_params) = load_method(loadable_folder, from_pt=from_pt, use_memory_efficient_attention=use_memory_efficient_attention, split_head_dim=split_head_dim, dtype=dtype)
                    params[name] = loaded_params
                elif (is_transformers_available() and issubclass(class_obj, FlaxPreTrainedModel)):
                    if from_pt:
                        # Converted PT weights land in .params; detach them so
                        # the model object and the params dict stay separate.
                        loaded_sub_model = load_method(loadable_folder, from_pt=from_pt)
                        loaded_params = loaded_sub_model.params
                        del loaded_sub_model._params
                    else:
                        (loaded_sub_model, loaded_params) = load_method(loadable_folder, _do_init=False)
                    params[name] = loaded_params
                elif issubclass(class_obj, FlaxSchedulerMixin):
                    (loaded_sub_model, scheduler_state) = load_method(loadable_folder)
                    params[name] = scheduler_state
                else:
                    loaded_sub_model = load_method(loadable_folder)
            init_kwargs[name] = loaded_sub_model
        # Fill any remaining expected modules from user-passed objects, or
        # fail with a clear message listing what was actually provided.
        missing_modules = (set(expected_modules) - set(init_kwargs.keys()))
        passed_modules = list(passed_class_obj.keys())
        if ((len(missing_modules) > 0) and (missing_modules <= set(passed_modules))):
            for module in missing_modules:
                init_kwargs[module] = passed_class_obj.get(module, None)
        elif (len(missing_modules) > 0):
            passed_modules = (set((list(init_kwargs.keys()) + list(passed_class_obj.keys()))) - optional_kwargs)
            raise ValueError(f'Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed.')
        model = pipeline_class(**init_kwargs, dtype=dtype)
        return (model, params)

    @classmethod
    def _get_signature_keys(cls, obj):
        """Split `obj.__init__`'s parameters into required module names and
        the set of optional (defaulted) parameter names."""
        parameters = inspect.signature(obj.__init__).parameters
        required_parameters = {k: v for (k, v) in parameters.items() if (v.default == inspect._empty)}
        optional_parameters = set({k for (k, v) in parameters.items() if (v.default != inspect._empty)})
        expected_modules = (set(required_parameters.keys()) - {'self'})
        return (expected_modules, optional_parameters)

    # NOTE(review): upstream exposes this as a @property; kept as a plain
    # method here so existing `.components()` callers keep working.
    def components(self) -> Dict[(str, Any)]:
        """Return the registered sub-modules needed to rebuild the pipeline,
        keyed by name; raises if the set differs from the expected modules."""
        (expected_modules, optional_parameters) = self._get_signature_keys(self)
        components = {k: getattr(self, k) for k in self.config.keys() if ((not k.startswith('_')) and (k not in optional_parameters))}
        if (set(components.keys()) != expected_modules):
            raise ValueError(f'{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected {expected_modules} to be defined, but {components} are defined.')
        return components

    @staticmethod
    def numpy_to_pil(images):
        """Convert a numpy image batch (values in [0, 1], shape (N, H, W, C)
        or (H, W, C)) to a list of PIL images; single-channel images become
        mode 'L'."""
        if (images.ndim == 3):
            images = images[(None, ...)]
        images = (images * 255).round().astype('uint8')
        if (images.shape[(- 1)] == 1):
            pil_images = [Image.fromarray(image.squeeze(), mode='L') for image in images]
        else:
            pil_images = [Image.fromarray(image) for image in images]
        return pil_images

    def progress_bar(self, iterable):
        """Wrap `iterable` in a tqdm bar configured via set_progress_bar_config."""
        if (not hasattr(self, '_progress_bar_config')):
            self._progress_bar_config = {}
        elif (not isinstance(self._progress_bar_config, dict)):
            raise ValueError(f'`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}.')
        return tqdm(iterable, **self._progress_bar_config)

    def set_progress_bar_config(self, **kwargs):
        """Store keyword arguments to forward to tqdm in progress_bar()."""
        self._progress_bar_config = kwargs
def run(config):
    """Build fragments from an RGBD sequence, optionally in parallel.

    Splits the sequence into n_frames_per_fragment-sized chunks and calls
    process_single_fragment for each, either via a spawn-based process pool
    (when config['python_multi_threading'] is True) or sequentially.
    """
    print('making fragments from RGBD sequence.')
    make_clean_folder(join(config['path_dataset'], config['folder_fragment']))
    color_files, depth_files = get_rgbd_file_lists(config['path_dataset'])
    n_files = len(color_files)
    n_fragments = int(math.ceil(float(n_files) / config['n_frames_per_fragment']))
    if config['python_multi_threading'] is True:
        # Leave one core free, but never exceed the number of fragments.
        max_workers = min(max(1, multiprocessing.cpu_count() - 1), n_fragments)
        # Avoid nested OpenMP parallelism inside worker processes.
        os.environ['OMP_NUM_THREADS'] = '1'
        task_args = [
            (fragment_id, color_files, depth_files, n_files, n_fragments, config)
            for fragment_id in range(n_fragments)
        ]
        ctx = multiprocessing.get_context('spawn')
        with ctx.Pool(processes=max_workers) as pool:
            pool.starmap(process_single_fragment, task_args)
    else:
        for fragment_id in range(n_fragments):
            process_single_fragment(fragment_id, color_files, depth_files, n_files, n_fragments, config)
class ASPResBlock(torch.nn.Module):
    """Residual block with dilated/plain conv pairs (HiFi-GAN style).

    Three pairs of weight-normalized Conv1d layers: the first conv of each
    pair uses one of the given dilations, the second uses dilation 1. Each
    pair is applied with leaky-ReLU pre-activations and a residual add.
    """
    def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ASPResBlock, self).__init__()
        self.h = h
        # Dilated convolutions — one per entry of the first three dilations.
        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1,
                               dilation=d, padding=get_padding(kernel_size, d)))
            for d in (dilation[0], dilation[1], dilation[2])
        ])
        self.convs1.apply(init_weights)
        # Matching undilated convolutions.
        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1,
                               dilation=1, padding=get_padding(kernel_size, 1)))
            for _ in range(3)
        ])
        self.convs2.apply(init_weights)

    def forward(self, x):
        """Apply each (dilated conv, plain conv) pair with a residual add."""
        for (dilated_conv, plain_conv) in zip(self.convs1, self.convs2):
            h = F.leaky_relu(x, LRELU_SLOPE)
            h = dilated_conv(h)
            h = F.leaky_relu(h, LRELU_SLOPE)
            h = plain_conv(h)
            x = h + x
        return x
def main():
    """CLI entry point: average the weights of several checkpoints.

    Either an explicit list of checkpoint paths is averaged, or — with
    --num-epoch-checkpoints / --num-update-checkpoints — the last N
    checkpoints found under the given directory are selected first.
    """
    parser = argparse.ArgumentParser(description='Tool to average the params of input checkpoints to produce a new checkpoint')
    parser.add_argument('--inputs', required=True, nargs='+', help='Input checkpoint file paths.')
    parser.add_argument('--output', required=True, metavar='FILE', help='Write the new checkpoint containing the averaged weights to this path.')
    num_group = parser.add_mutually_exclusive_group()
    num_group.add_argument('--num-epoch-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, and average last this many of them.')
    num_group.add_argument('--num-update-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, and average last this many of them.')
    parser.add_argument('--checkpoint-upper-bound', type=int, help='when using --num-epoch-checkpoints, this will set an upper bound on which epoch to use, when using --num-update-checkpoints, this will set an upper bound on which update to usee.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be averaged.e.g., with --num-update-checkpoints=10 --checkpoint-upper-bound=50000, checkpoints 40500-50000 would be averaged assuming --save-interval-updates 500')
    args = parser.parse_args()
    print(args)

    # Decide how many checkpoints to auto-select and whether selection is
    # by update number or by epoch number.
    if args.num_update_checkpoints is not None:
        num, is_update_based = args.num_update_checkpoints, True
    elif args.num_epoch_checkpoints is not None:
        num, is_update_based = args.num_epoch_checkpoints, False
    else:
        num, is_update_based = None, False
    assert ((args.checkpoint_upper_bound is None) or ((args.num_epoch_checkpoints is not None) or (args.num_update_checkpoints is not None))), '--checkpoint-upper-bound requires --num-epoch-checkpoints or --num-update-checkpoints'
    assert ((args.num_epoch_checkpoints is None) or (args.num_update_checkpoints is None)), 'Cannot combine --num-epoch-checkpoints and --num-update-checkpoints'

    if num is not None:
        args.inputs = last_n_checkpoints(args.inputs, num, is_update_based, upper_bound=args.checkpoint_upper_bound)
        print('averaging checkpoints: ', args.inputs)
    new_state = average_checkpoints(args.inputs)
    with PathManager.open(args.output, 'wb') as f:
        torch.save(new_state, f)
    print('Finished writing averaged checkpoint to {}.'.format(args.output))
def AddBLstmLayer(config_lines, name, input, cell_dim, recurrent_projection_dim=0, non_recurrent_projection_dim=0, clipping_threshold=1.0, zeroing_threshold=3.0, zeroing_interval=20, ng_per_element_scale_options='', ng_affine_options='', lstm_delay=[-1, 1], self_repair_scale_nonlinearity=None, max_change_per_component=0.75):
    """Add a bidirectional LSTM layer built from two AddLstmLayer calls.

    The forward direction uses the negative delay lstm_delay[0], the
    backward direction the positive delay lstm_delay[1]; their outputs are
    appended. Returns a dict with the combined 'descriptor' and 'dimension'.
    """
    assert (len(lstm_delay) == 2) and (lstm_delay[0] < 0) and (lstm_delay[1] > 0)
    # All options except the name suffix and delay are shared by both
    # directions.
    shared_kwargs = dict(
        config_lines=config_lines,
        input=input,
        cell_dim=cell_dim,
        recurrent_projection_dim=recurrent_projection_dim,
        non_recurrent_projection_dim=non_recurrent_projection_dim,
        clipping_threshold=clipping_threshold,
        zeroing_threshold=zeroing_threshold,
        zeroing_interval=zeroing_interval,
        ng_per_element_scale_options=ng_per_element_scale_options,
        ng_affine_options=ng_affine_options,
        self_repair_scale_nonlinearity=self_repair_scale_nonlinearity,
        max_change_per_component=max_change_per_component,
    )
    forward = AddLstmLayer(name='{0}_forward'.format(name), lstm_delay=lstm_delay[0], **shared_kwargs)
    backward = AddLstmLayer(name='{0}_backward'.format(name), lstm_delay=lstm_delay[1], **shared_kwargs)
    return {
        'descriptor': 'Append({0}, {1})'.format(forward['descriptor'], backward['descriptor']),
        'dimension': forward['dimension'] + backward['dimension'],
    }
class ContrastiveLoss(nn.Module):
    """Randomized contrastive loss over a batch of embeddings.

    For each sample a coin flip decides whether to sample one random
    positive distance (same target, excluding self) or one random negative
    distance (different target). Positives are pulled toward zero; negatives
    are pushed beyond `margin` via a hinge.

    Fixes applied in review:
    - `Tensor.addmm_` was called with the removed positional
      (beta, alpha, ...) signature; it now uses keyword beta/alpha.
    - masks are built as bool tensors (byte-mask indexing is deprecated).
    - `torch.cat` on a list of 0-dim tensors (or an empty list) fails on
      modern PyTorch; `torch.stack` with an empty-list guard is used.

    NOTE: forward() is stochastic (numpy RNG) and calls .cuda(), so it
    requires a GPU.
    """

    def __init__(self, margin=0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin
        # Kept for interface compatibility; not used by forward().
        self.ranking_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, inputs, targets):
        """Compute (loss, precision) for a batch.

        inputs: (n, d) embedding matrix; targets: (n,) labels.
        Returns the averaged hinge loss and the fraction of sampled
        distances on the correct side of the margin.
        """
        n = inputs.size(0)
        # Pairwise Euclidean distances: ||a||^2 + ||b||^2 - 2 a.b, clamped
        # before sqrt for numerical stability.
        dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)
        dist = dist + dist.t()
        # fix: old positional addmm_(beta, alpha, ...) signature was removed
        # from PyTorch; pass beta/alpha as keywords.
        dist.addmm_(inputs, inputs.t(), beta=1, alpha=-2)
        dist = dist.clamp(min=1e-12).sqrt()
        # mask[i][j] is True when i and j share a target.
        mask = targets.expand(n, n).eq(targets.expand(n, n).t())
        (dist_ap, dist_an) = ([], [])
        # Coin flip per sample: 1 -> sample a positive, 0 -> a negative.
        labels = np.random.randint(0, 2, n)
        # Bool mask excluding the diagonal (self-distances).
        eye = Variable(~torch.eye(n, dtype=torch.bool)).cuda()
        mask_ap = mask & eye
        for i in range(n):
            if labels[i] == 1:
                # NOTE(review): assumes every sample has at least one
                # positive; np.random.randint(0, 0) would raise otherwise.
                temp = dist[i][mask_ap[i]]
                idx = np.random.randint(0, temp.size(0))
                dist_ap.append(temp[idx])
            else:
                temp = dist[i][~mask[i]]
                idx = np.random.randint(0, temp.size(0))
                dist_an.append(temp[idx])
        # Robustness: the coin flips may produce an empty list; torch.cat
        # rejects empty lists and 0-dim tensors, so stack with a guard
        # (sum over an empty tensor is 0, preserving the loss formula).
        dist_ap = torch.stack(dist_ap) if dist_ap else inputs.new_zeros(0)
        dist_an = torch.stack(dist_an) if dist_an else inputs.new_zeros(0)
        # Pull positives to zero; hinge negatives past the margin.
        loss = (((1.0 * dist_ap.sum()) + (1.0 * torch.clamp((self.margin - dist_an), min=0.0).sum())) / n)
        # Fraction of sampled pairs on the correct side of the margin.
        prec = ((((dist_an.data > self.margin).sum() + (dist_ap.data < self.margin).sum()) * 1.0) / n)
        return (loss, prec)
def _generate_subtokens(token_counts, alphabet, min_count, num_iterations=4, reserved_tokens=None):
    """Iteratively build a subtoken vocabulary from token counts.

    Starting from reserved tokens plus the single-character alphabet, each
    iteration counts candidate subtokens over `token_counts` and keeps those
    seen at least `min_count` times.

    Args:
        token_counts: mapping from token string to its corpus count.
        alphabet: iterable of characters that must appear in the vocabulary.
        min_count: minimum count for a candidate subtoken to be kept.
        num_iterations: number of refinement passes (4 is usually enough).
        reserved_tokens: tokens forced to the front; defaults to
            RESERVED_TOKENS.

    Returns:
        The final list of subtokens, reserved tokens first.
    """
    if reserved_tokens is None:
        reserved_tokens = RESERVED_TOKENS
    # Seed vocabulary: reserved tokens followed by every alphabet character.
    subtoken_list = reserved_tokens + list(alphabet)
    max_subtoken_length = 1
    # fix: `xrange` is Python 2 only (NameError on Python 3); use `range`.
    for i in range(num_iterations):
        tf.compat.v1.logging.info(('\tGenerating subtokens: iteration %d' % i))
        # Re-index the current vocabulary, re-count candidates against the
        # corpus, then regenerate the vocabulary from those counts.
        subtoken_dict = _list_to_index_dict(subtoken_list)
        subtoken_counts = _count_and_gen_subtokens(token_counts, alphabet, subtoken_dict, max_subtoken_length)
        (subtoken_list, max_subtoken_length) = _gen_new_subtoken_list(subtoken_counts, min_count, alphabet, reserved_tokens)
        tf.compat.v1.logging.info(('\tVocab size: %d' % len(subtoken_list)))
    return subtoken_list
def main():
    """Evaluate a pretrained Transformer-XL language model.

    Loads the model and corpus named by --model_name, runs the requested
    split(s) through it with the configured memory/context lengths, and
    logs per-split loss and perplexity.
    """
    parser = argparse.ArgumentParser(description='PyTorch Transformer Language Model')
    parser.add_argument('--model_name', type=str, default='transfo-xl-wt103', help='pretrained model name')
    parser.add_argument('--split', type=str, default='test', choices=['all', 'valid', 'test'], help='which split to evaluate')
    parser.add_argument('--batch_size', type=int, default=10, help='batch size')
    parser.add_argument('--tgt_len', type=int, default=128, help='number of tokens to predict')
    parser.add_argument('--ext_len', type=int, default=0, help='length of the extended context')
    parser.add_argument('--mem_len', type=int, default=1600, help='length of the retained previous heads')
    parser.add_argument('--clamp_len', type=int, default=1000, help='max positional embedding index')
    # fix: typo "CUA" -> "CUDA" in the help text.
    parser.add_argument('--no_cuda', action='store_true', help='Do not use CUDA even though CUDA is available')
    parser.add_argument('--work_dir', type=str, required=True, help='path to the work_dir')
    parser.add_argument('--no_log', action='store_true', help='do not log the eval result')
    parser.add_argument('--same_length', action='store_true', help='set same length attention with masking')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    assert (args.ext_len >= 0), 'extended context length must be non-negative'

    if (args.server_ip and args.server_port):
        # Optional remote debugging: block until the debugger attaches.
        import ptvsd
        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
    logger.info('device: {}'.format(device))

    # Corpus iterators for both splits; only the requested one(s) are used.
    corpus = TransfoXLCorpus.from_pretrained(args.model_name)
    va_iter = corpus.get_iterator('valid', args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len)
    te_iter = corpus.get_iterator('test', args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len)

    model = TransfoXLLMHeadModel.from_pretrained(args.model_name)
    model.to(device)
    logger.info('Evaluating with bsz {} tgt_len {} ext_len {} mem_len {} clamp_len {}'.format(args.batch_size, args.tgt_len, args.ext_len, args.mem_len, args.clamp_len))
    # Configure evaluation-time memory and positional clamping.
    model.reset_memory_length(args.mem_len)
    if (args.clamp_len > 0):
        model.clamp_len = args.clamp_len
    if args.same_length:
        model.same_length = True

    def evaluate(eval_iter):
        """Run one pass over eval_iter and return token-averaged loss."""
        model.eval()
        (total_len, total_loss) = (0, 0.0)
        start_time = time.time()
        with torch.no_grad():
            # Memories are threaded through segments for long-range context.
            mems = None
            for (idx, (data, target, seq_len)) in enumerate(eval_iter):
                ret = model(data, lm_labels=target, mems=mems)
                (loss, _, mems) = ret
                loss = loss.mean()
                # Weight each segment's loss by its token count.
                total_loss += (seq_len * loss.item())
                total_len += seq_len
            total_time = (time.time() - start_time)
        logger.info('Time : {:.2f}s, {:.2f}ms/segment'.format(total_time, ((1000 * total_time) / (idx + 1))))
        return (total_loss / total_len)

    if (args.split == 'all'):
        test_loss = evaluate(te_iter)
        valid_loss = evaluate(va_iter)
    elif (args.split == 'valid'):
        valid_loss = evaluate(va_iter)
        test_loss = None
    elif (args.split == 'test'):
        test_loss = evaluate(te_iter)
        valid_loss = None

    def format_log(loss, split):
        """Format one split's loss and perplexity for logging."""
        log_str = '| {0} loss {1:5.2f} | {0} ppl {2:9.3f} '.format(split, loss, math.exp(loss))
        return log_str

    log_str = ''
    if (valid_loss is not None):
        log_str += format_log(valid_loss, 'valid')
    if (test_loss is not None):
        log_str += format_log(test_loss, 'test')
    logger.info(('=' * 100))
    logger.info(log_str)
    logger.info(('=' * 100))
def generate_cpp_module(fname='pau_cuda.cpp', coefficients=coefficients):
    """Generate the C++/pybind11 binding file for the PAU CUDA kernels.

    Renders an Airspeed (Velocity-style) template that, for every
    (numerator, denominator) degree pair in `coefficients`, declares the
    CUDA forward/backward entry points, wraps them with CHECK_INPUT
    validation, and registers both wrappers in a pybind11 module. The
    result is written to `fname`.

    Note: the `coefficients=coefficients` default binds the module-level
    `coefficients` list at definition time; `locals()` passed to merge()
    exposes both `fname` and `coefficients` to the template.
    """
    # Template notes: "\\#" emits a literal '#' (so the C++ #include/#define
    # lines survive), while bare "#foreach"/"#end" are Airspeed directives
    # expanded once per coefficient pair.
    file_content = airspeed.Template('\n\\#include <torch/extension.h>\n\\#include <vector>\n\\#include <iostream>\n\n#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")\n#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")\n#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)\n\n#foreach ($coef in $coefficients)\nat::Tensor pau_cuda_forward_$coef[0]_$coef[1](torch::Tensor x, torch::Tensor n, torch::Tensor d);\nstd::vector<torch::Tensor> pau_cuda_backward_$coef[0]_$coef[1](torch::Tensor grad_output, torch::Tensor x, torch::Tensor n, torch::Tensor d);\n#end\n\n#foreach ($coef in $coefficients)\nat::Tensor pau_forward__$coef[0]_$coef[1](torch::Tensor x, torch::Tensor n, torch::Tensor d) {\n  CHECK_INPUT(x);\n  CHECK_INPUT(n);\n  CHECK_INPUT(d);\n\n  return pau_cuda_forward_$coef[0]_$coef[1](x, n, d);\n}\nstd::vector<torch::Tensor> pau_backward__$coef[0]_$coef[1](torch::Tensor grad_output, torch::Tensor x, torch::Tensor n, torch::Tensor d) {\n  CHECK_INPUT(grad_output);\n  CHECK_INPUT(x);\n  CHECK_INPUT(n);\n  CHECK_INPUT(d);\n\n  return pau_cuda_backward_$coef[0]_$coef[1](grad_output, x, n, d);\n}\n#end\n\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n#foreach ($coef in $coefficients)\n    m.def("forward_$coef[0]_$coef[1]", &pau_forward__$coef[0]_$coef[1], "PAU forward _$coef[0]_$coef[1]");\n    m.def("backward_$coef[0]_$coef[1]", &pau_backward__$coef[0]_$coef[1], "PAU backward _$coef[0]_$coef[1]");\n#end\n}\n    ')
    content = file_content.merge(locals())
    with open(fname, 'w') as text_file:
        text_file.write(content)
def cli_main():
    """Entry point: parse eval-lm arguments and dispatch to main()."""
    arg_parser = options.get_eval_lm_parser()
    parsed_args = options.parse_args_and_arch(arg_parser)
    distributed_utils.call_main(parsed_args, main)
def create_atoms(mol):
    """Map each atom of an RDKit molecule to an integer feature id.

    Atom symbols are looked up in the module-level `atom_dict`; aromatic
    atoms are keyed as the tuple (symbol, 'aromatic') instead of the bare
    symbol. Returns a numpy array of ids, ordered by atom index.
    """
    keys = [atom.GetSymbol() for atom in mol.GetAtoms()]
    # Tag aromatic atoms so they get a distinct dictionary entry.
    for atom in mol.GetAromaticAtoms():
        position = atom.GetIdx()
        keys[position] = (keys[position], 'aromatic')
    return np.array([atom_dict[key] for key in keys])
class TestAutoResetWrapper():
    """Tests for AutoResetWrapper: automatic environment reset on episode end.

    NOTE(review): `fake_auto_reset_environment` and `fake_state_and_timestep`
    look like pytest fixtures whose `@pytest.fixture` decorators are missing
    here (test methods receive them as parameters) — confirm against the
    original test module. `fake_environment` and `key` are presumably
    fixtures defined elsewhere.
    """
    def fake_auto_reset_environment(self, fake_environment: Environment) -> AutoResetWrapper:
        """Wrap the fake environment in an AutoResetWrapper."""
        return AutoResetWrapper(fake_environment)
    def fake_state_and_timestep(self, fake_auto_reset_environment: AutoResetWrapper, key: chex.PRNGKey) -> Tuple[(State, TimeStep[Observation])]:
        """Produce an initial (state, timestep) pair via a jitted reset."""
        (state, timestep) = jax.jit(fake_auto_reset_environment.reset)(key)
        return (state, timestep)
    def test_auto_reset_wrapper__init(self, fake_environment: Environment) -> None:
        """The wrapper is itself an Environment."""
        auto_reset_env = AutoResetWrapper(fake_environment)
        assert isinstance(auto_reset_env, Environment)
    def test_auto_reset_wrapper__auto_reset(self, fake_auto_reset_environment: AutoResetWrapper, fake_state_and_timestep: Tuple[(State, TimeStep[Observation])]) -> None:
        """_auto_reset yields the same observation as a fresh reset."""
        (state, timestep) = fake_state_and_timestep
        (_, reset_timestep) = jax.jit(fake_auto_reset_environment._auto_reset)(state, timestep)
        chex.assert_trees_all_equal(timestep.observation, reset_timestep.observation)
    def test_auto_reset_wrapper__step_no_reset(self, fake_auto_reset_environment: AutoResetWrapper, key: chex.PRNGKey) -> None:
        """A mid-episode step does not trigger a reset (MID, zero reward)."""
        (state, first_timestep) = fake_auto_reset_environment.reset(key)
        action = fake_auto_reset_environment.action_spec().generate_value()
        (state, timestep) = jax.jit(fake_auto_reset_environment.step)(state, action)
        assert (timestep.step_type == StepType.MID)
        assert_trees_are_different(timestep, first_timestep)
        chex.assert_trees_all_equal(timestep.reward, 0)
    def test_auto_reset_wrapper__step_reset(self, fake_environment: FakeEnvironment, fake_auto_reset_environment: AutoResetWrapper, key: chex.PRNGKey) -> None:
        """Stepping to the time limit yields LAST plus the reset observation."""
        (state, first_timestep) = fake_auto_reset_environment.reset(key)
        fake_environment.time_limit = 5
        timestep = first_timestep
        # Step until the episode terminates at the configured time limit.
        for _ in range(fake_environment.time_limit):
            action = fake_auto_reset_environment.action_spec().generate_value()
            (state, timestep) = jax.jit(fake_auto_reset_environment.step)(state, action)
        assert (timestep.step_type == StepType.LAST)
        chex.assert_trees_all_equal(timestep.observation, first_timestep.observation)
def main():
    """CLI entry point: collect mel-spectrogram extraction options and run."""
    parser = argparse.ArgumentParser()
    # Required I/O locations and split selection.
    parser.add_argument('--source-dir', required=True, type=Path, help='source audio directory')
    parser.add_argument('--target-dir', required=True, type=Path, help='target audio directory')
    parser.add_argument('--data-split', default=['train', 'valid', 'test'], nargs='+', help='data split names')
    parser.add_argument('--output-root', required=True, type=Path, help='output directory')
    # Spectrogram parameters, registered in bulk (all plain ints).
    int_options = [
        ('--win-length', 1024),
        ('--hop-length', 256),
        ('--n-fft', 1024),
        ('--n-mels', 80),
        ('--f-min', 20),
        ('--f-max', 8000),
        ('--sample-rate', 22050),
    ]
    for flag, default in int_options:
        parser.add_argument(flag, type=int, default=default)
    parser.add_argument('--normalize-volume', '-n', action='store_true')
    process(parser.parse_args())
def sparse_tensor(indices, values, shape):
    """Build a differentiable COO sparse tensor.

    `indices` is a sequence of coordinate tuples, one per value; it is
    transposed here into the (ndim, nnz) layout `torch.sparse_coo_tensor`
    expects.
    """
    coo_indices = list(zip(*indices))
    return torch.sparse_coo_tensor(coo_indices, values, shape, requires_grad=True)
def create_feedforward_Q_function(observation_shape, action_shape, *args, observation_preprocessor=None, name='feedforward_Q', **kwargs):
    """Build a feedforward Q(s, a) model with a single scalar output.

    Only the observation input gets a preprocessor; actions are fed raw.
    """
    return feedforward_model(
        (observation_shape, action_shape),
        *args,
        output_size=1,
        preprocessors=(observation_preprocessor, None),
        name=name,
        **kwargs)
class StableDropoutTestCase(TestCase):
    # NOTE(review): the next three lines look like stripped decorators for
    # test_training (a skip-reason string plus something like
    # @pytest.mark.filterwarnings(...)); as written they are not valid
    # Python and should be restored from the original source.
    ('torch 2.0.0 gives `torch.onnx.errors.OnnxExporterError: Module onnx is not installed!`.')
    _torch
    .filterwarnings('ignore:.*Dropout.*:UserWarning:torch.onnx.*')
    def test_training(self):
        """ONNX-export StableDropout in training mode.

        Export at opset 12 is expected to succeed; export at opset 11 is
        asserted to raise.
        """
        devnull = open(os.devnull, 'wb')
        sd = modeling_deberta.StableDropout(0.1)
        do_constant_folding = False
        # Keep training-mode behavior in the exported graph.
        training = torch.onnx.TrainingMode.PRESERVE
        input = (torch.randn(2, 2),)
        torch.onnx.export(sd, input, devnull, opset_version=12, do_constant_folding=do_constant_folding, training=training)
        # The same export is asserted to fail at opset 11.
        with self.assertRaises(Exception):
            torch.onnx.export(sd, input, devnull, opset_version=11, do_constant_folding=do_constant_folding, training=training)
def groups(stream, size):
    """Yield successive lists of up to *size* items drawn from *stream*.

    The final batch may be shorter than *size*; an empty stream yields
    nothing.
    """
    batch = []
    for item in stream:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    if batch:
        yield batch
def pa(X, Y):
    """Pairwise squared Euclidean distances between rows of X and rows of Y.

    Uses the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2<x, y> and
    returns an (n_x, n_y) matrix.
    """
    cross = np.dot(X, Y.T)
    x_sq = np.sum(np.square(X), axis=1).reshape(-1, 1)
    y_sq = np.sum(np.square(Y), axis=1)
    return x_sq + y_sq - 2 * cross
class NormalTanhPolicy(nn.Module):
    """Gaussian policy head with optional tanh squashing (flax module).

    NOTE(review): ``__call__`` defines submodules inline, which in flax
    normally requires an ``@nn.compact`` decorator -- it appears to have
    been stripped; confirm against the original source.
    """
    hidden_dims: Sequence[int]  # sizes of the MLP trunk layers
    action_dim: int  # dimensionality of the action space
    state_dependent_std: bool = True  # predict log-std from state vs. learn a constant
    dropout_rate: Optional[float] = None
    log_std_scale: float = 1.0  # kernel-init scale for the log-std head
    log_std_min: Optional[float] = None
    log_std_max: Optional[float] = None
    tanh_squash_distribution: bool = True
    def __call__(self, observations: jnp.ndarray, temperature: float=1.0, training: bool=False) -> tfd.Distribution:
        """Return the action distribution for *observations*."""
        outputs = MLP(self.hidden_dims, activate_final=True, dropout_rate=self.dropout_rate)(observations, training=training)
        means = nn.Dense(self.action_dim, kernel_init=default_init())(outputs)
        if self.state_dependent_std:
            log_stds = nn.Dense(self.action_dim, kernel_init=default_init(self.log_std_scale))(outputs)
        else:
            # A single learned log-std vector shared across all states.
            log_stds = self.param('log_stds', nn.initializers.zeros, (self.action_dim,))
        # NOTE(review): `or` also falls back to the module default when the
        # configured bound is 0.0, not only when it is None -- presumably
        # None-only was intended; verify.
        log_std_min = (self.log_std_min or LOG_STD_MIN)
        log_std_max = (self.log_std_max or LOG_STD_MAX)
        log_stds = jnp.clip(log_stds, log_std_min, log_std_max)
        if (not self.tanh_squash_distribution):
            # Without the tanh bijector, bound the mean directly instead.
            means = nn.tanh(means)
        base_dist = tfd.MultivariateNormalDiag(loc=means, scale_diag=(jnp.exp(log_stds) * temperature))
        if self.tanh_squash_distribution:
            return tfd.TransformedDistribution(distribution=base_dist, bijector=tfb.Tanh())
        else:
            return base_dist
def read_bleu_output():
    """Collect per-block BLEU scores into per-team .blocks files.

    For every team and category, reads the per-block
    `bleu3ref-<team>-<category>-<block>.txt` files, extracts the BLEU
    value from the first line (formatted like 'BLEU = <score>, ...'),
    and writes one tab-separated 'Block-<id>' row per block into
    `significance-2005-DARPA-NIST/bleu3ref-<team>-<category>.blocks`.

    Improvements over the original: the never-used `filelines`
    accumulator is removed and path/row construction uses f-strings
    instead of long '+' concatenation chains (identical resulting
    strings).
    """
    # (category name, number of evaluation blocks)
    params = [('all-cat', 93), ('old-cat', 48), ('new-cat', 44)]
    for team in teams:
        for (cat, n_blocks) in params:
            out = ''
            for block_id in range(1, n_blocks + 1):
                with open(f'eval/metric_per_block/bleu3ref-{team}-{cat}-{block_id}.txt', 'r') as f:
                    firstline = f.readline()
                # First comma-separated field looks like 'BLEU = <score>'.
                bleu = firstline.split(',')[0].split('= ')[1]
                out += f'Block-{block_id}\t{bleu[:4]}\n'
            with open(f'significance-2005-DARPA-NIST/bleu3ref-{team}-{cat}.blocks', 'w+') as f_blocks:
                f_blocks.write(out)
    print('Scores were written to the significance-2005-DARPA-NIST directory.')
# NOTE(review): the bare tuples below read like stripped CLI option
# decorators for main() (e.g. click/typer @option(...) lines); as written
# they are not valid standalone Python (keyword arguments inside tuples)
# and should be restored from the original source.
()
('--input-path', '-i')
('--start-predictions-path', '-s')
('--model-path', '-m')
('--output-path', '-o')
('--batch-size', '-bs', default=16)
('--device', '-dv', default='cpu')
def main(input_path: str, start_predictions_path: str, model_path: str, output_path: str, batch_size: int, device: str) -> None:
    """Refine de-novo peptide predictions with a multinomial diffusion model.

    Loads spectra from *input_path* (IPC/feather) and initial sequence
    predictions from *start_predictions_path* (CSV), runs diffusion
    decoding from DIFFUSION_START_STEP, and writes the input table plus
    'diffusion_predictions' / 'diffusion_log_probs' columns to
    *output_path* as CSV.
    """
    logger = logging.Logger(name='diffusion/predict', level=logging.INFO)
    logger.info('Loading model.')
    model = MultinomialDiffusion.load(path=model_path)
    model = model.to(device=device)
    logger.info('Initializing decoder.')
    decoder = DiffusionDecoder(model=model)
    logger.info('Loading data.')
    logger.info('Loading residues.')
    # Residue masses ship alongside the model checkpoint.
    residue_masses = yaml.safe_load(open(os.path.join(model_path, 'residues.yaml')))
    residues = ResidueSet(residue_masses=residue_masses)
    logger.info(f'Loading input data from {input_path}.')
    input_data = polars.read_ipc(input_path)
    logger.info(f'Loading predictions from {start_predictions_path}.')
    start_predictions = pandas.read_csv(start_predictions_path)
    input_dataset = AnnotatedPolarsSpectrumDataset(data_frame=input_data, peptides=start_predictions['Predictions'].tolist())
    data_loader = torch.utils.data.DataLoader(input_dataset, shuffle=False, batch_size=batch_size, collate_fn=collate_batches(residues=residues, time_steps=model.time_steps, annotated=True, max_length=model.config.max_length))
    logger.info('Performing decoding.')
    results = []
    all_log_probs = []
    with torch.no_grad():
        for (spectra, spectra_padding_mask, precursors, peptides, _) in tqdm.tqdm(iter(data_loader), total=len(data_loader)):
            (predictions, log_probs) = decoder.decode(initial_sequence=peptides.to(device), spectra=spectra.to(device), spectra_padding_mask=spectra_padding_mask.to(device), precursors=precursors.to(device), start_step=DIFFUSION_START_STEP)
            # Truncate each prediction at the '$' end-marker, if present.
            predictions = [(prediction if ('$' not in prediction) else prediction[:prediction.index('$')]) for prediction in predictions]
            predictions = [''.join(prediction) for prediction in predictions]
            results.extend(predictions)
            all_log_probs.extend(log_probs)
    logger.info('Saving predictions.')
    output = input_data.to_pandas()
    output['diffusion_predictions'] = results
    output['diffusion_log_probs'] = all_log_probs
    output.to_csv(output_path)
def dict_deep_overlay(*data, list_replace=False):
    """Recursively overlay later structures onto earlier ones.

    With two arguments `(original, overlay)`:
      * a dict overlaying a list/tuple patches individual integer indices;
      * mismatched types mean the overlay replaces the original wholesale;
      * two dicts are merged key-by-key via `_dict_deep_overlay_item`;
      * two lists raise unless `list_replace` is set, in which case the
        overlay list replaces the original.
    More than two arguments fold left; a single argument is returned as-is.
    Mutates `original` in place where merging applies and returns the result.
    """
    if (len(data) == 1):
        return data[0]
    elif (len(data) != 2):
        # Fold left: overlay the first two, then the remainder onto that.
        head = dict_deep_overlay(data[0], data[1], list_replace=list_replace)
        return dict_deep_overlay(head, *data[2:], list_replace=list_replace)
    (original, overlay) = data
    if (isinstance(original, (list, tuple)) and isinstance(overlay, dict)):
        # A dict overlaying a sequence patches individual indices.
        for (key, item) in overlay.items():
            assert isinstance(key, int)
            # BUGFIX: propagate list_replace into the recursion -- it was
            # previously dropped, silently reverting to the default (False)
            # for nested list-valued patches.
            original[key] = dict_deep_overlay(original[key], item, list_replace=list_replace)
    elif (not isinstance(original, type(overlay))):
        # Mismatched types: the overlay wins wholesale.
        return overlay
    elif isinstance(overlay, dict):
        for (key, item) in overlay.items():
            _dict_deep_overlay_item(original, key, item, list_replace)
    elif (isinstance(overlay, list) and (not list_replace)):
        raise ValueError(('Cannot implicitly merge two lists, use key* or key+ ' + ('when inheriting: (list1: %s, list2: %s)' % (str(original), str(overlay)))))
    else:
        return overlay
    return original
def _define_hparam(hparams, hparam_name, default_val, random_val_fn):
hparams[hparam_name] = (hparams, hparam_name, default_val, random_val_fn) |
def _get_component_dropout(dropout_schedule, data_fraction):
if (data_fraction == 0):
assert (dropout_schedule[(- 1)][0] == 0)
return dropout_schedule[(- 1)][1]
try:
(dropout_schedule_index, initial_data_fraction, initial_dropout) = next(((i, tup[0], tup[1]) for (i, tup) in enumerate(dropout_schedule) if (tup[0] <= data_fraction)))
except StopIteration:
raise RuntimeError('Could not find data_fraction in dropout schedule corresponding to data_fraction {0}.\nMaybe something wrong with the parsed dropout schedule {1}.'.format(data_fraction, dropout_schedule))
if (dropout_schedule_index == 0):
assert ((dropout_schedule[0][0] == 1) and (data_fraction == 1))
return dropout_schedule[0][1]
(final_data_fraction, final_dropout) = dropout_schedule[(dropout_schedule_index - 1)]
if (final_data_fraction == initial_data_fraction):
assert (data_fraction == initial_data_fraction)
return initial_dropout
assert ((data_fraction >= initial_data_fraction) and (data_fraction < final_data_fraction))
return ((((data_fraction - initial_data_fraction) * (final_dropout - initial_dropout)) / (final_data_fraction - initial_data_fraction)) + initial_dropout) |
class KeyphraseDataset(torch.utils.data.Dataset):
    """Dataset of keyphrase-generation examples with collate helpers.

    Each example dict carries source token ids ('src'), an OOV-extended
    copy ('src_oov'), target keyphrases ('trg'/'trg_copy'), their raw
    strings, and the per-example OOV vocabulary. The collate_fn_* methods
    build padded, device-placed batches for the One2One, One2Seq and
    fixed-target training setups.

    NOTE(review): `build` takes `cls` but carries no @classmethod
    decorator -- it appears to have been stripped; confirm against the
    original source. Likewise `self.id2xword` (not `idx2word`) looks like
    a typo, but other code may rely on that attribute name.
    """
    def __init__(self, examples, word2idx, idx2word, device, load_train=True, fix_kp_num_len=False, max_kp_len=6, max_kp_num=20, seperate_pre_ab=False):
        # Keep only the fields the collate functions need.
        keys = ['src', 'src_oov', 'oov_dict', 'oov_list', 'src_str', 'trg_str', 'trg', 'trg_copy']
        filtered_examples = []
        for e in examples:
            filtered_example = {}
            for k in keys:
                filtered_example[k] = e[k]
            if ('oov_list' in filtered_example):
                filtered_example['oov_number'] = len(filtered_example['oov_list'])
            filtered_examples.append(filtered_example)
        self.examples = filtered_examples
        self.word2idx = word2idx
        self.id2xword = idx2word
        self.load_train = load_train
        self.device = device
        self.fix_kp_num_len = fix_kp_num_len
        if self.fix_kp_num_len:
            self.max_kp_len = max_kp_len
            self.max_kp_num = max_kp_num
            self.seperate_pre_ab = seperate_pre_ab
    def build(cls, examples, opt, load_train):
        """Alternate constructor wiring all options from an `opt` namespace."""
        return cls(examples, device=opt.device, word2idx=opt.vocab['word2idx'], idx2word=opt.vocab['idx2word'], load_train=load_train, fix_kp_num_len=opt.fix_kp_num_len, max_kp_len=opt.max_kp_len, max_kp_num=opt.max_kp_num, seperate_pre_ab=opt.seperate_pre_ab)
    def __getitem__(self, index):
        return self.examples[index]
    def __len__(self):
        return len(self.examples)
    def _pad(self, input_list):
        """Pad a list of 1D id sequences to a (batch, max_len) LongTensor.

        Returns (padded_batch, per-example lengths, float mask of
        non-PAD positions).
        """
        input_list_lens = [len(l) for l in input_list]
        max_seq_len = max(input_list_lens)
        padded_batch = (self.word2idx[PAD_WORD] * np.ones((len(input_list), max_seq_len)))
        for j in range(len(input_list)):
            current_len = input_list_lens[j]
            padded_batch[j][:current_len] = input_list[j]
        padded_batch = torch.LongTensor(padded_batch)
        input_mask = torch.ne(padded_batch, self.word2idx[PAD_WORD]).type(torch.FloatTensor)
        return (padded_batch, input_list_lens, input_mask)
    def _pad2d(self, input_list):
        """Tensorize already fixed-shape 2D targets; no padding is added."""
        input_list_lens = [[len(t) for t in ts] for ts in input_list]
        padded_batch = torch.LongTensor(input_list)
        input_mask = torch.ne(padded_batch, self.word2idx[PAD_WORD]).type(torch.FloatTensor)
        return (padded_batch, input_list_lens, input_mask)
    def collate_fn_common(self, batches, trg=None, trg_oov=None):
        """Shared collation: sort by source length, pad, move to device.

        Returns a 12-tuple; `original_indices` allows undoing the sort.
        """
        src = [b['src'] for b in batches]
        src_oov = [b['src_oov'] for b in batches]
        oov_lists = [b['oov_list'] for b in batches]
        src_str = [b['src_str'] for b in batches]
        trg_str = [b['trg_str'] for b in batches]
        batch_size = len(src)
        original_indices = list(range(batch_size))
        src_lens = [len(i) for i in src]
        # Sort descending by source length (as packed RNNs require).
        seq_pairs = sorted(zip(src_lens, src, src_oov, oov_lists, src_str, trg_str, original_indices), key=(lambda p: p[0]), reverse=True)
        (_, src, src_oov, oov_lists, src_str, trg_str, original_indices) = zip(*seq_pairs)
        if self.load_train:
            # Reorder the targets with the same key so they stay aligned.
            seq_pairs = sorted(zip(src_lens, trg, trg_oov), key=(lambda p: p[0]), reverse=True)
            (_, trg, trg_oov) = zip(*seq_pairs)
        (src, src_lens, src_mask) = self._pad(src)
        (src_oov, _, _) = self._pad(src_oov)
        src = src.to(self.device)
        src_mask = src_mask.to(self.device)
        src_oov = src_oov.to(self.device)
        if self.load_train:
            if self.fix_kp_num_len:
                (trg, trg_lens, trg_mask) = self._pad2d(trg)
                (trg_oov, _, _) = self._pad2d(trg_oov)
            else:
                (trg, trg_lens, trg_mask) = self._pad(trg)
                (trg_oov, _, _) = self._pad(trg_oov)
            trg = trg.to(self.device)
            trg_mask = trg_mask.to(self.device)
            trg_oov = trg_oov.to(self.device)
        else:
            (trg_lens, trg_mask) = (None, None)
        return (src, src_lens, src_mask, src_oov, oov_lists, src_str, trg_str, trg, trg_oov, trg_lens, trg_mask, original_indices)
    def collate_fn_one2one(self, batches):
        """One2One setup: each target phrase is terminated with EOS."""
        if self.load_train:
            trg = [(b['trg'] + [self.word2idx[EOS_WORD]]) for b in batches]
            trg_oov = [(b['trg_copy'] + [self.word2idx[EOS_WORD]]) for b in batches]
            return self.collate_fn_common(batches, trg, trg_oov)
        else:
            return self.collate_fn_common(batches)
    def collate_fn_one2seq(self, batches):
        """One2Seq setup: phrases joined with SEP, sequence ends with EOS."""
        if self.load_train:
            trg = []
            trg_oov = []
            for b in batches:
                trg_concat = []
                trg_oov_concat = []
                trg_size = len(b['trg'])
                assert (len(b['trg']) == len(b['trg_copy']))
                for (trg_idx, (trg_phase, trg_phase_oov)) in enumerate(zip(b['trg'], b['trg_copy'])):
                    # Skip phrases containing the present/absent separator token.
                    if (self.word2idx[PEOS_WORD] in trg_phase):
                        continue
                    if (trg_idx == (trg_size - 1)):
                        trg_concat += (trg_phase + [self.word2idx[EOS_WORD]])
                        trg_oov_concat += (trg_phase_oov + [self.word2idx[EOS_WORD]])
                    else:
                        trg_concat += (trg_phase + [self.word2idx[SEP_WORD]])
                        trg_oov_concat += (trg_phase_oov + [self.word2idx[SEP_WORD]])
                trg.append(trg_concat)
                trg_oov.append(trg_oov_concat)
            return self.collate_fn_common(batches, trg, trg_oov)
        else:
            return self.collate_fn_common(batches)
    def collate_fn_fixed_tgt(self, batches):
        """Fixed-slot targets: max_kp_num phrases of max_kp_len tokens each.

        Phrases longer than max_kp_len - 1 are dropped; each kept phrase
        is terminated with EOS and padded to max_kp_len; empty slots hold
        NULL plus padding. With `seperate_pre_ab`, present and absent
        phrases (split at the PEOS marker) each get half of the slots.
        """
        if self.load_train:
            if self.seperate_pre_ab:
                trg = []
                trg_oov = []
                for b in batches:
                    targets = [t for t in b['trg'] if (len(t) <= (self.max_kp_len - 1))]
                    oov_targets = [t for t in b['trg_copy'] if (len(t) <= (self.max_kp_len - 1))]
                    assert ([self.word2idx[PEOS_WORD]] in targets), 'the original training keyphrases must be seperated by <peos> !'
                    peos_idx = targets.index([self.word2idx[PEOS_WORD]])
                    present_targets = targets[:peos_idx][:(self.max_kp_num // 2)]
                    absent_targets = targets[(peos_idx + 1):][:(self.max_kp_num // 2)]
                    present_targets_oov = oov_targets[:peos_idx][:(self.max_kp_num // 2)]
                    absent_targets_oov = oov_targets[(peos_idx + 1):][:(self.max_kp_num // 2)]
                    present_targets = [((t + [self.word2idx[EOS_WORD]]) + ([self.word2idx[PAD_WORD]] * ((self.max_kp_len - len(t)) - 1))) for t in present_targets]
                    present_targets_oov = [((t + [self.word2idx[EOS_WORD]]) + ([self.word2idx[PAD_WORD]] * ((self.max_kp_len - len(t)) - 1))) for t in present_targets_oov]
                    extra_present_targets = ([([self.word2idx[NULL_WORD]] + ([self.word2idx[PAD_WORD]] * (self.max_kp_len - 1)))] * ((self.max_kp_num // 2) - len(present_targets)))
                    absent_targets = [((t + [self.word2idx[EOS_WORD]]) + ([self.word2idx[PAD_WORD]] * ((self.max_kp_len - len(t)) - 1))) for t in absent_targets]
                    absent_targets_oov = [((t + [self.word2idx[EOS_WORD]]) + ([self.word2idx[PAD_WORD]] * ((self.max_kp_len - len(t)) - 1))) for t in absent_targets_oov]
                    extra_absent_targets = ([([self.word2idx[NULL_WORD]] + ([self.word2idx[PAD_WORD]] * (self.max_kp_len - 1)))] * ((self.max_kp_num // 2) - len(absent_targets)))
                    trg.append((((present_targets + extra_present_targets) + absent_targets) + extra_absent_targets))
                    trg_oov.append((((present_targets_oov + extra_present_targets) + absent_targets_oov) + extra_absent_targets))
            else:
                trg = []
                trg_oov = []
                for b in batches:
                    targets = [((t + [self.word2idx[EOS_WORD]]) + ([self.word2idx[PAD_WORD]] * ((self.max_kp_len - len(t)) - 1))) for t in b['trg'] if (len(t) <= (self.max_kp_len - 1))][:self.max_kp_num]
                    oov_targets = [((t + [self.word2idx[EOS_WORD]]) + ([self.word2idx[PAD_WORD]] * ((self.max_kp_len - len(t)) - 1))) for t in b['trg_copy'] if (len(t) <= (self.max_kp_len - 1))][:self.max_kp_num]
                    extra_targets = ([([self.word2idx[NULL_WORD]] + ([self.word2idx[PAD_WORD]] * (self.max_kp_len - 1)))] * (self.max_kp_num - len(targets)))
                    trg.append((targets + extra_targets))
                    trg_oov.append((oov_targets + extra_targets))
            return self.collate_fn_common(batches, trg, trg_oov)
        else:
            return self.collate_fn_common(batches)
def format_train():
    """Convert raw training candidates into query.train / qrels.train files.

    Reads `train_candidates.txt` (qid,ansid,...) and `question.csv`
    (qid,query) from the module-level `input_dir`, and writes, for every
    question that has at least one relevant answer, a query line and one
    qrel line per answer into `output_dir`.

    Fixes the original's leaked file handles: both input files were
    opened with plain `open(...)` and never closed; all files are now
    managed with context managers. Output strings are unchanged.
    """
    qrels = defaultdict(set)
    # Map each question id to the set of its candidate answer ids.
    with open(os.path.join(input_dir, 'train_candidates.txt')) as f:
        f.readline()  # skip header row
        for line in f:
            (qid, ansid, _) = line.split(',')
            qrels[qid].add(ansid)
    with open(os.path.join(input_dir, 'question.csv')) as f, \
            open(os.path.join(output_dir, 'query.train'), 'w') as query_output, \
            open(os.path.join(output_dir, 'qrels.train'), 'w') as qrel_output:
        f.readline()  # skip header row
        for line in f:
            # The query text itself may contain commas: split only once.
            (qid, query) = line.split(',', maxsplit=1)
            query = query.strip()
            if (qid in qrels):
                query_output.write(f'{qid} {query}\n')
                for ansid in qrels[qid]:
                    # Qrels format: qid, iteration, docid, relevance.
                    qrel_output.write(f'{qid} 0 {ansid} 1\n')
def test_experiment_run_access_subingredient():
    """Running the experiment should expose the sub-ingredient's config.

    NOTE(review): `cfg` and `main` are never registered with the
    ingredient/experiment -- in sacred they would normally carry
    @somemod.config and @ex.main decorators, which appear to have been
    stripped; restore them from the original source, otherwise ex.run()
    has no main function to execute.
    """
    somemod = Ingredient('somemod')
    def cfg():
        a = 5
        b = 'foo'
    ex = Experiment('some_experiment', ingredients=[somemod])
    def main(somemod):
        return somemod
    r = ex.run().result
    assert (r['a'] == 5)
    assert (r['b'] == 'foo')
class ExperimentTemplate():
    """Decorator-style template that wraps an experiment function with
    logging and snapshot setup (garage-style `wrap_experiment`).

    Constructed without a function it acts as a decorator factory: the
    first call binds the function, later calls run it inside a fully
    configured ExperimentContext.

    NOTE(review): `_augment_name` and `_make_context` take `cls` but
    carry no @classmethod decorator -- likely stripped; confirm against
    the original source.
    """
    def __init__(self, *, function, log_dir, name, prefix, snapshot_mode, snapshot_gap, archive_launch_repo, name_parameters, use_existing_dir):
        self.function = function
        self.log_dir = log_dir
        self.name = name
        self.prefix = prefix
        self.snapshot_mode = snapshot_mode
        self.snapshot_gap = snapshot_gap
        self.archive_launch_repo = archive_launch_repo
        self.name_parameters = name_parameters
        self.use_existing_dir = use_existing_dir
        if (self.function is not None):
            self._update_wrap_params()
    def _update_wrap_params(self):
        """Adopt the wrapped function's metadata and derived signature."""
        functools.update_wrapper(self, self.function)
        self.__signature__ = _make_experiment_signature(self.function)
    def _augment_name(cls, options, name, params):
        """Optionally append parameter values ('k=v_...') to the run name.

        'passed' uses only explicitly supplied params; 'all' includes
        signature defaults; any other non-None value is rejected.
        """
        name_parameters = collections.OrderedDict()
        if (options['name_parameters'] == 'passed'):
            for param in options['signature'].parameters.values():
                try:
                    name_parameters[param.name] = params[param.name]
                except KeyError:
                    pass
        elif (options['name_parameters'] == 'all'):
            for param in options['signature'].parameters.values():
                name_parameters[param.name] = params.get(param.name, param.default)
        elif (options['name_parameters'] is not None):
            raise ValueError('wrap_experiment.name_parameters should be set to one of None, "passed", or "all"')
        param_str = '_'.join(('{}={}'.format(k, v) for (k, v) in name_parameters.items()))
        if param_str:
            return '{}_{}'.format(name, param_str)
        else:
            return name
    def _get_options(self, *args):
        """Merge instance settings with an optional options dict argument."""
        options = dict(name=self.name, function=self.function, prefix=self.prefix, name_parameters=self.name_parameters, log_dir=self.log_dir, archive_launch_repo=self.archive_launch_repo, snapshot_gap=self.snapshot_gap, snapshot_mode=self.snapshot_mode, use_existing_dir=self.use_existing_dir, signature=self.__signature__)
        if args:
            if ((len(args) == 1) and isinstance(args[0], dict)):
                for k in args[0]:
                    if (k not in options):
                        raise ValueError('Unknown key {} in wrap_experiment options'.format(k))
                options.update(args[0])
            else:
                raise ValueError('garage.experiment currently only supports keyword arguments')
        return options
    def _make_context(cls, options, **kwargs):
        """Create the log directory, attach all dowel outputs, record run
        metadata (variant json, git commit/diff, launcher archive), and
        return the ExperimentContext for the run."""
        name = options['name']
        if (name is None):
            name = options['function'].__name__
        name = cls._augment_name(options, name, kwargs)
        log_dir = options['log_dir']
        if (log_dir is None):
            log_dir = '{data}/local/{prefix}/{name}'.format(data=os.path.join(os.getcwd(), 'data'), prefix=options['prefix'], name=name)
        if options['use_existing_dir']:
            os.makedirs(log_dir, exist_ok=True)
        else:
            # Append a sequence number instead of clobbering earlier runs.
            log_dir = _make_sequential_log_dir(log_dir)
        tabular_log_file = os.path.join(log_dir, 'progress.csv')
        text_log_file = os.path.join(log_dir, 'debug.log')
        variant_log_file = os.path.join(log_dir, 'variant.json')
        metadata_log_file = os.path.join(log_dir, 'metadata.json')
        tb_dir = os.path.join(log_dir, 'tb')
        tabular_log_file_eval = os.path.join(log_dir, 'progress_eval.csv')
        text_log_file_eval = os.path.join(log_dir, 'debug_eval.log')
        tb_dir_eval = os.path.join(log_dir, 'tb_eval')
        tb_dir_plot = os.path.join(log_dir, 'tb_plot')
        text_log_file_tcp = os.path.join(log_dir, 'debug_tcp.log')
        dump_json(variant_log_file, kwargs)
        (git_root_path, metadata) = get_metadata()
        dump_json(metadata_log_file, metadata)
        if (git_root_path and options['archive_launch_repo']):
            make_launcher_archive(git_root_path=git_root_path, log_dir=log_dir)
        # Main logger outputs: text, csv, tensorboard, stdout.
        logger.add_output(dowel.TextOutput(text_log_file))
        logger.add_output(dowel.CsvOutput(tabular_log_file))
        logger.add_output(dowel.TensorBoardOutput(tb_dir, x_axis='TotalEnvSteps'))
        logger.add_output(dowel.StdOutput())
        # Separate dowel instances for the eval / plot / tcp streams.
        dowel_eval = dowel_wrapper.get_dowel('eval')
        logger_eval = dowel_eval.logger
        logger_eval.add_output(dowel_eval.TextOutput(text_log_file_eval))
        logger_eval.add_output(dowel_eval.CsvOutput(tabular_log_file_eval))
        logger_eval.add_output(dowel_eval.TensorBoardOutput(tb_dir_eval, x_axis='TotalEnvSteps'))
        logger_eval.add_output(dowel_eval.StdOutput())
        dowel_plot = dowel_wrapper.get_dowel('plot')
        logger_plot = dowel_plot.logger
        logger_plot.add_output(dowel_plot.TensorBoardOutput(tb_dir_plot, x_axis='TotalEnvSteps'))
        dowel_tcp = dowel_wrapper.get_dowel('tcp')
        logger_tcp = dowel_tcp.logger
        logger_tcp.add_output(dowel_tcp.TextOutput(text_log_file_tcp))
        logger_tcp.add_output(dowel_tcp.StdOutput())
        logger.push_prefix('[{}] '.format(name))
        logger.log('Logging to {}'.format(log_dir))
        git_commit = get_git_commit_hash()
        logger.log('Git commit: {}'.format(git_commit))
        git_diff_file_path = os.path.join(log_dir, 'git_diff_{}.patch'.format(git_commit))
        save_git_diff_to_file(git_diff_file_path)
        return ExperimentContext(snapshot_dir=log_dir, snapshot_mode=options['snapshot_mode'], snapshot_gap=options['snapshot_gap'])
    def __call__(self, *args, **kwargs):
        """First call binds the function (decorator mode); later calls run
        the experiment inside a configured context and tear logging down."""
        if (self.function is None):
            if ((len(args) != 1) or (len(kwargs) != 0) or (not callable(args[0]))):
                raise ValueError('Please apply the result of wrap_experiment() to a single function')
            self.function = args[0]
            self._update_wrap_params()
            return self
        else:
            ctxt = self._make_context(self._get_options(*args), **kwargs)
            result = self.function(ctxt, **kwargs)
            logger.remove_all()
            logger.pop_prefix()
            gc.collect()
            return result
def test():
    """Smoke test: confirm the package imports and print its version.

    Reads the VERSION file that ships next to this module. Fixes the
    original's leaked file handle by reading inside a context manager;
    printed output is unchanged.
    """
    current_file = os.path.dirname(__file__)
    print('Picasso has been successfully imported!')
    with open(os.path.join(current_file, './VERSION')) as version_file:
        print(('Version: ' + version_file.read().strip()))
def get_model_parallel_src_rank():
    """Global rank of the first process in this model-parallel group.

    Group members occupy contiguous global ranks, so the source rank is
    the caller's global rank rounded down to a multiple of the group size.
    """
    global_rank = torch.distributed.get_rank()
    group_size = get_model_parallel_world_size()
    return (global_rank // group_size) * group_size
class MoverScoreMetric(Metric):
    """Summarization metric backed by MoverScore (version 1 or 2)."""

    def __init__(self, version=2, stop_wordsf=os.path.join(dirname, 'examples/stopwords.txt'), n_gram=1, remove_subwords=True, batch_size=48):
        """Select the MoverScore implementation and load stop words.

        The chosen implementation is imported lazily so only the requested
        version's dependencies are needed.
        """
        self.version = version
        if (self.version == 1):
            from moverscore import get_idf_dict, word_mover_score
        else:
            from moverscore_v2 import get_idf_dict, word_mover_score
        self.get_idf_dict = get_idf_dict
        self.word_mover_score = word_mover_score
        stop_words = []
        if stop_wordsf is not None:
            with open(stop_wordsf) as handle:
                stop_words = handle.read().strip().split(' ')
        self.stop_words = stop_words
        self.n_gram = n_gram
        self.remove_subwords = remove_subwords
        self.batch_size = batch_size
    def evaluate_example(self, summary, reference):
        """Score a single summary against a single reference.

        Uses uniform IDF weights, as there is no corpus to estimate from.
        """
        uniform_idf_ref = defaultdict(lambda : 1.0)
        uniform_idf_hyp = defaultdict(lambda : 1.0)
        scores = self.word_mover_score([reference], [summary], uniform_idf_ref, uniform_idf_hyp, stop_words=self.stop_words, n_gram=self.n_gram, remove_subwords=self.remove_subwords)
        return {'mover_score': scores[0]}
    def evaluate_batch(self, summaries, references, aggregate=True):
        """Score a batch; references may be strings or lists of strings.

        Multi-reference scores are the mean over each summary's references.
        Returns either one aggregate dict or a per-example list of dicts.
        """
        multi_ref = isinstance(references[0], list)
        # IDF dictionaries are estimated from flattened reference strings.
        refs = [' '.join(ref) for ref in references] if multi_ref else references
        idf_dict_summ = self.get_idf_dict(summaries)
        idf_dict_ref = self.get_idf_dict(refs)
        if multi_ref:
            scores = []
            for (reference, summary) in zip(references, summaries):
                per_ref = self.word_mover_score(reference, [summary] * len(reference), idf_dict_ref, idf_dict_summ, stop_words=self.stop_words, n_gram=self.n_gram, remove_subwords=self.remove_subwords, batch_size=self.batch_size)
                scores.append(np.mean(per_ref))
        else:
            scores = self.word_mover_score(references, summaries, idf_dict_ref, idf_dict_summ, stop_words=self.stop_words, n_gram=self.n_gram, remove_subwords=self.remove_subwords, batch_size=self.batch_size)
        if aggregate:
            return {'mover_score': (sum(scores) / len(scores))}
        return [{'mover_score': score} for score in scores]
    def supports_multi_ref(self):
        """MoverScore can consume multiple references per summary."""
        return True
class GPyGP(BaseModel):
    """GPy-backed Gaussian-process regression model with optional Kumar
    input warping and one-hot encoding of enumerated inputs.

    Continuous inputs are min-max scaled to [-1, 1]; outputs are
    standardized. The kernel is a linear kernel plus an ARD Matern32.
    """
    def __init__(self, num_cont, num_enum, num_out, **conf):
        super().__init__(num_cont, num_enum, num_out, **conf)
        total_dim = num_cont
        if (num_enum > 0):
            # Enumerated dimensions are one-hot encoded and appended.
            self.one_hot = OneHotTransform(self.conf['num_uniqs'])
            total_dim += self.one_hot.num_out
        self.xscaler = MinMaxScaler(((- 1), 1))
        self.yscaler = StandardScaler()
        self.verbose = self.conf.get('verbose', False)
        self.num_epochs = self.conf.get('num_epochs', 200)
        self.warp = self.conf.get('warp', True)
        self.space = self.conf.get('space')
        self.num_restarts = self.conf.get('num_restarts', 20)
        if ((self.space is None) and self.warp):
            # Warping needs the design-space bounds; fall back gracefully.
            warnings.warn('Space not provided, set warp to False')
            self.warp = False
        if self.warp:
            # Silence GPy's per-parameter loggers for the warping params.
            for i in range(total_dim):
                logging.getLogger(f'a{i}').disabled = True
                logging.getLogger(f'b{i}').disabled = True
    def fit_scaler(self, Xc: np.ndarray, y: np.ndarray):
        """Fit input/output scalers.

        When the search space is known, its numeric bounds are appended so
        the x-scaler covers the whole optimization box, not just the
        observed points.
        """
        if ((Xc is not None) and (Xc.shape[1] > 0)):
            if (self.space is not None):
                opt_lb = self.space.opt_lb
                opt_ub = self.space.opt_ub
                num = self.space.num_numeric
                cont_lb = opt_lb[:num].astype('float').reshape([1, (- 1)])
                cont_ub = opt_ub[:num].astype('float').reshape([1, (- 1)])
                concat_x = np.concatenate([Xc, cont_lb, cont_ub], axis=0)
                self.xscaler.fit(concat_x)
            else:
                self.xscaler.fit(Xc)
        self.yscaler.fit(y)
    def trans(self, Xc: np.ndarray, Xe: np.ndarray, y: np.ndarray=None):
        """Scale continuous inputs, one-hot the enumerated ones, and
        concatenate into one design matrix; optionally transform y too."""
        if ((Xc is not None) and (Xc.shape[1] > 0)):
            Xc_t = self.xscaler.transform(Xc)
        else:
            Xc_t = np.zeros((Xe.shape[0], 0))
        if ((Xe is None) or (Xe.shape[1] == 0)):
            Xe_t = np.zeros((Xc.shape[0], 0))
        else:
            Xe_t = self.one_hot(Xe.astype('int'))
        Xall = np.concatenate([Xc_t, Xe_t], axis=1)
        if (y is not None):
            y_t = self.yscaler.transform(y)
            return (Xall, y_t)
        return Xall
    def fit(self, Xc: np.ndarray, Xe: np.ndarray, y: np.ndarray):
        """Fit the GP (optionally input-warped) with restarted optimization."""
        (Xc, Xe, y) = filter_nan(Xc, Xe, y, 'all')
        self.fit_scaler(Xc, y)
        (X, y) = self.trans(Xc, Xe, y)
        k1 = GPy.kern.Linear(X.shape[1], ARD=False)
        k2 = GPy.kern.Matern32(X.shape[1], ARD=True)
        # Initialize lengthscales at the per-dim spread, floored at 0.02.
        k2.lengthscale = np.std(X, axis=0).clip(min=0.02)
        k2.variance.set_prior(GPy.priors.Gamma(0.5, 1), warning=False)
        kern = (k1 + k2)
        if (not self.warp):
            self.gp = GPy.models.GPRegression(X, y, kern)
        else:
            # After scaling, continuous dims lie in [-1, 1] and one-hot
            # dims in [0, 1]; pass those bounds to the warping function.
            xmin = np.zeros(X.shape[1])
            xmax = np.ones(X.shape[1])
            xmin[:Xc.shape[1]] = (- 1)
            warp_f = KumarWarping(X, Xmin=xmin, Xmax=xmax)
            self.gp = InputWarpedGP(X, y, kern, warping_function=warp_f)
        log_gauss_prior = GPy.priors.LogGaussian((- 4.63), 0.5)
        self.gp.likelihood.variance.set_prior(log_gauss_prior, warning=False)
        self.gp.optimize_restarts(max_iters=self.num_epochs, verbose=self.verbose, num_restarts=self.num_restarts, robust=True)
        return self
    def predict(self, Xc, Xe):
        """Predict mean and (floored, un-standardized) variance."""
        Xall = self.trans(Xc, Xe)
        (py, ps2) = self.gp.predict(Xall)
        mu = self.yscaler.inverse_transform(py.reshape(((- 1), 1)))
        # Undo output standardization on the variance; keep it positive.
        var = ((self.yscaler.scale_ ** 2) * ps2.reshape(((- 1), 1)))
        return (mu, np.clip(var, a_min=np.finfo(var.dtype).eps, a_max=None))
    def sample_f(self):
        raise NotImplementedError('Thompson sampling is not supported for GP')
    def noise(self):
        """Observation-noise variance mapped back to the original y scale."""
        var_normalized = self.gp.likelihood.variance[0]
        return (var_normalized * (self.yscaler.scale_ ** 2)).reshape(self.num_out)
def patch_embed_forward(self, x):
    """Patch-embedding forward: project, optionally flatten to tokens, normalize.

    When `self.flatten` is set, (B, C, H, W) activations are reshaped to a
    (B, H*W, C) token sequence before the norm layer.
    """
    projected = self.proj(x)
    if self.flatten:
        # BCHW -> BNC token layout.
        projected = projected.flatten(2).transpose(1, 2)
    return self.norm(projected)
def load_model(model, checkpoint, args, mode='exact', train_mode='finetune', verbose=True, DEBUG=False):
    """Load (a subset of) a checkpoint's state dict into *model*.

    mode selects which checkpoint parameters are kept:
      'exact'      -- load everything as-is;
      'encoder'    -- drop classifier/cls head parameters;
      'classifier' -- keep only classifier parameters;
      'student'    -- keep only keys that also exist in *model*.
    Also normalizes legacy key names (gamma/beta -> weight/bias, strips
    the DataParallel 'module.' prefix), applies fp16 and device
    placement, optionally freezes layers via model.set_mode, and wraps
    the model in DataParallel when several GPUs are available. Returns
    the prepared model.
    """
    n_gpu = args.n_gpu
    device = args.device
    local_rank = (- 1)  # distributed loading is explicitly unsupported below
    if (checkpoint in [None, 'None']):
        if verbose:
            logger.info(('no checkpoint provided for %s!' % model._get_name()))
    else:
        if (not os.path.exists(checkpoint)):
            raise ValueError(('checkpoint %s not exist' % checkpoint))
        if verbose:
            logger.info(('loading %s finetuned model from %s' % (model._get_name(), checkpoint)))
        model_state_dict = torch.load(checkpoint)
        # Rename legacy/wrapped keys: gamma->weight, beta->bias,
        # and strip the 'module.' prefix left by DataParallel.
        old_keys = []
        new_keys = []
        for key in model_state_dict.keys():
            new_key = None
            if ('gamma' in key):
                new_key = key.replace('gamma', 'weight')
            if ('beta' in key):
                new_key = key.replace('beta', 'bias')
            if key.startswith('module.'):
                new_key = key.replace('module.', '')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for (old_key, new_key) in zip(old_keys, new_keys):
            model_state_dict[new_key] = model_state_dict.pop(old_key)
        # Filter the state dict according to the requested mode.
        del_keys = []
        keep_keys = []
        if (mode == 'exact'):
            pass
        elif (mode == 'encoder'):
            for t in list(model_state_dict.keys()):
                if (('classifier' in t) or ('cls' in t)):
                    del model_state_dict[t]
                    del_keys.append(t)
                else:
                    keep_keys.append(t)
        elif (mode == 'classifier'):
            for t in list(model_state_dict.keys()):
                if ('classifier' not in t):
                    del model_state_dict[t]
                    del_keys.append(t)
                else:
                    keep_keys.append(t)
        elif (mode == 'student'):
            model_keys = model.state_dict().keys()
            for t in list(model_state_dict.keys()):
                if (t not in model_keys):
                    del model_state_dict[t]
                    del_keys.append(t)
                else:
                    keep_keys.append(t)
        else:
            raise ValueError(('%s not available for now' % mode))
        model.load_state_dict(model_state_dict)
        if (mode != 'exact'):
            logger.info(('delete %d layers, keep %d layers' % (len(del_keys), len(keep_keys))))
        if DEBUG:
            print('deleted keys =\n {}'.format('\n'.join(del_keys)))
            print(('*' * 77))
            print('kept keys =\n {}'.format('\n'.join(keep_keys)))
    if args.fp16:
        logger.info('fp16 activated, now call model.half()')
        model.half()
    model.to(device)
    if (train_mode != 'finetune'):
        if verbose:
            logger.info('freeze BERT layer in DEBUG mode')
        model.set_mode(train_mode)
    if (local_rank != (- 1)):
        raise NotImplementedError('not implemented for local_rank != 1')
    elif (n_gpu > 1):
        logger.info('data parallel because more than one gpu')
        model = torch.nn.DataParallel(model)
    return model
class SpatialBatchNormalization(Layer):
    """Python binding for BigDL's SpatialBatchNormalization layer.

    Per-channel batch normalization over 4D (NCHW or NHWC) inputs, with
    optional affine parameters and user-supplied initial values.
    """
    def __init__(self, n_output, eps=1e-05, momentum=0.1, affine=True, init_weight=None, init_bias=None, init_grad_weight=None, init_grad_bias=None, data_format='NCHW', bigdl_type='float'):
        # Convert the optional numpy initializers to JTensors for the JVM side.
        jweight = JTensor.from_ndarray(init_weight)
        jbias = JTensor.from_ndarray(init_bias)
        jgrad_weight = JTensor.from_ndarray(init_grad_weight)
        jgrad_bias = JTensor.from_ndarray(init_grad_bias)
        super(SpatialBatchNormalization, self).__init__(None, bigdl_type, n_output, eps, momentum, affine, jweight, jbias, jgrad_weight, jgrad_bias, data_format)
    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        """Configure weight/bias initialization; returns self for chaining."""
        callBigDlFunc(self.bigdl_type, 'setInitMethod', self.value, weight_init_method, bias_init_method)
        return self
class Conf(object):
    """Load, validate, and normalize a user-supplied YAML configuration.

    The parsed-and-validated config is exposed as ``self.usr_cfg`` (a DotDict).
    """

    def __init__(self, cfg_fname):
        # cfg_fname: path to the user's YAML config file (must not be None).
        assert (cfg_fname is not None)
        self.usr_cfg = DotDict(self._read_cfg(cfg_fname))

    def _read_cfg(self, cfg_fname):
        """Parse and schema-validate the YAML file.

        If the file has no top-level ``version`` key, a ``version:`` entry is
        inserted just above the ``model:`` section and the file is rewritten
        in place.

        Raises:
            RuntimeError: if the file does not exist or fails to parse/validate.
        """
        try:
            with open(cfg_fname, 'r') as f:
                content = f.read()
                cfg = yaml.safe_load(content)
                validated_cfg = schema.validate(cfg)
                if ('version' not in cfg):
                    # Reuse the indentation of the existing `model:` line so the
                    # inserted `version:` entry matches the file's style.
                    leading_whitespace = re.search('[ \\t]*model\\s*:', content).group().split('model')[0]
                    content = re.sub('model\\s*:', 'version: {}\n\n{}model:'.format(float(__version__.split('.')[0]), leading_whitespace), content)
                    with open(cfg_fname, 'w') as f:
                        f.write(content)
                return validated_cfg
        except FileNotFoundError as f:
            logger.error('{}.'.format(f))
            raise RuntimeError('The yaml file is not exist. Please check the file name or path.')
        except Exception as e:
            logger.error('{}.'.format(e))
            raise RuntimeError('The yaml file format is not correct. Please refer to document.')

    def map_pyconfig_to_cfg(self, pythonic_config):
        """Translate a pythonic config object into dotted-key entries of usr_cfg.

        Builds a flat {dotted.key: value} mapping from the quantization /
        distillation / pruning / nas / options / benchmark sub-configs, then
        writes each non-None value into ``self.usr_cfg`` via deep_set.
        """
        mapping = {}
        if (pythonic_config.quantization is not None):
            mapping.update({'device': pythonic_config.quantization.device, 'model.inputs': pythonic_config.quantization.inputs, 'model.outputs': pythonic_config.quantization.outputs, 'model.backend': pythonic_config.quantization.backend, 'model.quant_format': pythonic_config.quantization.quant_format, 'model.domain': pythonic_config.quantization.domain, 'quantization.recipes': pythonic_config.quantization.recipes, 'quantization.approach': pythonic_config.quantization.approach, 'quantization.example_inputs': pythonic_config.quantization.example_inputs, 'quantization.calibration.sampling_size': pythonic_config.quantization.calibration_sampling_size, 'quantization.optype_wise': pythonic_config.quantization.op_type_dict, 'quantization.op_wise': pythonic_config.quantization.op_name_dict, 'tuning.strategy.name': pythonic_config.quantization.strategy, 'tuning.accuracy_criterion.relative': pythonic_config.quantization.accuracy_criterion.relative, 'tuning.accuracy_criterion.absolute': pythonic_config.quantization.accuracy_criterion.absolute, 'tuning.accuracy_criterion.higher_is_better': pythonic_config.quantization.accuracy_criterion.higher_is_better, 'tuning.objective': pythonic_config.quantization.objective, 'tuning.exit_policy.timeout': pythonic_config.quantization.timeout, 'tuning.exit_policy.max_trials': pythonic_config.quantization.max_trials, 'tuning.exit_policy.performance_only': pythonic_config.quantization.performance_only, 'use_bf16': pythonic_config.quantization.use_bf16, 'quantization.quant_level': pythonic_config.quantization.quant_level, 'reduce_range': pythonic_config.quantization.reduce_range})
            if pythonic_config.quantization.diagnosis:
                # Diagnosis mode runs exactly one tuning trial.
                mapping.update({'tuning.diagnosis': True, 'tuning.exit_policy.max_trials': 1})
            if pythonic_config.quantization.strategy_kwargs:
                st_kwargs = pythonic_config.quantization.strategy_kwargs
                # Only whitelisted strategy kwargs are forwarded.
                for st_key in ['sigopt_api_token', 'sigopt_project_id', 'sigopt_experiment_name', 'accuracy_weight', 'latency_weight', 'hawq_v2_loss', 'confidence_batches']:
                    if (st_key in st_kwargs):
                        st_val = st_kwargs[st_key]
                        mapping.update({('tuning.strategy.' + st_key): st_val})
        if (pythonic_config.distillation is not None):
            mapping.update({'distillation.train.criterion': pythonic_config.distillation.criterion, 'distillation.train.optimizer': pythonic_config.distillation.optimizer})
        if (pythonic_config.pruning is not None):
            mapping.update({'pruning.approach.weight_compression': pythonic_config.pruning.weight_compression})
        if (pythonic_config.nas is not None):
            mapping.update({'nas.approach': pythonic_config.nas.approach, 'nas.search': pythonic_config.nas.search, 'nas.dynas': pythonic_config.nas.dynas})
        if (pythonic_config.options is not None):
            mapping.update({'tuning.random_seed': pythonic_config.options.random_seed, 'tuning.workspace.path': pythonic_config.options.workspace, 'tuning.workspace.resume': pythonic_config.options.resume_from, 'tuning.tensorboard': pythonic_config.options.tensorboard})
        if (pythonic_config.benchmark is not None):
            if (pythonic_config.benchmark.inputs != []):
                mapping.update({'model.inputs': pythonic_config.benchmark.inputs})
            if (pythonic_config.benchmark.outputs != []):
                mapping.update({'model.outputs': pythonic_config.benchmark.outputs})
            mapping.update({'evaluation.performance.warmup': pythonic_config.benchmark.warmup, 'evaluation.performance.iteration': pythonic_config.benchmark.iteration, 'evaluation.performance.configs.cores_per_instance': pythonic_config.benchmark.cores_per_instance, 'evaluation.performance.configs.num_of_instance': pythonic_config.benchmark.num_of_instance, 'evaluation.performance.configs.inter_num_of_threads': pythonic_config.benchmark.inter_num_of_threads, 'evaluation.performance.configs.intra_num_of_threads': pythonic_config.benchmark.intra_num_of_threads, 'evaluation.accuracy.configs.cores_per_instance': pythonic_config.benchmark.cores_per_instance, 'evaluation.accuracy.configs.num_of_instance': pythonic_config.benchmark.num_of_instance, 'evaluation.accuracy.configs.inter_num_of_threads': pythonic_config.benchmark.inter_num_of_threads, 'evaluation.accuracy.configs.intra_num_of_threads': pythonic_config.benchmark.intra_num_of_threads})
            if pythonic_config.benchmark.diagnosis:
                mapping.update({'evaluation.diagnosis': pythonic_config.benchmark.diagnosis})
            # The benchmark backend only fills in / overrides a 'default' one.
            if ('model.backend' not in mapping):
                mapping.update({'model.backend': pythonic_config.benchmark.backend})
            elif ((mapping['model.backend'] == 'default') and (pythonic_config.benchmark.backend != 'default')):
                mapping.update({'model.backend': pythonic_config.benchmark.backend})
        if ('model.backend' not in mapping):
            mapping.update({'model.backend': 'default'})
        for (k, v) in mapping.items():
            if (k in ['tuning.accuracy_criterion.relative', 'tuning.accuracy_criterion.absolute']):
                # Keep only the criterion flavor (relative vs absolute) that
                # the user selected; drop the stale counterpart from usr_cfg.
                target_key = str(pythonic_config.quantization.accuracy_criterion)
                if ((target_key not in k) and ('accuracy_criterion' in self.usr_cfg.tuning)):
                    if ((target_key in self.usr_cfg.tuning.accuracy_criterion) and (k.split('.')[(- 1)] in self.usr_cfg.tuning.accuracy_criterion)):
                        self.usr_cfg.tuning.accuracy_criterion.pop(k.split('.')[(- 1)])
                    continue
            if (v is not None):
                deep_set(self.usr_cfg, k, v)

    def _convert_cfg(self, src, dst):
        """Recursively merge dict *src* into *dst* (src wins); return dst.

        For a few special sections, when the two sides share no keys (ignoring
        'higher_is_better'), the destination section is replaced wholesale so
        user settings do not mix with defaults.
        """
        for key in src:
            if (key in dst):
                if (isinstance(dst[key], dict) and isinstance(src[key], dict)):
                    if (key in ['accuracy_criterion', 'metric', 'dataset', 'criterion', 'optimizer']):
                        inter_key = (src[key].keys() & (dst[key].keys() - {'higher_is_better'}))
                        if (len(inter_key) == 0):
                            dst[key] = {}
                    if ((key == 'accuracy') and src[key].get('multi_metrics', None)):
                        # multi_metrics supersedes a single 'metric' entry.
                        dst[key].pop('metric', None)
                    self._convert_cfg(src[key], dst[key])
                elif (dst[key] == src[key]):
                    pass
                else:
                    dst[key] = src[key]
            elif isinstance(src[key], dict):
                dst[key] = DotDict(self._convert_cfg(src[key], {}))
            else:
                dst[key] = src[key]
        return dst
def test_from_spark_xshards(orca_context_fixture):
    """Smoke test: collecting RayXShards yields the source ndarray contents."""
    xshards, expected_arrays = get_ray_xshards()
    collected_parts = xshards.collect()
    verify_collect_results(collected_parts, expected_arrays)
def get_plot_color(ind, ncolors=10):
    """Return an RGB color for series index *ind*, cycling modulo *ncolors*.

    Hues are sampled evenly over [0, 0.8] at fixed saturation (1) and
    value (0.7), so consecutive indices get visually distinct colors.
    """
    hues = jnp.linspace(0, 0.8, ncolors)
    palette = [hsv_to_rgb((hue, 1, 0.7)) for hue in hues]
    return palette[ind % ncolors]
def main(args):
    """Run a single-agent navigation demo on a small random rail environment.

    Command line: ``--sleep-for-animation=<bool>`` controls whether each
    rendered frame pauses briefly so the animation is watchable.
    """
    try:
        # BUG FIX: dropped the stray '' entry from the long-option list;
        # it registered an empty long option by accident.
        (opts, args) = getopt.getopt(args, '', ['sleep-for-animation='])
    except getopt.GetoptError as err:
        print(str(err))
        sys.exit(2)
    sleep_for_animation = True
    for (o, a) in opts:
        # BUG FIX: the original tested `o in '--sleep-for-animation'`, a
        # substring check that also matches '', '-', '--sleep', etc.
        # An exact comparison is what was intended.
        if (o == '--sleep-for-animation'):
            sleep_for_animation = str2bool(a)
        else:
            assert False, 'unhandled option'
    env = RailEnv(width=7, height=7, rail_generator=complex_rail_generator(nr_start_goal=10, nr_extra=1, min_dist=5, max_dist=99999, seed=1), schedule_generator=complex_schedule_generator(), number_of_agents=1, obs_builder_object=SingleAgentNavigationObs())
    (obs, info) = env.reset()
    env_renderer = RenderTool(env)
    env_renderer.render_env(show=True, frames=True, show_observations=True)
    for step in range(100):
        # The observation is a one-hot over possible moves; actions are 1-based.
        action = (np.argmax(obs[0]) + 1)
        (obs, all_rewards, done, _) = env.step({0: action})
        print('Rewards: ', all_rewards, ' [done=', done, ']')
        env_renderer.render_env(show=True, frames=True, show_observations=True)
        if sleep_for_animation:
            time.sleep(0.1)
        if done['__all__']:
            break
    env_renderer.close_window()
# NOTE(review): the bare `_model` expression below looks like a mangled
# decorator (probably `@register_model`); as written it is only a no-op name
# lookup -- confirm against the original source.
_model
def vit_tiny_r_s16_p8_224(pretrained=False, **kwargs):
    """R+ViT hybrid: tiny ViT (patch 8, embed 192, depth 12, 3 heads) on a ResNetV2 backbone."""
    # layers=() builds a ResNetV2 with no residual stages (presumably stem
    # only -- confirm against _resnetv2).
    backbone = _resnetv2(layers=(), **kwargs)
    model_kwargs = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3, **kwargs)
    model = _create_vision_transformer_hybrid('vit_tiny_r_s16_p8_224', backbone=backbone, pretrained=pretrained, **model_kwargs)
    return model
def get_stories(f, only_supporting=False):
    """Parse every story in the file whose path is *f*.

    Args:
        f: path of the file to read.
        only_supporting: forwarded to parse_stories.

    Returns:
        Whatever parse_stories produces for the file's lines.
    """
    # Use a distinct name for the handle: the original `with open(f) as f`
    # shadowed the path argument, which is confusing and error-prone.
    with open(f) as fp:
        return parse_stories(fp.readlines(), only_supporting=only_supporting)
class RayTuneReporter(Callback):
    """Callback that forwards the trainer's latest metrics to Ray Tune."""

    def on_epoch_end(self, epoch: int, logs: Optional[Dict]=None, metric: Optional[float]=None):
        # Report the most recent value of every tracked metric, plus the
        # latest learning rates when the trainer records an lr_history.
        latest = {name: values[(- 1)] for (name, values) in self.trainer.history.items()}
        if hasattr(self.trainer, 'lr_history'):
            latest.update({name: values[(- 1)] for (name, values) in self.trainer.lr_history.items()})
        tune.report(latest)
def predict(model, data):
    """Run voted prediction using the module-level experiment settings."""
    voting_method = exsettings['voting']
    voting_overlap = exsettings['voting_overlap']
    return features.predict_voted(exsettings, model, data, loader=load_sample,
                                  method=voting_method, overlap=voting_overlap)
class CLIPScore(nn.Module):
    """CLIPScore / RefCLIPScore image-caption scorer built on HF CLIP ViT-B/32.

    Scores are w * max(cos(image, text), 0) (clip_s), optionally combined with
    reference captions via a harmonic mean (refclip_s). Can also train an
    auxiliary grammar classifier on top of the text encoder.
    """

    def __init__(self, clipscore_w=2.5, image_size=224, mode='clip_s', use_grammar=False, joint_out=False):
        # clipscore_w: scale factor applied to the cosine similarity.
        # mode: default scoring mode used by forward() when none is passed.
        super(CLIPScore, self).__init__()
        self.clip_model = CLIPModel.from_pretrained('openai/clip-vit-base-patch32')
        self.tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32')
        self.clip_model.eval()
        self.clipscore_w = clipscore_w
        self.image_transform = self._transform(image_size)
        self.mode = mode
        assert (mode in ['clip_s', 'refclip_s'])
        self.use_grammar = use_grammar
        self.joint_out = joint_out
        if (self.use_grammar and (joint_out is False)):
            # Binary (grammatical / ungrammatical) head over pre-projection
            # text features; trained in train_step.
            self.grammar_score_head = nn.Sequential(nn.Linear(self.clip_model.text_embed_dim, self.clip_model.projection_dim, bias=False), nn.ReLU(), nn.Linear(self.clip_model.projection_dim, 2, bias=False))

    def _transform(self, n_px):
        # NOTE(review): Normalize std of (0., 0., 0.) divides by zero, and the
        # means look truncated vs CLIP's published normalization constants
        # ((0.48145466, 0.4578275, 0.40821073) / (0.26862954, 0.26130258,
        # 0.27577711)) -- these constants look corrupted; confirm upstream.
        return Compose([Resize(n_px, interpolation=Image.BICUBIC), CenterCrop(n_px), (lambda image: image.convert('RGB')), ToTensor(), Normalize((0., 0.4578275, 0.), (0., 0., 0.))])

    def load_image(self, image_path):
        """Open an image file with PIL (no decoding/conversion done here)."""
        image = Image.open(image_path)
        return image

    def image_extract(self, image):
        """Return L2-normalized CLIP image embeddings, shape (-1, proj_dim).

        Accepts a path, a PIL image, or an already-built tensor.
        """
        if isinstance(image, str):
            image = self.load_image(image)
        if (not isinstance(image, torch.Tensor)):
            image = self.image_transform(image)
        img_tensor = image.view((- 1), 3, 224, 224)
        device = next(self.clip_model.parameters()).device
        img_tensor = img_tensor.to(device)
        clip_model = self.clip_model
        img_feat = clip_model.vision_model(img_tensor).pooler_output
        img_feat = clip_model.visual_projection(img_feat)
        # Unit-normalize so downstream dot products are cosine similarities.
        img_feat = (img_feat / img_feat.norm(dim=(- 1), keepdim=True))
        return img_feat

    def text_extract(self, text, prompt='A photo depicts', proj_norm=True):
        """Encode caption(s), prefixed with *prompt*, into CLIP text features.

        text may be a str, a list of str, or a (input_ids, attention_mask)
        tensor tuple. With proj_norm=True the result is projected and
        L2-normalized; otherwise the raw pooled text features are returned.
        """
        if isinstance(text, str):
            text_batch = [' '.join([prompt, text])]
        elif isinstance(text, list):
            text_batch = [' '.join([prompt, txt]) for txt in text]
        if (isinstance(text, tuple) and isinstance(text[0], torch.Tensor)):
            (input_ids, attention_mask) = text
        else:
            input_text = text_batch
            tokenized = self.tokenizer(input_text, return_tensors='pt', padding=True, truncation=True)
            input_ids = tokenized.input_ids
            attention_mask = tokenized.attention_mask
        clip_model = self.clip_model
        device = next(self.clip_model.parameters()).device
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        text_feat = clip_model.text_model(input_ids, attention_mask).pooler_output
        if proj_norm:
            text_feat = clip_model.text_projection(text_feat)
            text_feat = (text_feat / text_feat.norm(dim=(- 1), keepdim=True))
        return text_feat

    def calc_clip_s(self, img_feat, text_feat):
        """CLIP-S: w * max(cosine(img, text), 0), per row."""
        return (self.clipscore_w * torch.relu((img_feat * text_feat).sum(dim=(- 1))))

    def calc_refclip_s(self, img_feat=None, text_feat=None, ref_text_feat=None, ref_text_mask=None, clip_s=None):
        """RefCLIP-S: harmonic mean of CLIP-S and the best text-reference score.

        ref_text_feat is reshaped to (B, K, dim) for K references per image;
        ref_text_mask zeroes out padded reference slots before the max.
        """
        if (clip_s is None):
            clip_s = self.calc_clip_s(img_feat, text_feat)
        (B, dim) = img_feat.size()
        ref_text_feat = ref_text_feat.view(B, (- 1), dim)
        K = ref_text_feat.size(1)
        text_feat = text_feat.view(B, 1, dim).expand((- 1), K, (- 1))
        assert (ref_text_feat.size() == text_feat.size()), (ref_text_feat.size(), text_feat.size())
        # Candidate-vs-reference similarity, then best reference per image.
        ref_score = self.calc_clip_s(text_feat, ref_text_feat)
        if (ref_text_mask is not None):
            if (not isinstance(ref_text_mask, torch.Tensor)):
                ref_text_mask = torch.tensor(ref_text_mask, dtype=ref_score.dtype, device=ref_score.device)
            ref_score = (ref_score.view(B, K) * ref_text_mask.view(B, K))
        ref_score = ref_score.view(B, K).max(dim=1).values
        assert (clip_s.size() == (B,))
        assert (clip_s.size() == ref_score.size())
        # Harmonic mean of the two scores.
        refclip_s = (2 / ((1 / clip_s) + (1 / ref_score)))
        return refclip_s

    # NOTE(review): bare `_grad()` below looks like a mangled decorator
    # (probably `@torch.no_grad()`); as written it is a plain call -- confirm.
    _grad()
    def forward(self, images=None, text=None, img_feat=None, text_feat=None, ref_text=None, ref_text_feat=None, ref_text_mask=None, prompt='A photo depicts', mode=None):
        """Score image/caption pairs; returns clip_s or refclip_s per *mode*.

        Precomputed img_feat/text_feat/ref_text_feat may be passed to skip
        the corresponding encoder calls.
        """
        if (img_feat is None):
            img_feat = self.image_extract(images)
        img_feat = img_feat.view((- 1), 512)
        B = img_feat.size(0)
        if (text_feat is None):
            text_feat = self.text_extract(text, prompt=prompt)
        text_feat = text_feat.view((- 1), 512)
        if (mode is None):
            mode = self.mode
        assert (mode in ['clip_s', 'refclip_s'])
        if (mode == 'clip_s'):
            clip_s = self.calc_clip_s(img_feat, text_feat)
            return clip_s
        elif (mode == 'refclip_s'):
            if (ref_text_feat is None):
                ref_text_feat = self.text_extract(ref_text, prompt=prompt)
                ref_text_feat = ref_text_feat.view((- 1), 512)
            refclip_s = self.calc_refclip_s(img_feat, text_feat, ref_text_feat, ref_text_mask=ref_text_mask)
            return refclip_s

    def train_step(self, images=None, text=None, img_feat=None, text_feat=None, neg_text=None, neg_text_feat=None, prompt='A photo depicts', **kwargs):
        """One training step: contrastive CLIP loss + grammar-classifier loss.

        NOTE(review): this always uses self.grammar_score_head, which only
        exists when use_grammar=True and joint_out=False -- presumably
        train_step is only called in that configuration; confirm.
        """
        if (img_feat is None):
            img_feat = self.image_extract(images)
        img_feat = img_feat.view((- 1), 512)
        B = img_feat.size(0)
        if (text_feat is None):
            # Keep the pre-projection features for the grammar head; project
            # and normalize a copy for the contrastive loss.
            text_feat = self.text_extract(text, prompt=prompt, proj_norm=False)
            text_cont_feat = self.clip_model.text_projection(text_feat)
            text_cont_feat = (text_cont_feat / text_cont_feat.norm(dim=(- 1), keepdim=True))
        text_cont_feat = text_cont_feat.view(B, 512)
        logit_scale = self.clip_model.logit_scale.exp()
        logits_per_text = (torch.matmul(text_cont_feat, img_feat.t()) * logit_scale)
        clip_loss = clip_loss_fn(logits_per_text)
        # Grammar head: positives are the real captions, negatives the
        # corrupted ones; standard 2-way cross-entropy.
        pos_text_feat = text_feat.view(B, 512)
        neg_text_feat = self.text_extract(neg_text, prompt=prompt, proj_norm=False).view(B, 512)
        grammar_text_feat = torch.cat([pos_text_feat, neg_text_feat], dim=0)
        grammar_text_logit = self.grammar_score_head(grammar_text_feat)
        grammar_labels = torch.LongTensor((([1] * B) + ([0] * B))).to(grammar_text_logit.device).view((2 * B))
        grammar_loss = torch.nn.functional.cross_entropy(grammar_text_logit, grammar_labels)
        grammar_pred = grammar_text_logit.argmax(dim=1, keepdim=False)
        grammar_pos_pred = grammar_pred[:B]
        grammar_neg_pred = grammar_pred[B:]
        out = {'clip_loss': clip_loss, 'grammar_loss': grammar_loss, 'img_feat': img_feat, 'text_feat': text_cont_feat, 'neg_text_feat': neg_text_feat, 'grammar_pos_pred': grammar_pos_pred, 'grammar_neg_pred': grammar_neg_pred}
        return out
class CosineAnnealingWarmUpRestarts(lr_scheduler._LRScheduler):
    """Cosine-annealing LR schedule with linear warmup and warm restarts.

    For the first T_warmup iterations the LR ramps linearly from 0 to eta_max;
    afterwards it follows cosine cycles of length T_0 (each subsequent cycle
    scaled by T_mult), with gamma applied as a decay factor.
    """

    def __init__(self, optimizer, T_0, T_mult=1, eta_max=0.1, T_warmup=10000, gamma=1.0, last_epoch=(- 1)):
        # T_0: length (in iterations) of the first cosine cycle.
        # T_mult: multiplicative growth of the cycle length after each restart.
        # eta_max: peak learning rate reached at the end of warmup.
        # T_warmup: number of linear warmup iterations.
        # gamma: decay factor (applied to eta_max on restart, and inside
        #   get_lr -- see note there).
        self.T_0 = T_0
        self.T_mult = T_mult
        self.eta_max = eta_max
        self.T_warmup = T_warmup
        self.gamma = gamma
        self.T_cur = 0
        self.lr_min = 0
        self.iteration = 0
        super(CosineAnnealingWarmUpRestarts, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Compute the current LR. NOTE: this mutates scheduler state
        (self.iteration, self.T_cur, self.lr_min) as a side effect, so it must
        be called exactly once per step."""
        if (self.iteration < self.T_warmup):
            # Linear warmup: 0 -> eta_max over T_warmup iterations.
            lr = ((self.eta_max * self.iteration) / self.T_warmup)
        else:
            # Locate position T_cur within the current cycle of length T_i.
            self.T_cur = (self.iteration - self.T_warmup)
            T_i = self.T_0
            while (self.T_cur >= T_i):
                self.T_cur -= T_i
                T_i *= self.T_mult
            # NOTE(review): gamma is exponentiated by the per-iteration offset
            # T_cur rather than by the cycle index -- looks suspicious for a
            # "decay per restart" semantic; confirm intended behavior.
            self.lr_min = (self.eta_max * (self.gamma ** self.T_cur))
            lr = (self.lr_min + ((0.5 * (self.eta_max - self.lr_min)) * (1 + math.cos(((math.pi * self.T_cur) / T_i)))))
        self.iteration += 1
        # Same LR for every param group.
        return [lr for _ in self.optimizer.param_groups]

    def step(self, epoch=None):
        """Advance one step: update the LR, then handle a possible restart."""
        if (epoch is None):
            epoch = (self.last_epoch + 1)
        self.last_epoch = epoch
        self._update_lr()
        self._update_T()

    def _update_lr(self):
        # NOTE(review): only param_groups[0] is updated, even though get_lr
        # returns one value per group -- confirm single-group usage.
        self.optimizer.param_groups[0]['lr'] = self.get_lr()[0]

    def _update_T(self):
        # On reaching the end of a cycle: reset counters, lengthen the next
        # cycle by T_mult, and decay the peak LR by gamma.
        if (self.T_cur == self.T_0):
            self.T_cur = 0
            self.lr_min = 0
            self.iteration = 0
            self.T_0 *= self.T_mult
            self.eta_max *= self.gamma
class AttnBasicBlock(nn.Module):
    """ResNet BasicBlock carrying an `attention` flag (stored, not used here).

    Main path: conv3x3 -> BN -> ReLU -> conv3x3 -> BN; the input (optionally
    passed through `downsample`) is added as a skip connection before a final
    ReLU.
    """
    expansion: int = 1

    def __init__(self, inplanes: int, planes: int, stride: int=1, downsample: Optional[nn.Module]=None, groups: int=1, base_width: int=64, dilation: int=1, norm_layer: Optional[Callable[(..., nn.Module)]]=None, attention: bool=True) -> None:
        super(AttnBasicBlock, self).__init__()
        # The flag is recorded but not consulted by forward() in this block.
        self.attention = attention
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        # BasicBlock has no grouped/wide/dilated variants.
        if ((groups != 1) or (base_width != 64)):
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if (dilation > 1):
            raise NotImplementedError('Dilation > 1 not supported in BasicBlock')
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        # Shortcut branch (identity, or spatial/channel match via downsample).
        shortcut = x if (self.downsample is None) else self.downsample(x)
        # Main branch.
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        # Residual add, then final activation.
        y += shortcut
        return self.relu(y)
def test_cuda_rng_tracker(model_parallel_size):
    """Check that the CUDA RNG tracker forks/restores generator state correctly.

    Draws reference tensors from two seeds, then interleaves draws on the
    default stream and inside a tracked 'test' fork; results must diverge
    between the two streams yet reproduce the references exactly.
    NOTE: statement order is the test -- every torch.randn call advances an
    RNG, so no call may be moved or removed.
    """
    if (torch.distributed.get_rank() == 0):
        print('> testing cuda rng tracker with size {} ...'.format(model_parallel_size))
    mpu.initialize_model_parallel(model_parallel_size)
    model_parallel_size = mpu.get_model_parallel_world_size()
    seed_1 = 1234
    seed_2 = 4321
    size = [12, 21]
    tensor = torch.cuda.FloatTensor(size)
    # Reference draws: two consecutive tensors for each seed.
    torch.cuda.manual_seed(seed_1)
    torch.randn(size, out=tensor)
    target_11 = tensor.clone()
    torch.randn(size, out=tensor)
    target_12 = tensor.clone()
    torch.cuda.manual_seed(seed_2)
    torch.randn(size, out=tensor)
    target_21 = tensor.clone()
    torch.randn(size, out=tensor)
    target_22 = tensor.clone()
    # Now interleave: default stream seeded with seed_1, tracked fork 'test'
    # seeded with seed_2; each draw must continue its own stream's sequence.
    torch.cuda.manual_seed(seed_1)
    mpu.get_cuda_rng_tracker().add('test', seed_2)
    torch.randn(size, out=tensor)
    result_11 = tensor.clone()
    with mpu.get_cuda_rng_tracker().fork('test'):
        torch.randn(size, out=tensor)
        result_21 = tensor.clone()
    torch.randn(size, out=tensor)
    result_12 = tensor.clone()
    with mpu.get_cuda_rng_tracker().fork('test'):
        torch.randn(size, out=tensor)
        result_22 = tensor.clone()
    # The two streams must produce different values...
    diff = result_11.sub(result_21).abs().max()
    diff = min(diff, result_12.sub(result_22).abs().max())
    print('   max diff in generated tensors (should be non-zero) on global rank {}: {}'.format(torch.distributed.get_rank(), diff))
    assert (diff > 1e-06)
    # ...while each stream exactly reproduces its reference sequence.
    error = max(result_11.sub(target_11).abs().max(), result_12.sub(target_12).abs().max())
    error = max(error, result_21.sub(target_21).abs().max())
    error = max(error, result_22.sub(target_22).abs().max())
    print('   max error in generated tensors (should be zero) on global rank {}: {}'.format(torch.distributed.get_rank(), error))
    assert (error < 1e-06)
    # Clean up global state so later tests start fresh.
    mpu.get_cuda_rng_tracker().reset()
    mpu.destroy_model_parallel()
    torch.distributed.barrier()
    if (torch.distributed.get_rank() == 0):
        print('>> passed the test :-)')
def _do_python_eval(json_dataset, salt, output_dir='output'):
    """Evaluate per-class detections with the (unofficial) Python VOC protocol.

    Runs voc_eval for every foreground class, logs per-class and mean AP, and
    pickles each class's precision/recall curve into *output_dir*.
    """
    info = voc_info(json_dataset)
    year = info['year']
    anno_path = info['anno_path']
    image_set_path = info['image_set_path']
    devkit_path = info['devkit_path']
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    # The 11-point interpolated AP is the official metric through VOC2009.
    use_07_metric = (int(year) < 2010)
    logger.info(('VOC07 metric? ' + ('Yes' if use_07_metric else 'No')))
    if (not os.path.isdir(output_dir)):
        os.mkdir(output_dir)
    aps = []
    for cls in json_dataset.classes:
        if (cls == '__background__'):
            continue
        filename = _get_voc_results_file_template(json_dataset, salt).format(cls)
        (rec, prec, ap) = voc_eval(filename, anno_path, image_set_path, cls, cachedir, ovthresh=0.5, use_07_metric=use_07_metric)
        aps.append(ap)
        logger.info('AP for {} = {:.4f}'.format(cls, ap))
        res_file = os.path.join(output_dir, (cls + '_pr.pkl'))
        save_object({'rec': rec, 'prec': prec, 'ap': ap}, res_file)
    logger.info('Mean AP = {:.4f}'.format(np.mean(aps)))
    logger.info('')
    logger.info('Results:')
    for ap in aps:
        logger.info('{:.3f}'.format(ap))
    logger.info('{:.3f}'.format(np.mean(aps)))
    logger.info('')
    logger.info('')
    logger.info('')
    logger.info('Results computed with the **unofficial** Python eval code.')
    logger.info('Results should be very close to the official MATLAB code.')
    logger.info('Use `./tools/reval.py --matlab ...` for your paper.')
    logger.info('-- Thanks, The Management')
    logger.info('')
def test_set(capture, doc):
    """Exercise the pybind11 set bindings: construction, mutation from both
    sides, anyset queries, and generated docstrings.

    NOTE: the assertions are order-dependent (the set is mutated between
    checks), so statements must not be reordered.
    """
    s = m.get_set()
    assert isinstance(s, set)
    assert (s == {'key1', 'key2', 'key3'})
    # Mutate from Python, then print from C++ to confirm it sees the change.
    s.add('key4')
    with capture:
        m.print_anyset(s)
    assert (capture.unordered == '\n        key: key1\n        key: key2\n        key: key3\n        key: key4\n    ')
    # Mutate from C++, observe from Python.
    m.set_add(s, 'key5')
    assert (m.anyset_size(s) == 5)
    m.set_clear(s)
    assert m.anyset_empty(s)
    # anyset containment works for arbitrary element types.
    assert (not m.anyset_contains(set(), 42))
    assert m.anyset_contains({42}, 42)
    assert m.anyset_contains({'foo'}, 'foo')
    # Generated signatures/docstrings.
    assert (doc(m.get_set) == 'get_set() -> set')
    assert (doc(m.print_anyset) == 'print_anyset(arg0: anyset) -> None')
class TerminalOutput(Widget):
    """Scrollable, bordered output panel with a 'Terminal' heading."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        panel_layout = {'border': '1px solid black', 'min_width': '300px', 'min_height': '300px', 'max_height': '600px', 'width': 'auto', 'height': 'auto', 'overflow': 'scroll'}
        self.out_put = widgets.Output(layout=panel_layout)
        self.title = widgets.HTML(value='<H3>Terminal</H3>')

    def show(self):
        """Return the assembled panel: title stacked above the output area."""
        return widgets.VBox([self.title, self.out_put])

    def get_output(self):
        """Expose the underlying Output widget (e.g. for capturing prints)."""
        return self.out_put
def train(train_loader, device, net, criterion, optimizer):
    """Run one training epoch and return mean (loss, PSNR, SSIM).

    Args:
        train_loader: yields (data, label) batches.
        device: target device for the batches.
        net: model to train.
        criterion: loss function applied to (net(data), label).
        optimizer: optimizer stepped once per batch.

    Returns:
        (loss_epoch_train, psnr_epoch_train, ssim_epoch_train) as floats.
    """
    psnr_iter_train = []
    loss_iter_train = []
    ssim_iter_train = []
    # NOTE(review): mutating the module-level `args` is a side effect the
    # caller may not expect; kept because cal_metrics(args, ...) reads it.
    args.temperature = 1.0
    for (idx_iter, (data, label)) in tqdm(enumerate(train_loader), total=len(train_loader), ncols=70):
        data = data.to(device)
        label = label.to(device)
        out = net(data)
        loss = criterion(out, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        torch.cuda.empty_cache()
        # .item() detaches and converts the scalar loss to a Python float
        # (replaces the deprecated `.data.cpu()` pattern).
        loss_iter_train.append(loss.item())
        (psnr, ssim) = cal_metrics(args, label, out)
        psnr_iter_train.append(psnr)
        ssim_iter_train.append(ssim)
    loss_epoch_train = float(np.array(loss_iter_train).mean())
    psnr_epoch_train = float(np.array(psnr_iter_train).mean())
    ssim_epoch_train = float(np.array(ssim_iter_train).mean())
    return (loss_epoch_train, psnr_epoch_train, ssim_epoch_train)
def construct_path(proj_root: str, exp_name: str, xlsx_name: str) -> dict:
    """Build the experiment's output / checkpoint / log path layout.

    Args:
        proj_root: project root directory; everything lives under
            ``<proj_root>/output``.
        exp_name: experiment subdirectory name.
        xlsx_name: filename for the results spreadsheet (placed in the
            shared output directory, not the experiment directory).

    Returns:
        Dict mapping short keys ('ckpt_path', 'tb', 'final_full_net', ...) to
        absolute-ish paths; log file names are suffixed with today's date.
    """
    ckpt_path = os.path.join(proj_root, 'output')
    pth_log_path = os.path.join(ckpt_path, exp_name)
    tb_path = os.path.join(pth_log_path, 'tb')
    save_path = os.path.join(pth_log_path, 'pre')
    pth_path = os.path.join(pth_log_path, 'pth')
    final_full_model_path = os.path.join(pth_path, 'checkpoint_final.pth.tar')
    final_state_path = os.path.join(pth_path, 'state_final.pth')
    # Capture the date once so all four log names agree even if this runs
    # across midnight (the original re-evaluated datetime.now() per path).
    date_tag = str(datetime.now())[:10]
    tr_log_path = os.path.join(pth_log_path, f'tr_{date_tag}.txt')
    te_log_path = os.path.join(pth_log_path, f'te_{date_tag}.txt')
    cfg_log_path = os.path.join(pth_log_path, f'cfg_{date_tag}.txt')
    trainer_log_path = os.path.join(pth_log_path, f'trainer_{date_tag}.txt')
    xlsx_path = os.path.join(ckpt_path, xlsx_name)
    path_config = {'ckpt_path': ckpt_path, 'pth_log': pth_log_path, 'tb': tb_path, 'save': save_path, 'pth': pth_path, 'final_full_net': final_full_model_path, 'final_state_net': final_state_path, 'tr_log': tr_log_path, 'te_log': te_log_path, 'cfg_log': cfg_log_path, 'trainer_log': trainer_log_path, 'xlsx': xlsx_path}
    return path_config
def rvad(speechproc, path):
    """rVAD-style voice activity detection for a single 16 kHz wav file.

    Args:
        speechproc: module providing sflux, pitchblockdetect, snre_highenergy
            and snre_vad.
        path: path to a 16 kHz audio file readable by soundfile.

    Returns:
        (vad_seg, data): per-frame VAD decisions and the raw waveform samples.
    """
    # Analysis parameters: 25 ms window, 10 ms hop, pre-emphasis 0.97,
    # 20 filters, 512-point FFT. (pre_coef/nfilter are defined but only
    # nftt/winlen/ovrlen are used below.)
    (winlen, ovrlen, pre_coef, nfilter, nftt) = (0.025, 0.01, 0.97, 20, 512)
    ftThres = 0.5
    vadThres = 0.4
    opts = 1
    (data, fs) = sf.read(path)
    assert (fs == 16000), 'sample rate must be 16khz'
    # Spectral flatness per frame; low flatness (<= ftThres) marks candidate
    # pitch/voiced frames in pv01.
    (ft, flen, fsh10, nfr10) = speechproc.sflux(data, fs, winlen, ovrlen, nftt)
    pv01 = np.zeros(ft.shape[0])
    pv01[np.less_equal(ft, ftThres)] = 1
    pitch = deepcopy(ft)
    pvblk = speechproc.pitchblockdetect(pv01, pitch, nfr10, opts)
    ENERGYFLOOR = np.exp((- 50))
    # First-order high-pass filter applied to the raw samples.
    b = np.array([0.977, (- 0.977)])
    a = np.array([1.0, (- 0.954)])
    fdata = lfilter(b, a, data, axis=0)
    # Detect high-energy noise segments and zero them out before the final
    # SNR-based VAD pass.
    (noise_samp, noise_seg, n_noise_samp) = speechproc.snre_highenergy(fdata, nfr10, flen, fsh10, ENERGYFLOOR, pv01, pvblk)
    for j in range(n_noise_samp):
        fdata[range(int(noise_samp[(j, 0)]), (int(noise_samp[(j, 1)]) + 1))] = 0
    vad_seg = speechproc.snre_vad(fdata, nfr10, flen, fsh10, ENERGYFLOOR, pv01, pvblk, vadThres)
    return (vad_seg, data)
def initialise_halo_sim():
    """Return the fixed parameter set for the 'halo' simulation.

    Returns:
        Tuple (M_pos, M_neg, a_scale, gauss_vel_comp, cube_neg_width,
        sim_name): positive/negative masses, scale length, Gaussian velocity
        component, negative-mass cube width, and the simulation name.
    """
    positive_mass = 1.0
    negative_mass = (- 3.0)
    scale_length = 1.0
    velocity_component = 0.3
    negative_cube_width = 200
    name = 'halo'
    return (positive_mass, negative_mass, scale_length,
            velocity_component, negative_cube_width, name)
# NOTE(review): `_model('s2t_transformer')` below looks like a mangled
# decorator (probably `@register_model('s2t_transformer')`); as written it is
# a bare call -- confirm against the original source.
_model('s2t_transformer')
class S2TTransformerModel(FairseqEncoderDecoderModel):
    """Speech-to-text Transformer: convolutional-subsampled speech encoder
    plus a standard Transformer decoder.

    NOTE(review): add_args takes no `self` and build_encoder/build_decoder/
    build_model take `cls` but carry no @staticmethod/@classmethod decorators
    here -- these were presumably stripped along with the register decorator;
    confirm.
    """

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)

    def add_args(parser):
        """Register model-specific CLI arguments on *parser*."""
        # Convolutional subsampler options.
        parser.add_argument('--conv-kernel-sizes', type=str, metavar='N', help='kernel sizes of Conv1d subsampling layers')
        parser.add_argument('--conv-channels', type=int, metavar='N', help='# of channels in Conv1d subsampling layers')
        # Shared Transformer options.
        parser.add_argument('--activation-fn', type=str, default='relu', choices=utils.get_available_activation_fns(), help='activation function to use')
        parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
        parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.')
        # Encoder options.
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
        parser.add_argument('--acoustic-encoder-layers', type=int, metavar='N', help='num acoustic encoder layers')
        parser.add_argument('--translation-encoder-layers', type=int, metavar='N', help='num translation encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
        parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')
        # Decoder options.
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads')
        parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings')
        parser.add_argument('--layernorm-embedding', action='store_true', help='add layernorm to embedding')
        parser.add_argument('--no-scale-embedding', action='store_true', help='if True, dont scale embeddings')
        # Pretrained-weight initialization and training options.
        parser.add_argument('--load-pretrained-asr-encoder-from', type=str, metavar='STR', help='model to take asr encoder weights from (for initialization)')
        parser.add_argument('--load-pretrained-mt-encoder-decoder-from', type=str, metavar='STR', help='model to take mt encoder/decoder weights from (for initialization)')
        parser.add_argument('--encoder-freezing-updates', type=int, metavar='N', help='freeze encoder for first N updates')
        parser.add_argument('--mixup', action='store_true', help='if mix input of translation encoder')
        parser.add_argument('--mixup-arguments', type=str, metavar='STR', help='arguments for adjusting the probability p of mixup')

    def build_encoder(cls, args, task=None, embed_tokens=None):
        """Build the speech encoder."""
        return S2TTransformerEncoder(args, task.target_dictionary, embed_tokens)

    def build_decoder(cls, args, task, embed_tokens):
        """Build the (scriptable) Transformer decoder."""
        return TransformerDecoderScriptable(args, task.target_dictionary, embed_tokens)

    def build_model(cls, args, task):
        """Build a new model instance, optionally loading pretrained ASR
        encoder weights and/or pretrained MT encoder/decoder weights."""
        base_architecture(args)

        def build_embedding(dictionary, embed_dim):
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            return Embedding(num_embeddings, embed_dim, padding_idx)
        # Encoder and decoder share one target-side embedding table.
        decoder_embed_tokens = build_embedding(task.target_dictionary, args.decoder_embed_dim)
        encoder_embed_tokens = decoder_embed_tokens
        encoder = cls.build_encoder(args, task, encoder_embed_tokens)
        decoder = cls.build_decoder(args, task, decoder_embed_tokens)
        asr_pretraining_path = getattr(args, 'load_pretrained_asr_encoder_from', None)
        if ((asr_pretraining_path is not None) and Path(asr_pretraining_path).exists()):
            # Strip the 'encoder.' prefix so keys line up with our submodule.
            asr_state = checkpoint_utils.load_checkpoint_to_cpu(asr_pretraining_path)
            asr_state_dict = OrderedDict()
            for key in asr_state['model'].keys():
                if key.startswith('encoder'):
                    subkey = key[(len('encoder') + 1):]
                    asr_state_dict[subkey] = asr_state['model'][key]
            encoder.load_state_dict(asr_state_dict, strict=False)
            logger.info(f'loaded pretrained asr encoder from: {asr_pretraining_path}')
        mt_pretraining_path = getattr(args, 'load_pretrained_mt_encoder_decoder_from', None)
        if ((mt_pretraining_path is not None) and Path(mt_pretraining_path).exists()):
            # Same prefix-stripping, for both encoder and decoder weights.
            mt_state = checkpoint_utils.load_checkpoint_to_cpu(mt_pretraining_path)
            mt_encoder_state_dict = OrderedDict()
            mt_decoder_state_dict = OrderedDict()
            for key in mt_state['model'].keys():
                if key.startswith('encoder'):
                    subkey = key[(len('encoder') + 1):]
                    mt_encoder_state_dict[subkey] = mt_state['model'][key]
                if key.startswith('decoder'):
                    subkey = key[(len('decoder') + 1):]
                    mt_decoder_state_dict[subkey] = mt_state['model'][key]
            encoder.load_state_dict(mt_encoder_state_dict, strict=False)
            decoder.load_state_dict(mt_decoder_state_dict, strict=False)
            logger.info(f'loaded pretrained mt encoder and decoder from: {mt_pretraining_path}')
        return cls(encoder, decoder)

    def get_normalized_probs(self, net_output: Tuple[(Tensor, Optional[Dict[(str, List[Optional[Tensor]])]])], log_probs: bool, sample: Optional[Dict[(str, Tensor)]]=None):
        """Return (log-)probabilities with batch_first marked for the
        sequence generator."""
        lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
        lprobs.batch_first = True
        return lprobs

    def forward(self, audio, audio_lengths, source, source_lengths, prev_output_tokens, align_pad, align_lengths):
        """Encode audio (and source text / alignments), then decode."""
        encoder_out = self.encoder(audio, audio_lengths, source, source_lengths, align_pad, align_lengths)
        decoder_out = self.decoder(prev_output_tokens=prev_output_tokens, encoder_out=encoder_out)
        return decoder_out
def get_parser():
    """Build the argument parser for dataset binarization/preprocessing."""
    p = argparse.ArgumentParser()
    # Language pair and data file prefixes.
    p.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language')
    p.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language')
    p.add_argument('--trainpref', metavar='FP', default=None, help='train file prefix')
    p.add_argument('--validpref', metavar='FP', default=None, help='comma separated, valid file prefixes')
    p.add_argument('--testpref', metavar='FP', default=None, help='comma separated, test file prefixes')
    p.add_argument('--destdir', metavar='DIR', default='data-bin', help='destination dir')
    # Vocabulary construction / reuse.
    p.add_argument('--thresholdtgt', metavar='N', default=0, type=int, help='map words appearing less than threshold times to unknown')
    p.add_argument('--thresholdsrc', metavar='N', default=0, type=int, help='map words appearing less than threshold times to unknown')
    p.add_argument('--tgtdict', metavar='FP', help='reuse given target dictionary')
    p.add_argument('--srcdict', metavar='FP', help='reuse given source dictionary')
    p.add_argument('--nwordstgt', metavar='N', default=(- 1), type=int, help='number of target words to retain')
    p.add_argument('--nwordssrc', metavar='N', default=(- 1), type=int, help='number of source words to retain')
    p.add_argument('--alignfile', metavar='ALIGN', default=None, help='an alignment file (optional)')
    # Output and processing options.
    p.add_argument('--output-format', metavar='FORMAT', default='binary', choices=['binary', 'raw'], help='output format (optional)')
    p.add_argument('--joined-dictionary', action='store_true', help='Generate joined dictionary')
    p.add_argument('--only-source', action='store_true', help='Only process the source language')
    p.add_argument('--padding-factor', metavar='N', default=8, type=int, help='Pad dictionary size to be multiple of N')
    p.add_argument('--workers', metavar='N', default=1, type=int, help='number of parallel workers')
    return p
def register_point_cloud_pair(ply_file_names, s, t, transformation_init, config):
    """Load point clouds s and t and locally refine their relative alignment.

    Returns the refined (transformation, information) pair from local_refinement.
    """
    print('reading %s ...' % ply_file_names[s])
    source = o3d.io.read_point_cloud(ply_file_names[s])
    print('reading %s ...' % ply_file_names[t])
    target = o3d.io.read_point_cloud(ply_file_names[t])
    transformation, information = local_refinement(source, target, transformation_init, config)
    if config['debug_mode']:
        # dump the refined result for inspection
        print(transformation)
        print(information)
    return (transformation, information)
@_HEADS_REGISTRY.register()
class TextHead(nn.Module):
    """Text-recognition head: BezierAlign-pools per-proposal features, refines
    them with a small conv tower, and decodes text with a recognizer.

    NOTE(review): the registry call above appeared as a bare statement in the
    extracted source, so the class was never actually registered; restored as
    a decorator — confirm the registry symbol against the original file.
    """

    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        """Build the pooler, conv tower and recognizer from cfg.MODEL.BATEXT."""
        super(TextHead, self).__init__()
        pooler_resolution = cfg.MODEL.BATEXT.POOLER_RESOLUTION
        pooler_scales = cfg.MODEL.BATEXT.POOLER_SCALES
        sampling_ratio = cfg.MODEL.BATEXT.SAMPLING_RATIO
        conv_dim = cfg.MODEL.BATEXT.CONV_DIM
        num_conv = cfg.MODEL.BATEXT.NUM_CONV
        canonical_size = cfg.MODEL.BATEXT.CANONICAL_SIZE
        self.in_features = cfg.MODEL.BATEXT.IN_FEATURES
        self.voc_size = cfg.MODEL.BATEXT.VOC_SIZE
        recognizer = cfg.MODEL.BATEXT.RECOGNIZER
        self.top_size = cfg.MODEL.TOP_MODULE.DIM
        # BezierAlign pooling of curved text regions.
        self.pooler = TopPooler(output_size=pooler_resolution, scales=pooler_scales, sampling_ratio=sampling_ratio, pooler_type='BezierAlign', canonical_box_size=canonical_size, canonical_level=3, assign_crit='bezier')
        conv_block = conv_with_kaiming_uniform(norm='BN', activation=True)
        tower = []
        for i in range(num_conv):
            tower.append(conv_block(conv_dim, conv_dim, 3, 1))
        self.tower = nn.Sequential(*tower)
        self.recognizer = build_recognizer(cfg, recognizer)

    def forward(self, images, features, proposals, targets=None):
        """Training: return (None, {'rec_loss': ...}); inference: annotate
        proposals with per-instance `recs` and `beziers` and return (proposals, {})."""
        del images
        features = [features[f] for f in self.in_features]
        if self.training:
            beziers = [p.beziers for p in targets]
            targets = torch.cat([x.text for x in targets], dim=0)
        else:
            beziers = [p.top_feat for p in proposals]
        bezier_features = self.pooler(features, beziers)
        bezier_features = self.tower(bezier_features)
        if self.training:
            (preds, rec_loss) = self.recognizer(bezier_features, targets)
            rec_loss *= 0.05  # down-weight recognition loss against detection losses
            losses = {'rec_loss': rec_loss}
            return (None, losses)
        else:
            if (bezier_features.size(0) == 0):
                # No surviving proposals: echo top_feat so downstream fields exist.
                for box in proposals:
                    box.beziers = box.top_feat
                    box.recs = box.top_feat
                return (proposals, {})
            (preds, _) = self.recognizer(bezier_features, targets)
            # Scatter flat predictions back to per-image proposal lists.
            start_ind = 0
            for proposals_per_im in proposals:
                end_ind = (start_ind + len(proposals_per_im))
                proposals_per_im.recs = preds[start_ind:end_ind]
                proposals_per_im.beziers = proposals_per_im.top_feat
                start_ind = end_ind
            return (proposals, {})
class TestDatasets(unittest.TestCase):
    """Unit tests for the dataset wrappers (List/Tensor/Batch/Resample/
    Shuffle/Split/Concat datasets) in the project's `dataset` module."""

    def testListDataset(self):
        """ListDataset should accept a list, a LongTensor, or an ndarray as elem_list."""
        h = [0, 1, 2]
        d = dataset.ListDataset(elem_list=h, load=(lambda x: x))
        self.assertEqual(len(d), 3)
        self.assertEqual(d[0], 0)
        t = torch.LongTensor([0, 1, 2])
        d = dataset.ListDataset(elem_list=t, load=(lambda x: x))
        self.assertEqual(len(d), 3)
        self.assertEqual(d[0], 0)
        a = np.asarray([0, 1, 2])
        d = dataset.ListDataset(elem_list=a, load=(lambda x: x))
        self.assertEqual(len(d), 3)
        self.assertEqual(d[0], 0)

    def testListDataset_path(self):
        """With a path argument, elements are formatted as '<fmt(path/elem)>'."""
        tbl = [0, 1, 2]
        d = dataset.ListDataset(tbl, 'bar/{}'.format, 'foo')
        self.assertEqual(len(d), 3)
        self.assertEqual(d[2], 'bar/foo/2')

    def testListDataset_file(self):
        """elem_list may be a filename; each line becomes one element."""
        (_, filename) = tempfile.mkstemp()
        with open(filename, 'w') as f:
            for i in range(0, 50):
                f.write((str(i) + '\n'))
        d = dataset.ListDataset(filename, (lambda x: x), 'foo')
        self.assertEqual(len(d), 50)
        self.assertEqual(d[15], 'foo/15')
        os.remove(filename)

    def testTensorDataset(self):
        """TensorDataset should index dicts, bare tensors, and lists of tensors."""
        data = {'input': np.arange(0, 8), 'target': np.arange(0, 8)}
        d = dataset.TensorDataset(data)
        self.assertEqual(len(d), 8)
        self.assertEqual(d[2], {'input': 2, 'target': 2})
        a = torch.randn(8)
        d = dataset.TensorDataset(a)
        self.assertEqual(len(a), len(d))
        self.assertEqual(a[1], d[1])
        d = dataset.TensorDataset([a])
        self.assertEqual(len(a), len(d))
        self.assertEqual(a[1], d[1][0])

    def testBatchDataset(self):
        """BatchDataset groups consecutive elements into batches of `batchsize`."""
        # torch.arange landed after torch.range was deprecated; support both.
        if hasattr(torch, 'arange'):
            t = torch.arange(0, 16).long()
        else:
            t = torch.range(0, 15).long()
        batchsize = 8
        d = dataset.ListDataset(t, (lambda x: {'input': x}))
        d = dataset.BatchDataset(d, batchsize)
        ex = d[0]['input']
        self.assertEqual(len(ex), batchsize)
        self.assertEqual(ex[(- 1)], (batchsize - 1))

    def testResampleDataset(self):
        """ResampleDataset remaps indices through the provided sampler function."""
        tbl = dataset.TensorDataset(np.asarray([0, 1, 2]))
        d = dataset.ResampleDataset(tbl, (lambda dataset, i: (i % 2)))
        self.assertEqual(len(d), 3)
        self.assertEqual(d[0], 0)
        self.assertEqual(d[2], 0)

    def testShuffleDataset(self):
        """ShuffleDataset preserves length (order is randomized, so only len is checked)."""
        tbl = dataset.TensorDataset(np.asarray([0, 1, 2, 3, 4]))
        d = dataset.ShuffleDataset(tbl)
        self.assertEqual(len(d), 5)

    def testSplitDataset(self):
        """SplitDataset partitions by absolute counts; select() switches the view."""
        h = [0, 1, 2, 3]
        listdataset = dataset.ListDataset(elem_list=h)
        splitdataset = dataset.SplitDataset(listdataset, {'train': 3, 'val': 1})
        splitdataset.select('train')
        self.assertEqual(len(splitdataset), 3)
        self.assertEqual(splitdataset[2], 2)
        splitdataset.select('val')
        self.assertEqual(len(splitdataset), 1)
        self.assertEqual(splitdataset[0], 3)
        # split() on the dataset itself should be equivalent to SplitDataset(...).
        splitdataset = listdataset.split({'train': 3, 'val': 1})
        splitdataset.select('train')
        self.assertEqual(len(splitdataset), 3)
        self.assertEqual(splitdataset[2], 2)

    def testSplitDataset_fractions(self):
        """SplitDataset also accepts fractional partition sizes summing to 1."""
        h = [0, 1, 2, 3]
        listdataset = dataset.ListDataset(elem_list=h)
        splitdataset = dataset.SplitDataset(listdataset, {'train': 0.75, 'val': 0.25})
        splitdataset.select('train')
        self.assertEqual(len(splitdataset), 3)
        self.assertEqual(splitdataset[2], 2)
        splitdataset.select('val')
        self.assertEqual(len(splitdataset), 1)
        self.assertEqual(splitdataset[0], 3)

    def testConcatDataset(self):
        """ConcatDataset chains datasets; indices run through them in order."""
        l1 = dataset.ListDataset(elem_list=[0, 1, 2, 3])
        l2 = dataset.ListDataset(elem_list=[10, 11, 13])
        concatdataset = dataset.ConcatDataset([l1, l2])
        self.assertEqual(len(concatdataset), 7)
        self.assertEqual(concatdataset[0], 0)
        self.assertEqual(concatdataset[3], 3)
        self.assertEqual(concatdataset[4], 10)
        self.assertEqual(concatdataset[6], 13)
def scanLineForExceptionHandling(line):
    """Mark (in the global options) that exception-handling code was seen.

    Sets options.haveExceptionHandling the first time `exception_re` matches a
    line, unless exception handling was explicitly disabled.
    """
    global options
    # Nothing to do once detected, or if the line has no exception construct.
    if options.haveExceptionHandling or not exception_re.search(line):
        return
    if not options.noExceptionHandling:
        options.haveExceptionHandling = 1
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension.

    Normalizes x to (x - mean) / (std + eps) along the final axis, then
    optionally applies a learned elementwise gain (`scale`) and bias (`center`).
    """

    def __init__(self, features, center=True, scale=False, eps=1e-06):
        super().__init__()
        self.center = center
        self.scale = scale
        self.eps = eps
        # Learned gain/bias are only allocated when enabled.
        self.scale_param = nn.Parameter(torch.ones(features)) if scale else None
        self.center_param = nn.Parameter(torch.zeros(features)) if center else None

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        out = (x - mu) / (sigma + self.eps)
        if self.scale:
            out = out * self.scale_param
        if self.center:
            out = out + self.center_param
        return out
def label_prop(C, nt, Dct, lp='linear'):
    """Solve a transport-style LP assigning nt target points to C classes.

    Minimizes sum_{i,j} Dct[j, i] * M[i, j] subject to each target point's
    probabilities summing to 1 and each class receiving total mass >= 1.
    Returns an (nt, C) array of assignment probabilities.
    NOTE(review): the `lp` parameter is unused in this implementation.
    """
    Dct = abs(Dct)
    model = pulp.LpProblem('Cost minimising problem', pulp.LpMinimize)
    Mcj = pulp.LpVariable.dicts('Probability', ((i, j) for i in range(C) for j in range(nt)), lowBound=0, upBound=1, cat='Continuous')
    # Objective: total transport cost.
    model += pulp.lpSum(Dct[(j, i)] * Mcj[(i, j)] for i in range(C) for j in range(nt))
    # Each target point's class probabilities sum to one.
    for j in range(nt):
        model += (pulp.lpSum(Mcj[(i, j)] for i in range(C)) == 1)
    # Every class must receive at least one unit of total mass.
    for i in range(C):
        model += (pulp.lpSum(Mcj[(i, j)] for j in range(nt)) >= 1)
    model.solve()
    pulp.LpStatus[model.status]
    return np.array([[Mcj[(i, j)].varValue for i in range(C)] for j in range(nt)])
def main():
    """Train a PyTorch MNIST model distributedly via Analytics Zoo / BigDL Orca.

    Parses CLI arguments, builds MNIST train/test dataloaders, wraps the model,
    loss and optimizer for the Zoo estimator, and runs minibatch training.
    """
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--dir', default='/tmp/data', metavar='N', help='the folder store mnist data')
    parser.add_argument('--batch-size', type=int, default=256, metavar='N', help='input batch size for training per executor(default: 256)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', help='input batch size for testing per executor(default: 1000)')
    parser.add_argument('--epochs', type=int, default=2, metavar='N', help='number of epochs to train (default: 2)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR', help='learning rate (default: 0.001)')
    parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
    parser.add_argument('--save-model', action='store_true', default=False, help='For Saving the current Model')
    parser.add_argument('--deploy-mode', default='local', help='supported deploy mode is local, yarn-client, yarn-cluster')
    parser.add_argument('--download', type=bool, default=True, help='download dataset or prepare by yourself')
    args = parser.parse_args()
    torch.manual_seed(args.seed)
    if (not exists(args.dir)):
        makedirs(args.dir)
    # Standard MNIST normalization constants (mean=0.1307, std=0.3081).
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(args.dir, train=True, download=args.download, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(args.dir, train=False, download=args.download, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.test_batch_size, shuffle=False)
    # Spin up the Orca context locally or on YARN depending on deploy mode.
    if (args.deploy_mode == 'local'):
        sc = init_orca_context()
    else:
        sc = init_orca_context(cluster_mode=args.deploy_mode)
    model = Net()
    model.train()
    criterion = nn.NLLLoss()
    adam = torch.optim.Adam(model.parameters(), lr=args.lr)
    # Wrap PyTorch objects into their Zoo/BigDL counterparts for distributed training.
    zoo_model = TorchModel.from_pytorch(model)
    zoo_criterion = TorchLoss.from_pytorch(criterion)
    zoo_optim = TorchOptim.from_pytorch(adam)
    zoo_estimator = Estimator(zoo_model, optim_methods=zoo_optim)
    train_featureset = FeatureSet.pytorch_dataloader(train_loader)
    test_featureset = FeatureSet.pytorch_dataloader(test_loader)
    from bigdl.dllib.optim.optimizer import MaxEpoch, EveryEpoch
    # Train for args.epochs, validating (accuracy) at the end of every epoch.
    zoo_estimator.train_minibatch(train_featureset, zoo_criterion, end_trigger=MaxEpoch(args.epochs), checkpoint_trigger=EveryEpoch(), validation_set=test_featureset, validation_method=[Accuracy()])
class ResNet9(Base):
    """9-layer ResNet (prep + 2 residual stages) ending in a linear classifier."""

    def __init__(self, in_channels, num_classes):
        super().__init__()
        # NOTE: attribute names must match the original (state-dict keys).
        self.prep = conv_bn_relu_pool(in_channels, 64)
        self.layer1_head = conv_bn_relu_pool(64, 128, pool=True)
        self.layer1_residual = nn.Sequential(conv_bn_relu_pool(128, 128), conv_bn_relu_pool(128, 128))
        self.layer2 = conv_bn_relu_pool(128, 256, pool=True)
        self.layer3_head = conv_bn_relu_pool(256, 512, pool=True)
        self.layer3_residual = nn.Sequential(conv_bn_relu_pool(512, 512), conv_bn_relu_pool(512, 512))
        self.MaxPool2d = nn.Sequential(nn.MaxPool2d(4))
        self.linear = nn.Linear(512, num_classes)

    def forward(self, x):
        """Return class logits for a batch of images."""
        out = self.prep(x)
        out = self.layer1_head(out)
        out = out + self.layer1_residual(out)  # residual skip connection
        out = self.layer2(out)
        out = self.layer3_head(out)
        out = out + self.layer3_residual(out)  # residual skip connection
        out = self.MaxPool2d(out)
        out = out.view(out.size(0), -1)  # flatten for the classifier head
        return self.linear(out)
@_builder('msvd_qa')
class MSVDQABuilder(VideoQABuilder):
    """Dataset builder for MSVD video question answering.

    NOTE(review): the builder-registration call above was a bare statement in
    the extracted source (so the builder was never registered); restored as a
    decorator — the callable name looks truncated by extraction, confirm it
    against the original registry API.
    """
    # Path to the default dataset configuration.
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/msvd/defaults_qa.yaml'}
def check_service_status(port_lst, service_address):
    """Ping each Neural Solution service port and report overall health.

    Sends a serialized ping to every port in `port_lst` on `service_address`
    and counts ports that answer b'ok'. Stops at the first failure.
    Returns {'status': ..., 'msg': ...}.
    NOTE(review): health requires exactly one responsive port (count == 1) —
    confirm this is intended for multi-port lists.
    """
    count = 0
    msg = 'Neural Solution is running.'
    for port in port_lst:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            # Set the timeout BEFORE connect/send so an unreachable host cannot
            # block indefinitely (the original only bounded the recv call).
            sock.settimeout(5)
            sock.connect((service_address, port))
            sock.send(serialize({'ping': 'test'}))
            response = sock.recv(1024)
            if (response == b'ok'):
                count += 1
                continue
        except ConnectionRefusedError:
            msg = 'Ping fail! Make sure Neural Solution runner is running!'
            break
        except Exception as e:
            msg = 'Ping fail! {}'.format(e)
            break
        finally:
            # Close the socket on every path (the original leaked it on errors).
            sock.close()
    return ({'status': 'Healthy', 'msg': msg} if (count == 1) else {'status': 'Failed', 'msg': msg})
def create_dataloaders(args):
    """Build streaming train/eval dataloaders of constant-length token sequences."""
    stream_kwargs = {'streaming': True}
    raw_train = load_dataset(args.dataset_name_train, split='train', **stream_kwargs)
    raw_train = raw_train.shuffle(buffer_size=args.shuffle_buffer, seed=args.seed)
    raw_valid = load_dataset(args.dataset_name_valid, split='train', **stream_kwargs)
    # NOTE: `tokenizer` is resolved from the enclosing module's scope.
    train_dataset = ConstantLengthDataset(tokenizer, raw_train, infinite=True, seq_length=args.seq_length)
    valid_dataset = ConstantLengthDataset(tokenizer, raw_valid, infinite=False, seq_length=args.seq_length)
    train_dl = DataLoader(train_dataset, batch_size=args.train_batch_size)
    eval_dl = DataLoader(valid_dataset, batch_size=args.valid_batch_size)
    return (train_dl, eval_dl)
def resnet110_svhn(num_classes=10, **kwargs):
    """Build a ResNet-110 (non-bottleneck CIFAR-style) model for SVHN.

    Extra keyword arguments are forwarded to `get_resnet_cifar`.
    """
    return get_resnet_cifar(num_classes=num_classes, blocks=110, bottleneck=False, model_name='resnet110_svhn', **kwargs)
class ONNX(MXNet):
    """ONNX Runtime configuration: precisions plus graph optimization level.

    NOTE(review): the setter decorator appeared mangled as a bare
    `_optimization_level.setter` statement and the getter lacked `@property`
    in the extracted source; both restored so validated attribute access works.
    """

    def __init__(self, graph_optimization_level=None, precisions=None):
        super().__init__(precisions)
        self._graph_optimization_level = graph_optimization_level

    @property
    def graph_optimization_level(self):
        """Current ONNX Runtime graph optimization level."""
        return self._graph_optimization_level

    @graph_optimization_level.setter
    def graph_optimization_level(self, graph_optimization_level):
        # Only accept one of the documented ONNX Runtime optimization levels.
        if _check_value('graph_optimization_level', graph_optimization_level, str, ['DISABLE_ALL', 'ENABLE_BASIC', 'ENABLE_EXTENDED', 'ENABLE_ALL']):
            self._graph_optimization_level = graph_optimization_level
def _parse_fail(name, var_type, value, values):
raise ValueError(("Could not parse hparam '%s' of type '%s' with value '%s' in %s" % (name, var_type.__name__, value, values))) |
def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope):
    """Batch normalization for 3D-conv feature maps.

    Normalizes over axes [0, 1, 2, 3] (batch and all three spatial dims),
    delegating to the shared `batch_norm_template`.
    """
    return batch_norm_template(inputs, is_training, scope, [0, 1, 2, 3], bn_decay)
def expand_span(span):
    """Expand a span expression into an explicit list of 'word_N' tokens.

    Accepts single tokens ('word_4'), ranges ('word_1..word_3'), and comma
    separated mixtures of the two; ranges are inclusive at both ends.
    """
    if (',' in span):
        new_span = []
        for sp in span.split(','):
            new_span.extend(_expand_range(sp))
        return new_span
    return _expand_range(span)

def _expand_range(sp):
    """Expand one 'word_a..word_b' range into ['word_a', ..., 'word_b'];
    return [sp] unchanged when there is no range."""
    if ('..' not in sp):
        return [sp]
    (off1, off2) = sp.split('..')
    # Offsets are the numeric suffixes after the final underscore.
    lo = int(off1.split('_')[(- 1)])
    hi = int(off2.split('_')[(- 1)])
    return [('word_' + str(i)) for i in range(lo, (hi + 1))]
class If(Node):
    """AST node for an OPENQASM 'if' statement."""

    def __init__(self, children):
        """children[0]: classical register, children[1]: comparison value node,
        children[2]: the conditionally executed operation."""
        super().__init__('if', children, None)

    def qasm(self, prec=15):
        """Return the OPENQASM source string for this conditional."""
        creg, value_node, op = self.children
        return 'if(%s==%s) %s' % (creg.qasm(prec), str(value_node.value), op.qasm(prec))
def plot_basics(data, data_inst, fig, units):
    """Draw the three-panel power-law diagnostic column for one dataset.

    Panel A: linear-binned PDF scatter plus the log-binned PDF curve with a
    linear histogram inset. Panel B: PDF with power-law fits (xmin=1 and
    estimated xmin). Panel C: PDF with power-law vs exponential fits.
    Relies on module-level globals `n_graphs`, `n_data`, `panel_label_font`.
    """
    from powerlaw import plot_pdf, Fit, pdf
    annotate_coord = ((- 0.4), 0.95)
    ax1 = fig.add_subplot(n_graphs, n_data, data_inst)
    # Linear-binned empirical PDF; drop empty bins before plotting.
    (x, y) = pdf(data, linear_bins=True)
    ind = (y > 0)
    y = y[ind]
    x = x[:(- 1)]  # pdf returns bin edges; use left edges as x positions
    x = x[ind]
    ax1.scatter(x, y, color='r', s=0.5)
    plot_pdf(data[(data > 0)], ax=ax1, color='b', linewidth=2)
    from pylab import setp
    setp(ax1.get_xticklabels(), visible=False)
    if (data_inst == 1):
        ax1.annotate('A', annotate_coord, xycoords='axes fraction', fontproperties=panel_label_font)
    # Inset: raw linear-scale histogram of the data.
    # NOTE(review): `normed=` was removed from matplotlib; newer versions need `density=`.
    from mpl_toolkits.axes_grid.inset_locator import inset_axes
    ax1in = inset_axes(ax1, width='30%', height='30%', loc=3)
    ax1in.hist(data, normed=True, color='b')
    ax1in.set_xticks([])
    ax1in.set_yticks([])
    # Panel B: power-law fit with fixed xmin=1 (dotted) and estimated xmin (dashed).
    ax2 = fig.add_subplot(n_graphs, n_data, (n_data + data_inst), sharex=ax1)
    plot_pdf(data, ax=ax2, color='b', linewidth=2)
    fit = Fit(data, xmin=1, discrete=True)
    fit.power_law.plot_pdf(ax=ax2, linestyle=':', color='g')
    p = fit.power_law.pdf()
    ax2.set_xlim(ax1.get_xlim())
    fit = Fit(data, discrete=True)
    fit.power_law.plot_pdf(ax=ax2, linestyle='--', color='g')
    from pylab import setp
    setp(ax2.get_xticklabels(), visible=False)
    if (data_inst == 1):
        ax2.annotate('B', annotate_coord, xycoords='axes fraction', fontproperties=panel_label_font)
        ax2.set_ylabel(u'p(X)')
    # Panel C: compare power-law and exponential fits on the same axes scale.
    ax3 = fig.add_subplot(n_graphs, n_data, ((n_data * 2) + data_inst))
    fit.power_law.plot_pdf(ax=ax3, linestyle='--', color='g')
    fit.exponential.plot_pdf(ax=ax3, linestyle='--', color='r')
    fit.plot_pdf(ax=ax3, color='b', linewidth=2)
    ax3.set_ylim(ax2.get_ylim())
    ax3.set_xlim(ax1.get_xlim())
    if (data_inst == 1):
        ax3.annotate('C', annotate_coord, xycoords='axes fraction', fontproperties=panel_label_font)
    ax3.set_xlabel(units)
def getBlas():
file_ = open('npConfg_file.txt', 'w')
with contextlib.redirect_stdout(file_):
numpy.show_config()
file_.close()
np_confg = open('npConfg_file.txt', 'r')
lib = ''
for line in np_confg:
if ('libraries' in line):
lib = line
break
np_confg.close()
os.remove('npConfg_file.txt')
if (lib != ''):
blas = lib.split('[')[1].split(',')[0]
return blas[1:(len(blas) - 1)]
return lib |
def check_na(df, column):
    """Print the NA count and percentage for `column`, plus a preview of NA rows."""
    total_rows = df.shape[0]
    na_mask = df[column].isna()
    na_count = na_mask.sum()
    na_pct = int(100.0 * (na_count / total_rows))
    print('# of NA values ' + column + ': ' + str(na_count) + ', ' + str(na_pct) + '%')
    print(df[na_mask].head())
def main():
    """Entry point: launch under SLURM when its environment is present,
    otherwise fall back to the plain distributed launcher."""
    args = parse_args()
    launcher = slurm if ('SLURM_NNODES' in os.environ) else distributed
    launcher(args)
def update_new_configs(ckpt_opts, new_opts):
    """Merge `new_opts` into `ckpt_opts` in place.

    Keys missing from the checkpoint options are copied over; keys listed in
    new_opts['update_param_list'] are force-overwritten from new_opts.
    """
    for (k, v) in new_opts.items():
        if (k not in ckpt_opts):
            ckpt_opts[k] = v
    # .get(): tolerate configs that omit 'update_param_list' entirely
    # (the original raised KeyError in that case).
    for param in (new_opts.get('update_param_list') or []):
        ckpt_opts[param] = new_opts[param]
def train(agent, train_result, config):
    """Train the agent on every day in the module-level `train_days`,
    accumulating metrics into `train_result`."""
    for current_day in train_days:
        env = init_env(current_day, config)
        train_a_day(env, agent, train_result)
class BaseDataModule(LightningDataModule, ABC):
    """Base LightningDataModule configured by train/val/test DictConfig sections.

    Each split config may carry a `dataset.datadir`, a `loader` section and a
    `global_batch_size`; per-device batch sizes are derived from the trainer.

    NOTE(review): the `*_global_batch_size` / `*_local_batch_size` accessors
    were plain methods in the extracted source yet consumed as attributes
    (e.g. `batch_size=self.train_local_batch_size` in `train_dataloader`), so
    `@property` was clearly stripped during extraction and is restored here.
    The `num_classes` / `*_num_samples` accessors are left as methods since
    no in-class usage pins their form down — confirm against the original.
    """

    def __init__(self, datadir: str, train: Optional[DictConfig]=None, val: Optional[DictConfig]=None, test: Optional[DictConfig]=None) -> None:
        super().__init__()
        self.datadir = Path(datadir)
        train = self._validate_train_config(train)
        val = self._validate_val_config(val)
        test = self._validate_test_config(test)
        self.train = train
        self.val = val
        self.test = test
        self.train_dataset = None
        self.val_dataset = None
        self.test_dataset = None
        # For splits without a config, fall back to the parent dataloader hook;
        # otherwise resolve the split-specific data directory.
        if (train is None):
            self.train_dataloader = super().train_dataloader
        else:
            self.traindir = (self.train.dataset.datadir if (self.train.get('dataset') and self.train.dataset.get('datadir')) else self.datadir)
        if (val is None):
            self.val_dataloader = super().val_dataloader
        else:
            self.valdir = (self.val.dataset.datadir if (self.val.get('dataset') and self.val.dataset.get('datadir')) else self.datadir)
        if (test is None):
            self.test_dataloader = super().test_dataloader
        else:
            self.testdir = (self.test.dataset.datadir if (self.test.get('dataset') and self.test.dataset.get('datadir')) else self.datadir)

    def _validate_train_config(self, cfg: Optional[DictConfig]):
        """Resolve collate_fn and strip batch_size from the train loader config."""
        if (cfg is None):
            return cfg
        if cfg.get('loader'):
            if cfg.loader.get('collate_fn'):
                self.train_collate_fn = get_collate_fn(cfg.loader.collate_fn)
                cfg.loader.pop('collate_fn')
            else:
                self.train_collate_fn = None
            if cfg.loader.get('batch_size'):
                # batch_size must come from global_batch_size, not the loader.
                cfg.loader.pop('batch_size')
                rank_zero_warn('Batch size has been remove for train loader config because global_batch_size to train config should be passed.')
        return cfg

    def _validate_val_config(self, cfg: Optional[DictConfig]):
        """Resolve collate_fn and strip batch_size from the val loader config."""
        if (cfg is None):
            return cfg
        if cfg.get('loader'):
            if cfg.loader.get('collate_fn'):
                self.val_collate_fn = get_collate_fn(cfg.loader.collate_fn)
                cfg.loader.pop('collate_fn')
            else:
                self.val_collate_fn = None
            if cfg.loader.get('batch_size'):
                cfg.loader.pop('batch_size')
                rank_zero_warn('Batch size has been remove for val loader config because global_batch_size to val config should be passed.')
        return cfg

    def _validate_test_config(self, cfg: Optional[DictConfig]):
        """Resolve collate_fn and strip batch_size from the test loader config."""
        if (cfg is None):
            return cfg
        if cfg.get('loader'):
            if cfg.loader.get('collate_fn'):
                self.test_collate_fn = get_collate_fn(cfg.loader.collate_fn)
                cfg.loader.pop('collate_fn')
            else:
                self.test_collate_fn = None
            if cfg.loader.get('batch_size'):
                cfg.loader.pop('batch_size')
                rank_zero_warn('Batch size has been remove for test loader config because global_batch_size to test config should be passed.')
        return cfg

    def num_classes(self) -> int:
        """Number of target classes; -1 when not applicable. Override in subclasses."""
        return (- 1)

    def train_num_samples(self) -> int:
        """Size of the train dataset (0 before setup)."""
        return (len(self.train_dataset) if self.train_dataset else 0)

    def val_num_samples(self) -> int:
        """Size of the val dataset (0 before setup)."""
        return (len(self.val_dataset) if self.val_dataset else 0)

    def test_num_samples(self) -> int:
        """Size of the test dataset (0 before setup)."""
        return (len(self.test_dataset) if self.test_dataset else 0)

    @property
    def train_global_batch_size(self) -> int:
        """Global (across all devices) train batch size from the config."""
        if (not self.train.get('global_batch_size')):
            raise AttributeError('global_batch_size should be defined in train datamodule config.')
        return self.train.global_batch_size

    @property
    def val_global_batch_size(self) -> int:
        """Global (across all devices) val batch size from the config."""
        if (not self.val.get('global_batch_size')):
            raise AttributeError('global_batch_size should be defined in val datamodule config.')
        return self.val.global_batch_size

    @property
    def test_global_batch_size(self) -> int:
        """Global (across all devices) test batch size from the config."""
        if (not self.test.get('global_batch_size')):
            raise AttributeError('global_batch_size should be defined in test datamodule config.')
        return self.test.global_batch_size

    @property
    def train_local_batch_size(self) -> int:
        """Per-device train batch size derived from the attached trainer."""
        if (self.trainer is not None):
            return get_local_batch_size_in_trainer(self.train_global_batch_size, self.trainer)
        else:
            return self.train_global_batch_size

    @property
    def val_local_batch_size(self) -> int:
        """Per-device val batch size derived from the attached trainer."""
        if (self.trainer is not None):
            return get_local_batch_size_in_trainer(self.val_global_batch_size, self.trainer)
        else:
            return self.val_global_batch_size

    @property
    def test_local_batch_size(self) -> int:
        """Per-device test batch size derived from the attached trainer."""
        if (self.trainer is not None):
            return get_local_batch_size_in_trainer(self.test_global_batch_size, self.trainer)
        else:
            return self.test_global_batch_size

    def train_dataloader(self) -> DataLoader:
        """Build the train DataLoader from the validated config."""
        if (self.train is None):
            raise RuntimeError('No passed training configuration so dataloader cannot be retrieved.')
        loader = DataLoader(self.train_dataset, batch_size=self.train_local_batch_size, collate_fn=self.train_collate_fn, **self.train.loader)
        return loader

    def val_dataloader(self) -> DataLoader:
        """Build the val DataLoader from the validated config."""
        if (self.val is None):
            raise RuntimeError('No passed validation configuration so dataloader cannot be retrieved.')
        loader = DataLoader(self.val_dataset, batch_size=self.val_local_batch_size, collate_fn=self.val_collate_fn, **self.val.loader)
        return loader

    def test_dataloader(self) -> DataLoader:
        """Build the test DataLoader from the validated config."""
        if (self.test is None):
            raise RuntimeError('No passed testing configuration so dataloader cannot be retrieved.')
        loader = DataLoader(self.test_dataset, batch_size=self.test_local_batch_size, collate_fn=self.test_collate_fn, **self.test.loader)
        return loader
def resnet_v1_152(inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, reuse=None, scope='resnet_v1_152'):
    """ResNet-152 (v1): 2/7/35/3 bottleneck units per stage.

    Each of the first three stages ends with a stride-2 unit; the last stage
    is stride 1 throughout.
    """
    blocks = [
        resnet_utils.Block('block1', bottleneck, ([(256, 64, 1)] * 2) + [(256, 64, 2)]),
        resnet_utils.Block('block2', bottleneck, ([(512, 128, 1)] * 7) + [(512, 128, 2)]),
        resnet_utils.Block('block3', bottleneck, ([(1024, 256, 1)] * 35) + [(1024, 256, 2)]),
        resnet_utils.Block('block4', bottleneck, [(2048, 512, 1)] * 3),
    ]
    return resnet_v1(inputs, blocks, num_classes, is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=True, reuse=reuse, scope=scope)
def tiny_resnet18(pretrained: bool=False, class_num=10, progress: bool=True) -> ResNet:
    """Build a tiny ResNet-18 with every BatchNorm replaced by GroupNorm(32)."""
    model = tiny_ResNet(BasicBlock, [2, 2, 2, 2], class_num=class_num)

    def gn(channels):
        # 32-group GroupNorm stand-in for the original BatchNorm layers.
        return nn.GroupNorm(num_groups=32, num_channels=channels)

    model.bn1 = gn(64)
    # (stage, channel width, whether block 0 has a norm in its shortcut)
    stages = ((model.layer1, 64, False), (model.layer2, 128, True), (model.layer3, 256, True), (model.layer4, 512, True))
    for stage, channels, has_shortcut_norm in stages:
        for idx in range(2):
            stage[idx].bn1 = gn(channels)
            stage[idx].bn2 = gn(channels)
        if has_shortcut_norm:
            stage[0].shortcut[1] = gn(channels)
    # Sanity check: no buffer-carrying BN layers may remain.
    assert (len(dict(model.named_parameters()).keys()) == len(model.state_dict().keys())), 'More BN layers are there...'
    return model
def generate_pattern(state, rule, MAX_TIME):
    """Run a 1-D cellular automaton for MAX_TIME steps, printing each state.

    Each step maps every window of the state through `rule` and pads the new
    state with '0' on both ends; the final state is printed after the loop.
    """
    for _ in range(MAX_TIME):
        print(state)
        next_cells = ''.join(rule[pat] for pat in window(state))
        state = '0{}0'.format(next_cells)
    print(state)
class ActionPredictor():
    """Abstract interface for policies that score candidate actions for a state."""

    def __init__(self):
        pass

    def predict(self, state: State, actions) -> dict:
        """Return a mapping from candidate actions to predicted values.

        Abstract; subclasses must override.
        """
        raise NotImplementedError
def get_norm(name, out_channels):
    """Return a 2D normalization layer by name ('batch' or 'instance'),
    or None for any other name."""
    if (name == 'batch'):
        return nn.BatchNorm2d(out_channels)
    if (name == 'instance'):
        return nn.InstanceNorm2d(out_channels)
    return None
class TestOptions(BaseOptions):
    """Command-line options used at evaluation/test time."""

    def initialize(self):
        """Register test-only arguments on top of the base options."""
        BaseOptions.initialize(self)
        add = self.parser.add_argument
        add('--eval_id', type=str, help='evaluation id')
        add('--eval_results_dir', type=str, default=None, help='dir to save results, if not set, fall back to training results_dir')
        add('--model_dir', type=str, help='dir contains the model file, will be converted to absolute path afterwards')
def _get_empty_running_paths_dict():
return dict(observations=[], actions=[], rewards=[], env_infos=[], agent_infos=[]) |
def convert_json_to_pkl_local(root, _data_name):
    """Convert the test, train and dev JSON splits under root to pickle dumps."""
    base_path = root + '/2merge-{}'.format(_data_name)
    convert_json_file_to_pkl_dump(path=base_path, txt_fname='test', part=_data_name)
    print('Test done. Training start.')
    convert_json_file_to_pkl_dump(path=base_path, txt_fname='train', part=_data_name)
    print('Test done. Training done. Dev start.')
    convert_json_file_to_pkl_dump(path=base_path, txt_fname='dev', part=_data_name)
def find_path(map, start, end, alg=AStarFinder):
    """Find a path across the walkability matrix `map` from start to end.

    start/end are (x, y) coordinate tuples; alg is a pathfinding finder class.
    Returns the list of path nodes (empty when no path exists).
    """
    grid = Grid(matrix=map)
    finder = alg()
    path, _ = finder.find_path(grid.node(*start), grid.node(*end), grid)
    return path
class Possessive_Rate(object):
    """Computes the (adjectives + pronouns) per-word rate over sentences."""

    def __init__(self, sentence_objs):
        # Sentence objects exposing pos_tag_counter and num_words().
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return (total adjectives + total pronouns) / total words."""
        adjectives = sum(so.pos_tag_counter.get_pos_tag_count(ADJECTIVE) for so in self.sentence_objs)
        pronouns = sum(so.pos_tag_counter.get_pos_tag_count(PRONOUN) for so in self.sentence_objs)
        words = sum(so.num_words() for so in self.sentence_objs)
        return (adjectives + pronouns) / words
def empty_param(mod, prefix_name='', ignore_save=False):
    """Offload a module's parameters and buffers to disk and move them to meta.

    For every parameter/buffer: assign (or reuse) a checkpoint file name,
    save the tensor to that file (only on local rank 0 unless ignore_save,
    and only if not already on the meta device), then replace the in-module
    tensor with a meta-device version carrying the same `checkpoint_name`.
    Tied tensors share checkpoint names via the module-level `_TIE_DICT`.
    """
    for name in mod._parameters:
        if (mod._parameters[name] is not None):
            # Preserve the concrete Parameter subclass and its extra attributes.
            param_cls = type(mod._parameters[name])
            param_kwargs = mod._parameters[name].__dict__
            if (not hasattr(mod._parameters[name], 'checkpoint_name')):
                # Tied parameters reuse the checkpoint of their tie partner.
                if (mod._parameters[name] in _TIE_DICT):
                    ckpt_name = _TIE_DICT[mod._parameters[name]]
                else:
                    ckpt_name = get_checkpoint_name(prefix_name, is_param=True)
                setattr(mod._parameters[name], 'checkpoint_name', ckpt_name)
            else:
                ckpt_name = mod._parameters[name].checkpoint_name
            # Save once per node (local rank 0) — or always when not distributed —
            # and skip tensors already offloaded to the meta device.
            if (((not torch.distributed.is_initialized()) or ((local_rank() is not None) and (int(local_rank()) == 0) and (not ignore_save))) and (mod._parameters[name].device != torch.device('meta'))):
                torch.save(mod._parameters[name], ckpt_name)
            # Drop the attr before reconstructing so it isn't copied via __dict__,
            # then re-attach it to the new meta-device parameter.
            delattr(mod._parameters[name], 'checkpoint_name')
            mod._parameters[name] = param_cls(mod._parameters[name].to(torch.device('meta')), requires_grad=mod._parameters[name].requires_grad, **param_kwargs)
            setattr(mod._parameters[name], 'checkpoint_name', ckpt_name)
    for name in mod._buffers:
        if (mod._buffers[name] is not None):
            # Same flow as parameters, but buffers need no class reconstruction.
            if (not hasattr(mod._buffers[name], 'checkpoint_name')):
                if (mod._buffers[name] in _TIE_DICT):
                    ckpt_name = _TIE_DICT[mod._buffers[name]]
                else:
                    ckpt_name = get_checkpoint_name(prefix_name, is_param=False)
                setattr(mod._buffers[name], 'checkpoint_name', ckpt_name)
            else:
                ckpt_name = mod._buffers[name].checkpoint_name
            if (((not torch.distributed.is_initialized()) or ((local_rank() is not None) and (int(local_rank()) == 0) and (not ignore_save))) and (mod._buffers[name].device != torch.device('meta'))):
                torch.save(mod._buffers[name], ckpt_name)
            delattr(mod._buffers[name], 'checkpoint_name')
            mod._buffers[name] = mod._buffers[name].to(torch.device('meta'))
            setattr(mod._buffers[name], 'checkpoint_name', ckpt_name)
class XLMRobertaForTokenClassification():
    """Import-time placeholder used when PyTorch is unavailable.

    Constructing or loading the model raises a helpful error via
    `requires_pytorch` instead of an opaque ImportError.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
@_module()
class IterBasedRunnerAmp(IterBasedRunner):
    """IterBasedRunner variant that also checkpoints/restores apex AMP state.

    NOTE(review): the registration call above was a bare statement in the
    extracted source (so the runner was never registered); restored as a
    decorator — confirm the registry symbol against the original file.
    """

    def save_checkpoint(self, out_dir, filename_tmpl='iter_{}.pth', meta=None, save_optimizer=True, create_symlink=False):
        """Save a checkpoint (named by the 1-based iteration) into out_dir."""
        if (meta is None):
            meta = dict(iter=(self.iter + 1), epoch=(self.epoch + 1))
        elif isinstance(meta, dict):
            meta.update(iter=(self.iter + 1), epoch=(self.epoch + 1))
        else:
            raise TypeError(f'meta should be a dict or None, but got {type(meta)}')
        if (self.meta is not None):
            meta.update(self.meta)
        filename = filename_tmpl.format((self.iter + 1))
        filepath = osp.join(out_dir, filename)
        optimizer = (self.optimizer if save_optimizer else None)
        save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)

    def resume(self, checkpoint, resume_optimizer=True, map_location='default'):
        """Restore epoch/iter counters, optimizer state, and AMP state.

        With map_location='default', weights are mapped onto the current CUDA
        device when one is available, otherwise loaded with torch defaults.
        """
        if (map_location == 'default'):
            if torch.cuda.is_available():
                device_id = torch.cuda.current_device()
                checkpoint = self.load_checkpoint(checkpoint, map_location=(lambda storage, loc: storage.cuda(device_id)))
            else:
                checkpoint = self.load_checkpoint(checkpoint)
        else:
            checkpoint = self.load_checkpoint(checkpoint, map_location=map_location)
        self._epoch = checkpoint['meta']['epoch']
        self._iter = checkpoint['meta']['iter']
        self._inner_iter = checkpoint['meta']['iter']
        if (('optimizer' in checkpoint) and resume_optimizer):
            # Support both a single optimizer and a dict of named optimizers.
            if isinstance(self.optimizer, Optimizer):
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            elif isinstance(self.optimizer, dict):
                for k in self.optimizer.keys():
                    self.optimizer[k].load_state_dict(checkpoint['optimizer'][k])
            else:
                raise TypeError(f'Optimizer should be dict or torch.optim.Optimizer but got {type(self.optimizer)}')
        if ('amp' in checkpoint):
            # Restore apex mixed-precision loss-scaling state.
            apex.amp.load_state_dict(checkpoint['amp'])
            self.logger.info('load amp state dict')
        self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}')
class LLaMABot():
    """Thin chat wrapper around a (optionally PEFT-adapted, quantized) LLaMA model."""

    def __init__(self, device, model_path: str=None, peft_model: str=None, quantization: bool=False, max_new_tokens=256, min_new_tokens: int=0, seed: int=None, do_sample: bool=True, use_cache: bool=True, top_p: float=1.0, temperature: float=1.0, top_k: int=50, repetition_penalty: float=1.0, length_penalty: int=1):
        """Load the model/tokenizer and record generation hyper-parameters."""
        if (seed is not None):
            # Seed both CUDA and CPU RNGs for reproducible sampling.
            torch.cuda.manual_seed(seed)
            torch.manual_seed(seed)
        self.device = device
        self.max_new_tokens = max_new_tokens
        self.temperature = temperature
        self.top_p = top_p
        self.top_k = top_k
        self.do_sample = do_sample
        self.use_cache = use_cache
        self.repetition_penalty = repetition_penalty
        self.length_penalty = length_penalty
        self.model = load_model(model_path, quantization)
        if peft_model:
            self.model = load_peft_model(self.model, peft_model)
        self.tokenizer = LlamaTokenizer.from_pretrained(model_path)
        self.tokenizer.add_special_tokens({'pad_token': '<PAD>'})

    def build_dialogs(self, text):
        """Wrap a single user message into the chat-format dialog structure."""
        text = [text]
        dialogs = format_tokens(text)
        return dialogs

    def answer(self, chats):
        """Tokenize the chat turns, generate a continuation, and decode it."""
        tokens = []
        for chat in chats:
            # Concatenate the per-turn token lists into one flat sequence.
            tokens.append(sum([self.tokenizer.encode(content) for content in chat], []))
        tokens = torch.tensor(tokens).long()
        tokens = tokens.to(self.device)
        outputs = self.model.generate(tokens, max_new_tokens=self.max_new_tokens, do_sample=self.do_sample, top_p=self.top_p, temperature=self.temperature, use_cache=self.use_cache, top_k=self.top_k, repetition_penalty=self.repetition_penalty, length_penalty=self.length_penalty)
        output_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return output_text.strip()

    @_grad()
    def __call__(self, text):
        """Answer one user message and return only the newly generated text.

        NOTE(review): `_grad()` was a bare statement in the extracted source;
        restored as a decorator (presumably a no-grad guard such as
        torch.no_grad) — confirm against the original file.
        """
        dialogs = self.build_dialogs(text)
        output = self.answer(dialogs)
        # Skip the echoed prompt (its character length plus two) in the output.
        skip_len = (sum((len(content) for content in dialogs[0])) + 2)
        response: str = output[skip_len:]
        return response.strip()

    def to(self, device):
        """No-op device move kept for interface compatibility."""
        pass
class SubsampleGroup(nn.Module):
    """Subsample center points from a point cloud and group their neighbors.

    Centers are chosen by furthest-point or random sampling; neighbors are
    gathered either by ball query (within `radius`) or kNN.
    """

    def __init__(self, num_groups=256, group_size=32, subsample='fps', group='ballquery', radius=0.1, **kwargs):
        """
        Args:
            num_groups: number of center points to sample.
            group_size: neighbors gathered per center.
            subsample: 'fps'/'furthest'/'farthest' or 'random'/'rs'.
            group: 'ballquery' (uses `radius`) or 'knn'.
            radius: ball-query search radius.
        """
        super().__init__()
        self.num_groups = num_groups
        self.group_size = group_size
        self.subsample = subsample
        self.group = group
        if (('ball' in self.group.lower()) or ('query' in self.group.lower())):
            self.grouper = QueryAndGroup(radius, self.group_size)
        elif ('knn' in self.group.lower()):
            self.grouper = KNNGroup(self.group_size)
        else:
            raise NotImplementedError(f'{self.group.lower()} is not implemented. Only support ballquery, knn')

    def forward(self, p, x=None):
        """Sample centers from points `p` and group neighbors (and features `x`).

        Returns (grouped_p, center_p, fj, center_x) when features are given,
        otherwise (grouped_p, center_p).
        NOTE(review): indexing implies p is (B, N, 3) and x is (B, C, N) —
        confirm against the callers.
        """
        if (('fps' in self.subsample.lower()) or ('furthest' in self.subsample.lower()) or ('farthest' in self.subsample.lower())):
            idx = furthest_point_sample(p, self.num_groups).to(torch.int64)
        elif (('random' in self.subsample.lower()) or ('rs' in self.subsample.lower())):
            idx = random_sample(p, self.num_groups)
        else:
            raise NotImplementedError(f'{self.subsample.lower()} is not implemented. Only support fps, random')
        # Gather the xyz coordinates of the selected centers.
        center_p = torch.gather(p, 1, idx.unsqueeze((- 1)).expand((- 1), (- 1), 3))
        if (x is not None):
            (B, C, N) = x.shape[:3]
            # Gather per-center features along the point dimension.
            center_x = torch.gather(x, 2, idx.unsqueeze(1).expand((- 1), C, (- 1))).unsqueeze((- 1))
            (grouped_p, fj) = self.grouper(center_p, p, x)
            return (grouped_p, center_p, fj, center_x)
        else:
            (grouped_p, _) = self.grouper(center_p, p)
            return (grouped_p, center_p)
def main():
    """Entry point: evaluate a deployed (ONNX Runtime / TensorRT) detector on a test set."""
    args = parse_args()
    assert (args.out or args.eval or args.format_only or args.show or args.show_dir), 'Please specify at least one operation (save/eval/format/show the results / save the results) with the argument "--out", "--eval", "--format-only", "--show" or "--show-dir"'
    if (args.eval and args.format_only):
        raise ValueError('--eval and --format_only cannot be both specified')
    if ((args.out is not None) and (not args.out.endswith(('.pkl', '.pickle')))):
        raise ValueError('The output file must be a pkl file.')
    cfg = Config.fromfile(args.config)
    if (args.cfg_options is not None):
        cfg.merge_from_dict(args.cfg_options)
    cfg = compat_cfg(cfg)
    # Configure the test dataset(s): force test mode and honour any
    # per-dataset samples_per_gpu override (batching > 1 requires the
    # ImageToTensor-last pipeline variant).
    samples_per_gpu = 1
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
        if (samples_per_gpu > 1):
            cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        samples_per_gpu = max([ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
        if (samples_per_gpu > 1):
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=False, shuffle=False)
    # Build the deployed-model wrapper for the requested backend.
    if (args.backend == 'onnxruntime'):
        from mmdet.core.export.model_wrappers import ONNXRuntimeDetector
        model = ONNXRuntimeDetector(args.model, class_names=dataset.CLASSES, device_id=0)
    elif (args.backend == 'tensorrt'):
        from mmdet.core.export.model_wrappers import TensorRTDetector
        model = TensorRTDetector(args.model, class_names=dataset.CLASSES, device_id=0)
    else:
        # Fixed: previously fell through with `model` unassigned, raising a
        # confusing NameError below instead of a clear error here.
        raise ValueError(f'Unsupported backend: {args.backend}. Only support onnxruntime, tensorrt')
    model = MMDataParallel(model, device_ids=[0])
    outputs = single_gpu_test(model, data_loader, args.show, args.show_dir, args.show_score_thr)
    if args.out:
        # Leading newline matches the original triple-quoted print output.
        print(f'\nwriting results to {args.out}')
        mmcv.dump(outputs, args.out)
    kwargs = ({} if (args.eval_options is None) else args.eval_options)
    if args.format_only:
        dataset.format_results(outputs, **kwargs)
    if args.eval:
        eval_kwargs = cfg.get('evaluation', {}).copy()
        # Drop training-time-only evaluation options that evaluate() rejects.
        for key in ['interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule']:
            eval_kwargs.pop(key, None)
        eval_kwargs.update(dict(metric=args.eval, **kwargs))
        print(dataset.evaluate(outputs, **eval_kwargs))
# NOTE(review): the following three lines are dataset-viewer boilerplate that
# was accidentally captured into this file; as bare words they are a Python
# syntax error, so they are commented out here and should be removed entirely.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.