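# CausalWorld reaching task with privileged information: repeatedly sample a new goal
# via an intervention, then step the env using the exposed partial-solution joint positions.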
def privileged_information():
task = generate_task(task_generator_id='reaching')
env = CausalWorld(task=task, enable_visualization=True, normalize_actions=False)
env.expose_potential_partial_solution()
env.reset()
for _ in range(10):
goal_intervention_dict = env.sample_new_goal()
(success_signal, obs) = env.do_intervention(goal_intervention_dict)
print('Goal Intervention success signal', success_signal)
(obs, reward, done, info) = env.step(env.action_space.low)
for i in range(1000):
(obs, reward, done, info) = env.step(info['possible_solution_intervention']['joint_positions'])
print('now we solve it with privileged info')
print(info['possible_solution_intervention'])
print('Partial Solution Setting Intervention Success Signal', success_signal)
env.close() |
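# Small visdom helper: keeps a ring buffer of the last `running_n` values per metric
# and plots their running mean as a line chart.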
class Plot(object):
def __init__(self, metrics, title, ylabel, xlabel='t', running_n=100):
self.vis = visdom.Visdom()
self.metrics = metrics
self.opts = dict(fillarea=False, xlabel=xlabel, ylabel=ylabel, title=title)
self.win = None
self.running_n = running_n
self.vals = dict()
self.cnts = dict()
def _update_metric(self, metric, x, y):
if (metric not in self.vals):
self.vals[metric] = np.zeros(self.running_n)
self.cnts[metric] = 0
self.vals[metric][(self.cnts[metric] % self.running_n)] = y
self.cnts[metric] += 1
y = self.vals[metric][:min(self.cnts[metric], self.running_n)].mean()
return (np.array([x]), np.array([y]))
def update(self, metric, x, y):
assert (metric in self.metrics), ('metric %s is not in %s' % (metric, self.metrics))
(X, Y) = self._update_metric(metric, x, y)
if (self.win is None):
self.opts['legend'] = [metric]
self.win = self.vis.line(X=X, Y=Y, opts=self.opts)
else:
self.vis.line(X=X, Y=Y, win=self.win, update='append', name=metric) |
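# Usage sketch for Plot (assumes a visdom server is reachable; `step` and
# `loss_value` are placeholder names for values produced by a training loop):
#   plot = Plot(metrics=['loss'], title='training loss', ylabel='loss')
#   plot.update('loss', step, loss_value)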
def is_valid(row):
if (row['agreement'] != 1):
return False
if (row['label'] == 'Neither'):
return False
return True |
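# diffusers pipeline container: registers sub-modules into the pipeline config, saves each
# one with its library's native save method, moves torch modules between devices, and
# reassembles a pipeline from a local folder or a downloaded snapshot in from_pretrained().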
class DiffusionPipeline(ConfigMixin):
config_name = 'model_index.json'
def register_modules(self, **kwargs):
from diffusers import pipelines
for (name, module) in kwargs.items():
library = module.__module__.split('.')[0]
pipeline_dir = module.__module__.split('.')[(- 2)]
path = module.__module__.split('.')
is_pipeline_module = ((pipeline_dir in path) and hasattr(pipelines, pipeline_dir))
if ((library not in LOADABLE_CLASSES) or is_pipeline_module):
library = pipeline_dir
class_name = module.__class__.__name__
register_dict = {name: (library, class_name)}
self.register_to_config(**register_dict)
setattr(self, name, module)
def save_pretrained(self, save_directory: Union[(str, os.PathLike)]):
self.save_config(save_directory)
model_index_dict = dict(self.config)
model_index_dict.pop('_class_name')
model_index_dict.pop('_diffusers_version')
model_index_dict.pop('_module', None)
for pipeline_component_name in model_index_dict.keys():
sub_model = getattr(self, pipeline_component_name)
model_cls = sub_model.__class__
save_method_name = None
for (library_name, library_classes) in LOADABLE_CLASSES.items():
library = importlib.import_module(library_name)
for (base_class, save_load_methods) in library_classes.items():
class_candidate = getattr(library, base_class)
if issubclass(model_cls, class_candidate):
save_method_name = save_load_methods[0]
break
if (save_method_name is not None):
break
save_method = getattr(sub_model, save_method_name)
save_method(os.path.join(save_directory, pipeline_component_name))
def to(self, torch_device: Optional[Union[(str, torch.device)]]=None):
if (torch_device is None):
return self
(module_names, _) = self.extract_init_dict(dict(self.config))
for name in module_names.keys():
module = getattr(self, name)
if isinstance(module, torch.nn.Module):
module.to(torch_device)
return self
@property
def device(self) -> torch.device:
(module_names, _) = self.extract_init_dict(dict(self.config))
for name in module_names.keys():
module = getattr(self, name)
if isinstance(module, torch.nn.Module):
return module.device
return torch.device('cpu')
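# Resolve the pipeline folder (downloading a snapshot if needed), pick the pipeline class from
# the config, then build each sub-module either from an explicitly passed object, a
# pipeline-local module, or the owning library's load method.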
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[(str, os.PathLike)]], **kwargs):
cache_dir = kwargs.pop('cache_dir', DIFFUSERS_CACHE)
resume_download = kwargs.pop('resume_download', False)
proxies = kwargs.pop('proxies', None)
local_files_only = kwargs.pop('local_files_only', False)
use_auth_token = kwargs.pop('use_auth_token', None)
revision = kwargs.pop('revision', None)
torch_dtype = kwargs.pop('torch_dtype', None)
provider = kwargs.pop('provider', None)
if (not os.path.isdir(pretrained_model_name_or_path)):
cached_folder = snapshot_download(pretrained_model_name_or_path, cache_dir=cache_dir, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision)
else:
cached_folder = pretrained_model_name_or_path
config_dict = cls.get_config_dict(cached_folder)
if (cls != DiffusionPipeline):
pipeline_class = cls
else:
diffusers_module = importlib.import_module(cls.__module__.split('.')[0])
pipeline_class = getattr(diffusers_module, config_dict['_class_name'])
expected_modules = set(inspect.signature(pipeline_class.__init__).parameters.keys())
passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if (k in kwargs)}
(init_dict, _) = pipeline_class.extract_init_dict(config_dict, **kwargs)
init_kwargs = {}
from diffusers import pipelines
for (name, (library_name, class_name)) in init_dict.items():
is_pipeline_module = hasattr(pipelines, library_name)
loaded_sub_model = None
if (name in passed_class_obj):
if (not is_pipeline_module):
library = importlib.import_module(library_name)
class_obj = getattr(library, class_name)
importable_classes = LOADABLE_CLASSES[library_name]
class_candidates = {c: getattr(library, c) for c in importable_classes.keys()}
expected_class_obj = None
for (class_name, class_candidate) in class_candidates.items():
if issubclass(class_obj, class_candidate):
expected_class_obj = class_candidate
if (not issubclass(passed_class_obj[name].__class__, expected_class_obj)):
raise ValueError(f'{passed_class_obj[name]} is of type: {type(passed_class_obj[name])}, but should be {expected_class_obj}')
else:
logger.warn(f'You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether it has the correct type')
loaded_sub_model = passed_class_obj[name]
elif is_pipeline_module:
pipeline_module = getattr(pipelines, library_name)
class_obj = getattr(pipeline_module, class_name)
importable_classes = ALL_IMPORTABLE_CLASSES
class_candidates = {c: class_obj for c in importable_classes.keys()}
else:
library = importlib.import_module(library_name)
class_obj = getattr(library, class_name)
importable_classes = LOADABLE_CLASSES[library_name]
class_candidates = {c: getattr(library, c) for c in importable_classes.keys()}
if (loaded_sub_model is None):
load_method_name = None
for (class_name, class_candidate) in class_candidates.items():
if issubclass(class_obj, class_candidate):
load_method_name = importable_classes[class_name][1]
load_method = getattr(class_obj, load_method_name)
loading_kwargs = {}
if issubclass(class_obj, torch.nn.Module):
loading_kwargs['torch_dtype'] = torch_dtype
if issubclass(class_obj, diffusers.OnnxRuntimeModel):
loading_kwargs['provider'] = provider
if os.path.isdir(os.path.join(cached_folder, name)):
loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs)
else:
loaded_sub_model = load_method(cached_folder, **loading_kwargs)
init_kwargs[name] = loaded_sub_model
model = pipeline_class(**init_kwargs)
return model
@staticmethod
def numpy_to_pil(images):
if (images.ndim == 3):
images = images[(None, ...)]
images = (images * 255).round().astype('uint8')
pil_images = [Image.fromarray(image) for image in images]
return pil_images
def progress_bar(self, iterable):
if (not hasattr(self, '_progress_bar_config')):
self._progress_bar_config = {}
elif (not isinstance(self._progress_bar_config, dict)):
raise ValueError(f'`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}.')
return tqdm(iterable, **self._progress_bar_config)
def set_progress_bar_config(self, **kwargs):
self._progress_bar_config = kwargs |
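# Usage sketch ('some-org/some-model' is a placeholder id; assumes the diffusers
# package and a compatible checkpoint are available):
#   pipe = DiffusionPipeline.from_pretrained('some-org/some-model')
#   pipe = pipe.to('cuda')
#   pipe.save_pretrained('./local_copy')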
def getData(url):
try:
r = requests.get(url)
r.raise_for_status()
except requests.exceptions.RequestException:
print('Error while getting data from', url)
raise
return r.text |
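# Residual Dense Block: a chain of densely connected layers (channel count grows by
# `growth_rate` per layer), fused back to `in_channels` with a 1x1 conv and added to the input.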
class RDB(nn.Module):
def __init__(self, in_channels, num_dense_layer, growth_rate):
super(RDB, self).__init__()
_in_channels = in_channels
modules = []
for i in range(num_dense_layer):
modules.append(MakeDense(_in_channels, growth_rate))
_in_channels += growth_rate
self.residual_dense_layers = nn.Sequential(*modules)
self.conv_1x1 = nn.Conv2d(_in_channels, in_channels, kernel_size=1, padding=0)
def forward(self, x):
out = self.residual_dense_layers(x)
out = self.conv_1x1(out)
out = (out + x)
return out |
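# Residual U-Net generator: strided-conv encoder, a stack of residual blocks, and a
# transposed-conv decoder with skip connections, ending in two heads
# (a tanh RGB image and a sigmoid attention mask).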
class ResUnetGenerator(NetworkBase):
def __init__(self, conv_dim=64, c_dim=5, repeat_num=6, k_size=4, n_down=2):
super(ResUnetGenerator, self).__init__()
self._name = 'resunet_generator'
self.repeat_num = repeat_num
self.n_down = n_down
encoders = []
encoders.append(nn.Sequential(nn.Conv2d(c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False), nn.InstanceNorm2d(conv_dim, affine=True), nn.ReLU(inplace=True)))
curr_dim = conv_dim
for i in range(n_down):
encoders.append(nn.Sequential(nn.Conv2d(curr_dim, (curr_dim * 2), kernel_size=k_size, stride=2, padding=1, bias=False), nn.InstanceNorm2d((curr_dim * 2), affine=True), nn.ReLU(inplace=True)))
curr_dim = (curr_dim * 2)
self.encoders = nn.Sequential(*encoders)
resnets = []
for i in range(repeat_num):
resnets.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))
self.resnets = nn.Sequential(*resnets)
decoders = []
skippers = []
for i in range(n_down):
decoders.append(nn.Sequential(nn.ConvTranspose2d(curr_dim, (curr_dim // 2), kernel_size=k_size, stride=2, padding=1, output_padding=1, bias=False), nn.InstanceNorm2d((curr_dim // 2), affine=True), nn.ReLU(inplace=True)))
skippers.append(nn.Sequential(nn.Conv2d(curr_dim, (curr_dim // 2), kernel_size=k_size, stride=1, padding=1, bias=False), nn.InstanceNorm2d((curr_dim // 2), affine=True), nn.ReLU(inplace=True)))
curr_dim = (curr_dim // 2)
self.decoders = nn.Sequential(*decoders)
self.skippers = nn.Sequential(*skippers)
layers = []
layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.Tanh())
self.img_reg = nn.Sequential(*layers)
layers = []
layers.append(nn.Conv2d(curr_dim, 1, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.Sigmoid())
self.attention_reg = nn.Sequential(*layers)
def inference(self, x):
encoder_outs = self.encode(x)
resnet_outs = []
src_x = encoder_outs[(- 1)]
for i in range(self.repeat_num):
src_x = self.resnets[i](src_x)
resnet_outs.append(src_x)
return (encoder_outs, resnet_outs)
def forward(self, x):
encoder_outs = self.encode(x)
resnet_outs = self.resnets(encoder_outs[(- 1)])
d_out = self.decode(resnet_outs, encoder_outs)
(img_outs, mask_outs) = self.regress(d_out)
return (img_outs, mask_outs)
def encode(self, x):
e_out = self.encoders[0](x)
encoder_outs = [e_out]
for i in range(1, (self.n_down + 1)):
e_out = self.encoders[i](e_out)
encoder_outs.append(e_out)
return encoder_outs
def decode(self, x, encoder_outs):
d_out = x
for i in range(self.n_down):
d_out = self.decoders[i](d_out)
skip = encoder_outs[((self.n_down - 1) - i)]
d_out = torch.cat([skip, d_out], dim=1)
d_out = self.skippers[i](d_out)
return d_out
def regress(self, x):
return (self.img_reg(x), self.attention_reg(x)) |
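# Copy gradients from `params_with_grad` onto `params`, allocating .grad buffers of
# matching shape when they are missing.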
def set_grad(params, params_with_grad):
for (param, param_w_grad) in zip(params, params_with_grad):
if (param.grad is None):
param.grad = torch.nn.Parameter(param.data.new().resize_(*param.data.size()))
param.grad.data.copy_(param_w_grad.grad.data) |
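# Walks an fparser2 (Fortran 2003/2008) parse tree and converts each supported node type
# into the project's internal AST classes via the handler table below.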
class InternalFortranAst():
def __init__(self, ast: f03.Program, tables: symbol_table.SymbolTables):
self.ast = ast
self.tables = tables
self.functions_and_subroutines = []
self.symbols = {}
self.types = {'LOGICAL': 'BOOL', 'CHARACTER': 'CHAR', 'INTEGER': 'INTEGER', 'INTEGER4': 'INTEGER', 'REAL4': 'REAL', 'REAL8': 'DOUBLE', 'DOUBLE PRECISION': 'DOUBLE', 'REAL': 'REAL'}
from dace.frontend.fortran.intrinsics import FortranIntrinsics
self.intrinsic_handler = FortranIntrinsics()
self.supported_fortran_syntax = {'str': self.str_node, 'tuple': self.tuple_node, 'Program': self.program, 'Main_Program': self.main_program, 'Program_Stmt': self.program_stmt, 'End_Program_Stmt': self.end_program_stmt, 'Subroutine_Subprogram': self.subroutine_subprogram, 'Function_Subprogram': self.function_subprogram, 'Subroutine_Stmt': self.subroutine_stmt, 'Function_Stmt': self.function_stmt, 'End_Subroutine_Stmt': self.end_subroutine_stmt, 'End_Function_Stmt': self.end_function_stmt, 'Module': self.module, 'Module_Stmt': self.module_stmt, 'End_Module_Stmt': self.end_module_stmt, 'Use_Stmt': self.use_stmt, 'Implicit_Part': self.implicit_part, 'Implicit_Stmt': self.implicit_stmt, 'Implicit_None_Stmt': self.implicit_none_stmt, 'Implicit_Part_Stmt': self.implicit_part_stmt, 'Declaration_Construct': self.declaration_construct, 'Declaration_Type_Spec': self.declaration_type_spec, 'Type_Declaration_Stmt': self.type_declaration_stmt, 'Entity_Decl': self.entity_decl, 'Array_Spec': self.array_spec, 'Ac_Value_List': self.ac_value_list, 'Array_Constructor': self.array_constructor, 'Loop_Control': self.loop_control, 'Block_Nonlabel_Do_Construct': self.block_nonlabel_do_construct, 'Real_Literal_Constant': self.real_literal_constant, 'Subscript_Triplet': self.subscript_triplet, 'Section_Subscript_List': self.section_subscript_list, 'Explicit_Shape_Spec_List': self.explicit_shape_spec_list, 'Explicit_Shape_Spec': self.explicit_shape_spec, 'Type_Attr_Spec': self.type_attr_spec, 'Attr_Spec': self.attr_spec, 'Intent_Spec': self.intent_spec, 'Access_Spec': self.access_spec, 'Allocatable_Stmt': self.allocatable_stmt, 'Asynchronous_Stmt': self.asynchronous_stmt, 'Bind_Stmt': self.bind_stmt, 'Common_Stmt': self.common_stmt, 'Data_Stmt': self.data_stmt, 'Dimension_Stmt': self.dimension_stmt, 'External_Stmt': self.external_stmt, 'Intent_Stmt': self.intent_stmt, 'Intrinsic_Stmt': self.intrinsic_stmt, 'Optional_Stmt': self.optional_stmt, 'Parameter_Stmt': self.parameter_stmt, 'Pointer_Stmt': self.pointer_stmt, 'Protected_Stmt': self.protected_stmt, 'Save_Stmt': self.save_stmt, 'Target_Stmt': self.target_stmt, 'Value_Stmt': self.value_stmt, 'Volatile_Stmt': self.volatile_stmt, 'Execution_Part': self.execution_part, 'Execution_Part_Construct': self.execution_part_construct, 'Action_Stmt': self.action_stmt, 'Assignment_Stmt': self.assignment_stmt, 'Pointer_Assignment_Stmt': self.pointer_assignment_stmt, 'Where_Stmt': self.where_stmt, 'Forall_Stmt': self.forall_stmt, 'Where_Construct': self.where_construct, 'Where_Construct_Stmt': self.where_construct_stmt, 'Masked_Elsewhere_Stmt': self.masked_elsewhere_stmt, 'Elsewhere_Stmt': self.elsewhere_stmt, 'End_Where_Stmt': self.end_where_stmt, 'Forall_Construct': self.forall_construct, 'Forall_Header': self.forall_header, 'Forall_Triplet_Spec': self.forall_triplet_spec, 'Forall_Stmt': self.forall_stmt, 'End_Forall_Stmt': self.end_forall_stmt, 'Arithmetic_If_Stmt': self.arithmetic_if_stmt, 'If_Construct': self.if_construct, 'If_Stmt': self.if_stmt, 'If_Then_Stmt': self.if_then_stmt, 'Else_If_Stmt': self.else_if_stmt, 'Else_Stmt': self.else_stmt, 'End_If_Stmt': self.end_if_stmt, 'Case_Construct': self.case_construct, 'Select_Case_Stmt': self.select_case_stmt, 'Case_Stmt': self.case_stmt, 'End_Select_Stmt': self.end_select_stmt, 'Do_Construct': self.do_construct, 'Label_Do_Stmt': self.label_do_stmt, 'Nonlabel_Do_Stmt': self.nonlabel_do_stmt, 'End_Do_Stmt': self.end_do_stmt, 'Interface_Block': self.interface_block, 'Interface_Stmt': self.interface_stmt, 'End_Interface_Stmt': 
self.end_interface_stmt, 'Generic_Spec': self.generic_spec, 'Name': self.name, 'Type_Name': self.type_name, 'Specification_Part': self.specification_part, 'Intrinsic_Type_Spec': self.intrinsic_type_spec, 'Entity_Decl_List': self.entity_decl_list, 'Int_Literal_Constant': self.int_literal_constant, 'Logical_Literal_Constant': self.logical_literal_constant, 'Actual_Arg_Spec_List': self.actual_arg_spec_list, 'Attr_Spec_List': self.attr_spec_list, 'Initialization': self.initialization, 'Procedure_Declaration_Stmt': self.procedure_declaration_stmt, 'Type_Bound_Procedure_Part': self.type_bound_procedure_part, 'Contains_Stmt': self.contains_stmt, 'Call_Stmt': self.call_stmt, 'Return_Stmt': self.return_stmt, 'Stop_Stmt': self.stop_stmt, 'Dummy_Arg_List': self.dummy_arg_list, 'Part_Ref': self.part_ref, 'Level_2_Expr': self.level_2_expr, 'Equiv_Operand': self.level_2_expr, 'Level_3_Expr': self.level_2_expr, 'Level_4_Expr': self.level_2_expr, 'Add_Operand': self.level_2_expr, 'Or_Operand': self.level_2_expr, 'And_Operand': self.level_2_expr, 'Level_2_Unary_Expr': self.level_2_expr, 'Mult_Operand': self.power_expr, 'Parenthesis': self.parenthesis_expr, 'Intrinsic_Name': self.intrinsic_handler.replace_function_name, 'Intrinsic_Function_Reference': self.intrinsic_function_reference, 'Only_List': self.only_list, 'Structure_Constructor': self.structure_constructor, 'Component_Spec_List': self.component_spec_list, 'Write_Stmt': self.write_stmt, 'Assumed_Shape_Spec_List': self.assumed_shape_spec_list, 'Allocate_Stmt': self.allocate_stmt, 'Allocation_List': self.allocation_list, 'Allocation': self.allocation, 'Allocate_Shape_Spec': self.allocate_shape_spec, 'Allocate_Shape_Spec_List': self.allocate_shape_spec_list}
def fortran_intrinsics(self) -> 'FortranIntrinsics':
return self.intrinsic_handler
def list_tables(self):
for i in self.tables._symbol_tables:
print(i)
def create_children(self, node: FASTNode):
return ([self.create_ast(child) for child in node] if isinstance(node, (list, tuple)) else [self.create_ast(child) for child in node.children])
def create_ast(self, node=None):
if (node is not None):
if isinstance(node, (list, tuple)):
return [self.create_ast(child) for child in node]
return self.supported_fortran_syntax[type(node).__name__](node)
return None
def write_stmt(self, node: FASTNode):
children = self.create_children(node.children[1])
line = get_line(node)
return ast_internal_classes.Write_Stmt_Node(args=children, line_number=line)
def program(self, node: FASTNode):
children = self.create_children(node)
main_program = get_child(children, ast_internal_classes.Main_Program_Node)
function_definitions = [i for i in children if isinstance(i, ast_internal_classes.Function_Subprogram_Node)]
subroutine_definitions = [i for i in children if isinstance(i, ast_internal_classes.Subroutine_Subprogram_Node)]
modules = [node for node in children if isinstance(node, ast_internal_classes.Module_Node)]
return ast_internal_classes.Program_Node(main_program=main_program, function_definitions=function_definitions, subroutine_definitions=subroutine_definitions, modules=modules)
def main_program(self, node: FASTNode):
children = self.create_children(node)
name = get_child(children, ast_internal_classes.Program_Stmt_Node)
specification_part = get_child(children, ast_internal_classes.Specification_Part_Node)
execution_part = get_child(children, ast_internal_classes.Execution_Part_Node)
return ast_internal_classes.Main_Program_Node(name=name, specification_part=specification_part, execution_part=execution_part)
def program_stmt(self, node: FASTNode):
children = self.create_children(node)
name = get_child(children, Name_Node)
return ast_internal_classes.Program_Stmt_Node(name=name, line_number=node.item.span)
def subroutine_subprogram(self, node: FASTNode):
children = self.create_children(node)
name = get_child(children, ast_internal_classes.Subroutine_Stmt_Node)
specification_part = get_child(children, ast_internal_classes.Specification_Part_Node)
execution_part = get_child(children, ast_internal_classes.Execution_Part_Node)
return_type = ast_internal_classes.Void
return ast_internal_classes.Subroutine_Subprogram_Node(name=name.name, args=name.args, specification_part=specification_part, execution_part=execution_part, type=return_type, line_number=name.line_number)
def end_program_stmt(self, node: FASTNode):
return node
def only_list(self, node: FASTNode):
children = self.create_children(node)
names = [i for i in children if isinstance(i, ast_internal_classes.Name_Node)]
return ast_internal_classes.Only_List_Node(names=names)
def function_subprogram(self, node: FASTNode):
raise NotImplementedError('Function subprograms are not supported yet')
def subroutine_stmt(self, node: FASTNode):
children = self.create_children(node)
name = get_child(children, ast_internal_classes.Name_Node)
args = get_child(children, ast_internal_classes.Arg_List_Node)
return ast_internal_classes.Subroutine_Stmt_Node(name=name, args=args.args, line_number=node.item.span)
def ac_value_list(self, node: FASTNode):
children = self.create_children(node)
return ast_internal_classes.Ac_Value_List_Node(value_list=children)
def power_expr(self, node: FASTNode):
children = self.create_children(node)
line = get_line(node)
return ast_internal_classes.Call_Expr_Node(name=ast_internal_classes.Name_Node(name='pow'), args=[children[0], children[2]], line_number=line)
def array_constructor(self, node: FASTNode):
children = self.create_children(node)
value_list = get_child(children, ast_internal_classes.Ac_Value_List_Node)
return ast_internal_classes.Array_Constructor_Node(value_list=value_list.value_list)
def allocate_stmt(self, node: FASTNode):
children = self.create_children(node)
return ast_internal_classes.Allocate_Stmt_Node(allocation_list=children[1])
def allocation_list(self, node: FASTNode):
children = self.create_children(node)
return children
def allocation(self, node: FASTNode):
children = self.create_children(node)
name = get_child(children, ast_internal_classes.Name_Node)
shape = get_child(children, ast_internal_classes.Allocate_Shape_Spec_List)
return ast_internal_classes.Allocation_Node(name=name, shape=shape)
def allocate_shape_spec_list(self, node: FASTNode):
children = self.create_children(node)
return ast_internal_classes.Allocate_Shape_Spec_List(shape_list=children)
def allocate_shape_spec(self, node: FASTNode):
children = self.create_children(node)
if (len(children) != 2):
raise NotImplementedError('Only simple allocate shape specs are supported')
return children[1]
def structure_constructor(self, node: FASTNode):
children = self.create_children(node)
name = get_child(children, ast_internal_classes.Type_Name_Node)
args = get_child(children, ast_internal_classes.Component_Spec_List_Node)
return ast_internal_classes.Structure_Constructor_Node(name=name, args=args.args, type=None)
def intrinsic_function_reference(self, node: FASTNode):
children = self.create_children(node)
line = get_line(node)
name = get_child(children, ast_internal_classes.Name_Node)
args = get_child(children, ast_internal_classes.Arg_List_Node)
return self.intrinsic_handler.replace_function_reference(name, args, line)
def function_stmt(self, node: FASTNode):
raise NotImplementedError('Function statements are not supported yet - at least not if defined this way. Not encountered in code yet.')
def end_subroutine_stmt(self, node: FASTNode):
return node
def end_function_stmt(self, node: FASTNode):
return node
def parenthesis_expr(self, node: FASTNode):
children = self.create_children(node)
return ast_internal_classes.Parenthesis_Expr_Node(expr=children[1])
def module(self, node: FASTNode):
children = self.create_children(node)
name = get_child(children, ast_internal_classes.Module_Stmt_Node)
specification_part = get_child(children, ast_internal_classes.Specification_Part_Node)
function_definitions = [i for i in children if isinstance(i, ast_internal_classes.Function_Subprogram_Node)]
subroutine_definitions = [i for i in children if isinstance(i, ast_internal_classes.Subroutine_Subprogram_Node)]
return ast_internal_classes.Module_Node(name=name.name, specification_part=specification_part, function_definitions=function_definitions, subroutine_definitions=subroutine_definitions, line_number=name.line_number)
def module_stmt(self, node: FASTNode):
children = self.create_children(node)
name = get_child(children, ast_internal_classes.Name_Node)
return ast_internal_classes.Module_Stmt_Node(name=name, line_number=node.item.span)
def end_module_stmt(self, node: FASTNode):
return node
def use_stmt(self, node: FASTNode):
children = self.create_children(node)
name = get_child(children, ast_internal_classes.Name_Node)
only_list = get_child(children, ast_internal_classes.Only_List_Node)
return ast_internal_classes.Use_Stmt_Node(name=name.name, list=only_list.names)
def implicit_part(self, node: FASTNode):
return node
def implicit_stmt(self, node: FASTNode):
return node
def implicit_none_stmt(self, node: FASTNode):
return node
def implicit_part_stmt(self, node: FASTNode):
return node
def declaration_construct(self, node: FASTNode):
raise NotImplementedError('Declaration constructs are not supported yet')
return node
def declaration_type_spec(self, node: FASTNode):
raise NotImplementedError('Declaration type spec is not supported yet')
return node
def assumed_shape_spec_list(self, node: FASTNode):
return node
def parse_shape_specification(self, dim: f03.Explicit_Shape_Spec, size: List[FASTNode], offset: List[int]):
dim_expr = [i for i in dim.children if (i is not None)]
if (len(dim_expr) == 1):
dim_expr = dim_expr[0]
size.append(self.create_ast(dim_expr))
offset.append(1)
elif (len(dim_expr) == 2):
for expr in dim_expr:
if (not isinstance(expr, f03.Int_Literal_Constant)):
raise TypeError('Array offsets must be constant expressions!')
offset.append(int(dim_expr[0].tostr()))
fortran_size = ((int(dim_expr[1].tostr()) - int(dim_expr[0].tostr())) + 1)
fortran_ast_size = f03.Int_Literal_Constant(str(fortran_size))
size.append(self.create_ast(fortran_ast_size))
else:
raise TypeError('Array dimension must be at most two expressions')
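# Handle a type declaration statement: resolve the base type (and optional KIND), pick up
# array shapes/offsets from attributes or per-entity specs, and emit variable or symbol
# declaration nodes.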
def type_declaration_stmt(self, node: FASTNode):
type_of_node = get_child(node, [f03.Intrinsic_Type_Spec, f03.Declaration_Type_Spec])
if isinstance(type_of_node, f03.Intrinsic_Type_Spec):
derived_type = False
basetype = type_of_node.items[0]
elif isinstance(type_of_node, f03.Declaration_Type_Spec):
derived_type = True
basetype = type_of_node.items[1].string
else:
raise TypeError('Type of node must be either Intrinsic_Type_Spec or Declaration_Type_Spec')
kind = None
if (len(type_of_node.items) >= 2):
if (type_of_node.items[1] is not None):
if (not derived_type):
kind = type_of_node.items[1].items[1].string
if (self.symbols[kind] is not None):
if (basetype == 'REAL'):
if (self.symbols[kind].value == '8'):
basetype = 'REAL8'
elif (basetype == 'INTEGER'):
if (self.symbols[kind].value == '4'):
basetype = 'INTEGER'
else:
raise TypeError('Derived type not supported')
else:
raise TypeError('Derived type not supported')
if derived_type:
raise TypeError('Derived type not supported')
if (not derived_type):
testtype = self.types[basetype]
else:
testtype = basetype
names_list = get_child(node, ['Entity_Decl_List', 'Component_Decl_List'])
names = get_children(names_list, [f03.Entity_Decl, f03.Component_Decl])
attributes = get_children(node, 'Attr_Spec_List')
alloc = False
symbol = False
attr_size = None
attr_offset = None
for i in attributes:
if (i.string.lower() == 'allocatable'):
alloc = True
if (i.string.lower() == 'parameter'):
symbol = True
if isinstance(i, f08.Attr_Spec_List):
dimension_spec = get_children(i, 'Dimension_Attr_Spec')
if (len(dimension_spec) == 0):
continue
attr_size = []
attr_offset = []
sizes = get_child(dimension_spec[0], ['Explicit_Shape_Spec_List'])
for shape_spec in get_children(sizes, [f03.Explicit_Shape_Spec]):
self.parse_shape_specification(shape_spec, attr_size, attr_offset)
vardecls = []
for var in names:
size = None
offset = None
var_components = self.create_children(var)
array_sizes = get_children(var, 'Explicit_Shape_Spec_List')
actual_name = get_child(var_components, ast_internal_classes.Name_Node)
if (len(array_sizes) == 1):
array_sizes = array_sizes[0]
size = []
offset = []
for dim in array_sizes.children:
if isinstance(dim, f03.Explicit_Shape_Spec):
self.parse_shape_specification(dim, size, offset)
init = None
initialization = get_children(var, f03.Initialization)
if (len(initialization) == 1):
initialization = initialization[0]
if (len(initialization.children) < 2):
raise ValueError('Initialization must have an expression')
raw_init = initialization.children[1]
init = self.create_ast(raw_init)
if (symbol == False):
if (attr_size is None):
vardecls.append(ast_internal_classes.Var_Decl_Node(name=actual_name.name, type=testtype, alloc=alloc, sizes=size, offsets=offset, kind=kind, line_number=node.item.span))
else:
vardecls.append(ast_internal_classes.Var_Decl_Node(name=actual_name.name, type=testtype, alloc=alloc, sizes=attr_size, offsets=attr_offset, kind=kind, line_number=node.item.span))
elif ((size is None) and (attr_size is None)):
self.symbols[actual_name.name] = init
vardecls.append(ast_internal_classes.Symbol_Decl_Node(name=actual_name.name, type=testtype, alloc=alloc, init=init, line_number=node.item.span))
elif (attr_size is not None):
vardecls.append(ast_internal_classes.Symbol_Array_Decl_Node(name=actual_name.name, type=testtype, alloc=alloc, sizes=attr_size, offsets=attr_offset, kind=kind, init=init, line_number=node.item.span))
else:
vardecls.append(ast_internal_classes.Symbol_Array_Decl_Node(name=actual_name.name, type=testtype, alloc=alloc, sizes=size, offsets=offset, kind=kind, init=init, line_number=node.item.span))
return ast_internal_classes.Decl_Stmt_Node(vardecl=vardecls, line_number=node.item.span)
def entity_decl(self, node: FASTNode):
raise NotImplementedError('Entity decl is not supported yet')
def array_spec(self, node: FASTNode):
raise NotImplementedError('Array spec is not supported yet')
return node
def explicit_shape_spec_list(self, node: FASTNode):
return node
def explicit_shape_spec(self, node: FASTNode):
return node
def type_attr_spec(self, node: FASTNode):
return node
def attr_spec(self, node: FASTNode):
return node
def intent_spec(self, node: FASTNode):
raise NotImplementedError('Intent spec is not supported yet')
return node
def access_spec(self, node: FASTNode):
raise NotImplementedError('Access spec is not supported yet')
return node
def allocatable_stmt(self, node: FASTNode):
raise NotImplementedError('Allocatable stmt is not supported yet')
return node
def asynchronous_stmt(self, node: FASTNode):
raise NotImplementedError('Asynchronous stmt is not supported yet')
return node
def bind_stmt(self, node: FASTNode):
raise NotImplementedError('Bind stmt is not supported yet')
return node
def common_stmt(self, node: FASTNode):
raise NotImplementedError('Common stmt is not supported yet')
return node
def data_stmt(self, node: FASTNode):
raise NotImplementedError('Data stmt is not supported yet')
return node
def dimension_stmt(self, node: FASTNode):
raise NotImplementedError('Dimension stmt is not supported yet')
return node
def external_stmt(self, node: FASTNode):
raise NotImplementedError('External stmt is not supported yet')
return node
def intent_stmt(self, node: FASTNode):
return node
def intrinsic_stmt(self, node: FASTNode):
return node
def optional_stmt(self, node: FASTNode):
return node
def parameter_stmt(self, node: FASTNode):
return node
def pointer_stmt(self, node: FASTNode):
return node
def protected_stmt(self, node: FASTNode):
return node
def save_stmt(self, node: FASTNode):
return node
def target_stmt(self, node: FASTNode):
return node
def value_stmt(self, node: FASTNode):
return node
def volatile_stmt(self, node: FASTNode):
return node
def execution_part(self, node: FASTNode):
children = self.create_children(node)
return ast_internal_classes.Execution_Part_Node(execution=children)
def execution_part_construct(self, node: FASTNode):
return node
def action_stmt(self, node: FASTNode):
return node
def level_2_expr(self, node: FASTNode):
children = self.create_children(node)
line = get_line(node)
if (len(children) == 3):
return ast_internal_classes.BinOp_Node(lval=children[0], op=children[1], rval=children[2], line_number=line)
else:
return ast_internal_classes.UnOp_Node(lval=children[1], op=children[0], line_number=line)
def assignment_stmt(self, node: FASTNode):
children = self.create_children(node)
line = get_line(node)
if (len(children) == 3):
return ast_internal_classes.BinOp_Node(lval=children[0], op=children[1], rval=children[2], line_number=line)
else:
return ast_internal_classes.UnOp_Node(lval=children[1], op=children[0], line_number=line)
def pointer_assignment_stmt(self, node: FASTNode):
return node
def where_stmt(self, node: FASTNode):
return node
def forall_stmt(self, node: FASTNode):
return node
def where_construct(self, node: FASTNode):
return node
def where_construct_stmt(self, node: FASTNode):
return node
def masked_elsewhere_stmt(self, node: FASTNode):
return node
def elsewhere_stmt(self, node: FASTNode):
return node
def end_where_stmt(self, node: FASTNode):
return node
def forall_construct(self, node: FASTNode):
return node
def forall_header(self, node: FASTNode):
return node
def forall_triplet_spec(self, node: FASTNode):
return node
def forall_stmt(self, node: FASTNode):
return node
def end_forall_stmt(self, node: FASTNode):
return node
def arithmetic_if_stmt(self, node: FASTNode):
return node
def if_stmt(self, node: FASTNode):
children = self.create_children(node)
line = get_line(node)
cond = children[0]
body = children[1:]
return ast_internal_classes.If_Stmt_Node(cond=cond, body=ast_internal_classes.Execution_Part_Node(execution=body), body_else=ast_internal_classes.Execution_Part_Node(execution=[]), line_number=line)
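# Fold an IF construct into nested If_Stmt_Node objects: each ELSE IF opens a fresh node
# placed in the previous node's else branch, and a bare ELSE switches the remaining
# statements into the else body.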
def if_construct(self, node: FASTNode):
children = self.create_children(node)
cond = children[0]
body = []
body_else = []
else_mode = False
line = get_line(node)
if (line is None):
line = cond.line_number
toplevelIf = ast_internal_classes.If_Stmt_Node(cond=cond, line_number=line)
currentIf = toplevelIf
for i in children[1:(- 1)]:
if isinstance(i, ast_internal_classes.Else_If_Stmt_Node):
newif = ast_internal_classes.If_Stmt_Node(cond=i.cond, line_number=i.line_number)
currentIf.body = ast_internal_classes.Execution_Part_Node(execution=body)
currentIf.body_else = ast_internal_classes.Execution_Part_Node(execution=[newif])
currentIf = newif
body = []
continue
if isinstance(i, ast_internal_classes.Else_Separator_Node):
else_mode = True
continue
if else_mode:
body_else.append(i)
else:
body.append(i)
currentIf.body = ast_internal_classes.Execution_Part_Node(execution=body)
currentIf.body_else = ast_internal_classes.Execution_Part_Node(execution=body_else)
return toplevelIf
def if_then_stmt(self, node: FASTNode):
children = self.create_children(node)
if (len(children) != 1):
raise ValueError('If statement must have a condition')
return_value = children[0]
return_value.line_number = node.item.span
return return_value
def else_if_stmt(self, node: FASTNode):
children = self.create_children(node)
return ast_internal_classes.Else_If_Stmt_Node(cond=children[0], line_number=get_line(node))
def else_stmt(self, node: FASTNode):
return ast_internal_classes.Else_Separator_Node(line_number=node.item.span)
def end_if_stmt(self, node: FASTNode):
return node
def case_construct(self, node: FASTNode):
return node
def select_case_stmt(self, node: FASTNode):
return node
def case_stmt(self, node: FASTNode):
return node
def end_select_stmt(self, node: FASTNode):
return node
def do_construct(self, node: FASTNode):
return node
def label_do_stmt(self, node: FASTNode):
return node
def nonlabel_do_stmt(self, node: FASTNode):
children = self.create_children(node)
loop_control = get_child(children, ast_internal_classes.Loop_Control_Node)
return ast_internal_classes.Nonlabel_Do_Stmt_Node(iter=loop_control.iter, cond=loop_control.cond, init=loop_control.init, line_number=node.item.span)
def end_do_stmt(self, node: FASTNode):
return node
def interface_block(self, node: FASTNode):
return node
def interface_stmt(self, node: FASTNode):
return node
def end_interface_stmt(self, node: FASTNode):
return node
def generic_spec(self, node: FASTNode):
return node
def procedure_declaration_stmt(self, node: FASTNode):
return node
def type_bound_procedure_part(self, node: FASTNode):
return node
def contains_stmt(self, node: FASTNode):
return node
def call_stmt(self, node: FASTNode):
children = self.create_children(node)
name = get_child(children, ast_internal_classes.Name_Node)
args = get_child(children, ast_internal_classes.Arg_List_Node)
return ast_internal_classes.Call_Expr_Node(name=name, args=args.args, type=None, line_number=node.item.span)
def return_stmt(self, node: FASTNode):
return node
def stop_stmt(self, node: FASTNode):
return node
def dummy_arg_list(self, node: FASTNode):
children = self.create_children(node)
return ast_internal_classes.Arg_List_Node(args=children)
def component_spec_list(self, node: FASTNode):
children = self.create_children(node)
return ast_internal_classes.Component_Spec_List_Node(args=children)
def attr_spec_list(self, node: FASTNode):
return node
def part_ref(self, node: FASTNode):
children = self.create_children(node)
line = get_line(node)
name = get_child(children, ast_internal_classes.Name_Node)
args = get_child(children, ast_internal_classes.Section_Subscript_List_Node)
return ast_internal_classes.Call_Expr_Node(name=name, args=args.list, line=line)
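# Translate Fortran DO loop control into explicit init (`var = start`), condition
# (<= or >= depending on the sign of the step) and increment expressions, defaulting the step to 1.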
def loop_control(self, node: FASTNode):
children = self.create_children(node)
iteration_variable = children[1][0]
loop_start = children[1][1][0]
loop_end = children[1][1][1]
if (len(children[1][1]) == 3):
loop_step = children[1][1][2]
else:
loop_step = ast_internal_classes.Int_Literal_Node(value='1')
init_expr = ast_internal_classes.BinOp_Node(lval=iteration_variable, op='=', rval=loop_start)
if isinstance(loop_step, ast_internal_classes.UnOp_Node):
if (loop_step.op == '-'):
cond_expr = ast_internal_classes.BinOp_Node(lval=iteration_variable, op='>=', rval=loop_end)
else:
cond_expr = ast_internal_classes.BinOp_Node(lval=iteration_variable, op='<=', rval=loop_end)
iter_expr = ast_internal_classes.BinOp_Node(lval=iteration_variable, op='=', rval=ast_internal_classes.BinOp_Node(lval=iteration_variable, op='+', rval=loop_step))
return ast_internal_classes.Loop_Control_Node(init=init_expr, cond=cond_expr, iter=iter_expr)
def block_nonlabel_do_construct(self, node: FASTNode):
children = self.create_children(node)
do = get_child(children, ast_internal_classes.Nonlabel_Do_Stmt_Node)
body = children[1:(- 1)]
return ast_internal_classes.For_Stmt_Node(init=do.init, cond=do.cond, iter=do.iter, body=ast_internal_classes.Execution_Part_Node(execution=body), line_number=do.line_number)
def real_literal_constant(self, node: FASTNode):
return node
def subscript_triplet(self, node: FASTNode):
if (node.string == ':'):
return ast_internal_classes.ParDecl_Node(type='ALL')
children = self.create_children(node)
return ast_internal_classes.ParDecl_Node(type='RANGE', range=children)
def section_subscript_list(self, node: FASTNode):
children = self.create_children(node)
return ast_internal_classes.Section_Subscript_List_Node(list=children)
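# Build the specification part: collect use statements, type declarations and symbols, then
# drop plain declarations whose names already appear as symbol (parameter/array) declarations.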
def specification_part(self, node: FASTNode):
others = [self.create_ast(i) for i in node.children if (not isinstance(i, f08.Type_Declaration_Stmt))]
decls = [self.create_ast(i) for i in node.children if isinstance(i, f08.Type_Declaration_Stmt)]
uses = [self.create_ast(i) for i in node.children if isinstance(i, f03.Use_Stmt)]
tmp = [self.create_ast(i) for i in node.children]
typedecls = [i for i in tmp if isinstance(i, ast_internal_classes.Type_Decl_Node)]
symbols = []
for i in others:
if isinstance(i, list):
symbols.extend((j for j in i if isinstance(j, ast_internal_classes.Symbol_Array_Decl_Node)))
if isinstance(i, ast_internal_classes.Decl_Stmt_Node):
symbols.extend((j for j in i.vardecl if isinstance(j, ast_internal_classes.Symbol_Array_Decl_Node)))
for i in decls:
if isinstance(i, list):
symbols.extend((j for j in i if isinstance(j, ast_internal_classes.Symbol_Array_Decl_Node)))
if isinstance(i, ast_internal_classes.Decl_Stmt_Node):
symbols.extend((j for j in i.vardecl if isinstance(j, ast_internal_classes.Symbol_Array_Decl_Node)))
names_filtered = []
for j in symbols:
for i in decls:
names_filtered.extend((ii.name for ii in i.vardecl if (j.name == ii.name)))
decl_filtered = []
for i in decls:
vardecl_filtered = [ii for ii in i.vardecl if (ii.name not in names_filtered)]
if vardecl_filtered:
decl_filtered.append(ast_internal_classes.Decl_Stmt_Node(vardecl=vardecl_filtered))
return ast_internal_classes.Specification_Part_Node(specifications=decl_filtered, symbols=symbols, uses=uses, typedecls=typedecls)
def intrinsic_type_spec(self, node: FASTNode):
return node
def entity_decl_list(self, node: FASTNode):
return node
def int_literal_constant(self, node: FASTNode):
return ast_internal_classes.Int_Literal_Node(value=node.string)
def logical_literal_constant(self, node: FASTNode):
if (node.string in ['.TRUE.', '.true.', '.True.']):
return ast_internal_classes.Bool_Literal_Node(value='True')
if (node.string in ['.FALSE.', '.false.', '.False.']):
return ast_internal_classes.Bool_Literal_Node(value='False')
raise ValueError('Unknown logical literal constant')
def real_literal_constant(self, node: FASTNode):
return ast_internal_classes.Real_Literal_Node(value=node.string)
def actual_arg_spec_list(self, node: FASTNode):
children = self.create_children(node)
return ast_internal_classes.Arg_List_Node(args=children)
def initialization(self, node: FASTNode):
return node
def name(self, node: FASTNode):
return ast_internal_classes.Name_Node(name=node.string)
def type_name(self, node: FASTNode):
return ast_internal_classes.Type_Name_Node(name=node.string)
def tuple_node(self, node: FASTNode):
return node
def str_node(self, node: FASTNode):
return node |
def test_get_nearest_items(log):
model = ADMMSLIM(seed=SEED)
model.fit(log.filter((sf.col('item_idx') != 3)))
res = model.get_nearest_items(items=[0, 1], k=2, metric=None)
assert (res.count() == 4)
assert (set(res.toPandas().to_dict()['item_idx'].values()) == {0, 1})
res = model.get_nearest_items(items=[0, 1], k=1, metric=None)
assert (res.count() == 2)
res = model.get_nearest_items(items=[0, 1], k=4, metric=None, candidates=[0, 3])
assert (res.count() == 1)
assert (len(set(res.toPandas().to_dict()['item_idx'].values()).difference({0, 1})) == 0) |
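# Reference implementation of the fused bias + activation op: add a per-channel bias, apply
# the activation chosen from `activation_funcs` with optional alpha/gain, and optionally
# clamp the output.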
def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
assert isinstance(x, torch.Tensor)
assert ((clamp is None) or (clamp >= 0))
spec = activation_funcs[act]
alpha = float((alpha if (alpha is not None) else spec.def_alpha))
gain = float((gain if (gain is not None) else spec.def_gain))
clamp = float((clamp if (clamp is not None) else (- 1)))
if (b is not None):
assert (isinstance(b, torch.Tensor) and (b.ndim == 1))
assert (0 <= dim < x.ndim)
assert (b.shape[0] == x.shape[dim])
x = (x + b.reshape([((- 1) if (i == dim) else 1) for i in range(x.ndim)]))
alpha = float(alpha)
x = spec.func(x, alpha=alpha)
gain = float(gain)
if (gain != 1):
x = (x * gain)
if (clamp >= 0):
x = x.clamp((- clamp), clamp)
return x |
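# Build a joint log-probability function for a Pyro model: trace the model once to set up
# per-site transforms and enumeration, then evaluate either with Pyro's TraceEinsumEvaluator
# or an experimental replay-based path that sums per-site log-probs.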
def get_log_prob_fn(model, model_args=(), model_kwargs={}, implementation='pyro', automatic_transform_enabled=False, transforms=None, max_plate_nesting=None, jit_compile=False, jit_options=None, skip_jit_warnings=False, **kwargs) -> (Callable, Dict[(str, Any)]):
if (transforms is None):
transforms = {}
if (max_plate_nesting is None):
max_plate_nesting = _guess_max_plate_nesting(model, model_args, model_kwargs)
model = poutine.enum(config_enumerate(model), first_available_dim=((- 1) - max_plate_nesting))
model_trace = poutine.trace(model).get_trace(*model_args, **model_kwargs)
has_enumerable_sites = False
for (name, node) in model_trace.iter_stochastic_nodes():
fn = node['fn']
if isinstance(fn, _Subsample):
if ((fn.subsample_size is not None) and (fn.subsample_size < fn.size)):
raise NotImplementedError('Models with subsample sites are not supported.')
continue
if fn.has_enumerate_support:
has_enumerable_sites = True
continue
if automatic_transform_enabled:
transforms[name] = biject_to(fn.support).inv
else:
transforms[name] = dist.transforms.IndependentTransform(dist.transforms.identity_transform, 1)
if (implementation == 'pyro'):
trace_prob_evaluator = TraceEinsumEvaluator(model_trace, has_enumerable_sites, max_plate_nesting)
lp_maker = _LPMaker(model, model_args, model_kwargs, trace_prob_evaluator, transforms)
lp_fn = lp_maker.get_lp_fn(jit_compile, skip_jit_warnings, jit_options)
elif (implementation == 'experimental'):
assert (automatic_transform_enabled is False)
if jit_compile:
warnings.warn('Will not JIT compile, unsupported for now.')
def lp_fn(input_dict):
excluded_nodes = set(['_INPUT', '_RETURN'])
for (key, value) in input_dict.items():
model_trace.nodes[key]['value'] = value
replayed_model = pyro.poutine.replay(model, model_trace)
log_p = 0
for trace_enum in iter_discrete_traces('flat', fn=replayed_model):
trace_enum.compute_log_prob()
for (node_name, node) in trace_enum.nodes.items():
if (node_name in excluded_nodes):
continue
if (node['log_prob'].ndim == 1):
log_p += trace_enum.nodes[node_name]['log_prob']
else:
log_p += trace_enum.nodes[node_name]['log_prob'].sum(dim=1)
return log_p
else:
raise NotImplementedError
return (lp_fn, transforms) |
def test_execute_shell_allowlist_should_allow(agent: Agent, random_string: str):
agent.config.shell_command_control = sut.ALLOWLIST_CONTROL
agent.config.shell_allowlist = ['echo']
result = sut.execute_shell(f"echo 'Hello {random_string}!'", agent)
assert (('Hello' in result) and (random_string in result))
assert ('Error' not in result) |
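# Validation loop: computes segmentation IoU both for raw one-hot predictions and for
# predictions cleaned with morphological filtering and removal of small connected components.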
def eval_iou(model, val_loader, logdir=None, epoch=0):
model.eval()
total_intersects = 0
total_union = 0
process_intersects = 0
process_union = 0
directory = os.path.join(logdir, 'vis')
if (not os.path.isdir(directory)):
os.makedirs(directory)
nums = 0
min_area_threshold = 220
num_iter = 0
num_warmup = 5
pure_inf_time = 0
fps = 0
with torch.no_grad():
for (imgs, trans, rots, intrins, post_trans, post_rots, lidar_data, lidar_mask, car_trans, yaw_pitch_roll, semantic_gt, instance_gt, direction_gt, rec) in tqdm.tqdm(val_loader):
(semantic, embedding, direction) = model(imgs.cuda(), trans.cuda(), rots.cuda(), intrins.cuda(), post_trans.cuda(), post_rots.cuda(), lidar_data.cuda(), lidar_mask.cuda(), car_trans.cuda(), yaw_pitch_roll.cuda(), flag='testing')
segmentation = onehot_encoding(semantic)
segmentation = segmentation.squeeze(0)
oh_pred = segmentation.cpu().numpy()
processed_mask = []
processed_mask.append(oh_pred[0])
for i in range(1, oh_pred.shape[0]):
single_mask = oh_pred[i].astype('uint8')
binary_seg_result = np.array((single_mask * 255), dtype=np.uint8)
morphological_ret = _morphological_process(binary_seg_result, kernel_size=5)
connect_components_analysis_ret = _connect_components_analysis(image=morphological_ret)
labels = connect_components_analysis_ret[1]
stats = connect_components_analysis_ret[2]
for (index, stat) in enumerate(stats):
if (stat[4] <= min_area_threshold):
idx = np.where((labels == index))
morphological_ret[idx] = 0
processed_mask.append((morphological_ret / 255))
processed_mask = np.stack(processed_mask)
processed_mask = torch.from_numpy(processed_mask)
processed_mask = processed_mask.unsqueeze(0)
processed_mask = processed_mask.cuda().float()
semantic_gt = semantic_gt.cuda().float()
(intersects, union) = get_batch_iou(onehot_encoding(semantic), semantic_gt)
total_intersects += intersects
total_union += union
iou = (intersects / (union + 1e-07))
miou = iou[1:].mean()
(intersects, union) = get_batch_iou(processed_mask, semantic_gt)
process_intersects += intersects
process_union += union
return ((total_intersects / (total_union + 1e-07)), (process_intersects / (process_union + 1e-07)), fps) |
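# Replace occurrences of `r` with `t` in `s`; with the forward/backward flags the replaced
# span is extended over adjacent characters, otherwise a match that is not on a word
# boundary is skipped.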
def clean_replace(s, r, t, forward=True, backward=False):
def clean_replace_single(s, r, t, forward, backward, sidx=0):
idx = s.find(r)
if (idx == (- 1)):
return (s, (- 1))
idx_r = (idx + len(r))
if backward:
while ((idx > 0) and s[(idx - 1)]):
idx -= 1
elif ((idx > 0) and (s[(idx - 1)] != ' ')):
return (s, (- 1))
if forward:
while ((idx_r < len(s)) and (s[idx_r].isalpha() or s[idx_r].isdigit())):
idx_r += 1
elif ((idx_r != len(s)) and (s[idx_r].isalpha() or s[idx_r].isdigit())):
return (s, (- 1))
return (((s[:idx] + t) + s[idx_r:]), idx_r)
sidx = 0
while (sidx != (- 1)):
(s, sidx) = clean_replace_single(s, r, t, forward, backward, sidx)
return s |
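# Walk a route's log files and dump every raw 'ubloxRaw' message, back-to-back, into a
# single binary output file.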
def main(argv):
args = get_arg_parser().parse_args(sys.argv[1:])
if (not args.data_dir):
print('Data directory invalid.')
return
if (not args.route_name):
args.route_name = os.path.basename(args.data_dir)
args.data_dir = os.path.dirname(args.data_dir)
route = Route(args.route_name, args.data_dir)
lr = MultiLogIterator(route.log_paths(), wraparound=False)
with open(args.out_path, 'wb') as f:
try:
done = False
i = 0
while (not done):
msg = next(lr)
if (not msg):
break
smsg = msg.as_builder()
typ = smsg.which()
if (typ == 'ubloxRaw'):
f.write(smsg.to_bytes())
i += 1
except StopIteration:
print('All done')
print('Wrote {} msgs'.format(i)) |
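# Gumbel-max trick: draw one category index per row of `batch_probs` by taking the argmax of
# log-probabilities plus Gumbel noise (works on NumPy or CuPy arrays via chainer.cuda).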
def sample_from_categorical_distribution(batch_probs):
xp = chainer.cuda.get_array_module(batch_probs)
return xp.argmax((xp.log(batch_probs) + xp.random.gumbel(size=batch_probs.shape)), axis=1).astype(np.int32, copy=False) |
def conway_cross_product_doubled_power(self, p):
dim_list = [J.dim() for J in self.jordan_blocks_in_unimodular_list_by_scale_power(p)]
return sum(((((i - j) * dimi) * dim_list[j]) for (i, dimi) in enumerate(dim_list) for j in range(i))) |
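# Type inference for a comparison expression: the result is boolean, vectorized when any
# operand is a vector; mixing vectors of different lengths is rejected.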
def _Compare(t, symbols, inferred_symbols):
inf_type = _dispatch(t.left, symbols, inferred_symbols)
vec_len = None
if isinstance(inf_type, dtypes.vector):
vec_len = inf_type.veclen
for (o, e) in zip(t.ops, t.comparators):
if (o.__class__.__name__ not in cppunparse.CPPUnparser.cmpops):
continue
inf_type = _dispatch(e, symbols, inferred_symbols)
if isinstance(inf_type, dtypes.vector):
if ((vec_len is not None) and (vec_len != inf_type.veclen)):
raise SyntaxError('Inconsistent vector lengths in Compare')
vec_len = inf_type.veclen
return (dtypes.vector(dace.bool, vec_len) if (vec_len is not None) else dtypes.bool) |
def eg_req_func(protocols: List['EntanglementProtocol'], args: Arguments) -> 'EntanglementGenerationA':
name = args['name']
reservation = args['reservation']
for protocol in protocols:
if (isinstance(protocol, EntanglementGenerationA) and (protocol.remote_node_name == name) and (protocol.rule.get_reservation() == reservation)):
return protocol |
def test_plots():
plots = glob.glob('test_predictor_outputs/figures/*.png')
assert (len(plots) == 3) |
def sort_by_number_of_args(declaration, reverse=True):
def num_args(option):
return len(option['arguments'])
declaration['options'].sort(key=num_args, reverse=reverse) |
def count_congruence_solutions__good_type(self, p, k, m, zvec, nzvec):
return CountAllLocalTypesNaive(self, p, k, m, zvec, nzvec)[1] |
def test_input_error_related_to_feature_names():
pd = pytest.importorskip('pandas')
X = pd.DataFrame({'a': [0, 1, 2], 'b': [0, 1, 2]})
y = np.array([0, 1, 0])
monotonic_cst = {'d': 1, 'a': 1, 'c': (- 1)}
gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
expected_msg = re.escape("monotonic_cst contains 2 unexpected feature names: ['c', 'd'].")
with pytest.raises(ValueError, match=expected_msg):
gbdt.fit(X, y)
monotonic_cst = {k: 1 for k in 'abcdefghijklmnopqrstuvwxyz'}
gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
expected_msg = re.escape("monotonic_cst contains 24 unexpected feature names: ['c', 'd', 'e', 'f', 'g', '...'].")
with pytest.raises(ValueError, match=expected_msg):
gbdt.fit(X, y)
monotonic_cst = {'a': 1}
gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
expected_msg = re.escape('HistGradientBoostingRegressor was not fitted on data with feature names. Pass monotonic_cst as an integer array instead.')
with pytest.raises(ValueError, match=expected_msg):
gbdt.fit(X.values, y)
monotonic_cst = {'b': (- 1), 'a': '+'}
gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
expected_msg = re.escape("monotonic_cst['a'] must be either -1, 0 or 1. Got '+'.")
with pytest.raises(ValueError, match=expected_msg):
gbdt.fit(X, y) |
def transform_instance_annotations(annotation, transforms, image_size, *, keypoint_hflip_indices=None):
annotation = d2_transform_inst_anno(annotation, transforms, image_size, keypoint_hflip_indices=keypoint_hflip_indices)
if ('beziers' in annotation):
beziers = transform_beziers_annotations(annotation['beziers'], transforms)
annotation['beziers'] = beziers
if ('relation' in annotation):
relation = transform_relation_annotations(annotation['relation'], transforms)
annotation['relation'] = relation
return annotation |
def register_Ns3SpectrumPhyHelper_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::SpectrumPhyHelper const &', 'arg0')])
cls.add_method('Create', 'ns3::Ptr< ns3::SpectrumPhy >', [param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::NetDevice >', 'device')], is_const=True)
cls.add_method('SetChannel', 'void', [param('ns3::Ptr< ns3::SpectrumChannel >', 'channel')])
cls.add_method('SetChannel', 'void', [param('std::string', 'channelName')])
cls.add_method('SetPhy', 'void', [param('std::string', 'name'), param('std::string', 'n0', default_value='""'), param('ns3::AttributeValue const &', 'v0', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n1', default_value='""'), param('ns3::AttributeValue const &', 'v1', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n2', default_value='""'), param('ns3::AttributeValue const &', 'v2', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n3', default_value='""'), param('ns3::AttributeValue const &', 'v3', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n4', default_value='""'), param('ns3::AttributeValue const &', 'v4', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n5', default_value='""'), param('ns3::AttributeValue const &', 'v5', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n6', default_value='""'), param('ns3::AttributeValue const &', 'v6', default_value='ns3::EmptyAttributeValue()'), param('std::string', 'n7', default_value='""'), param('ns3::AttributeValue const &', 'v7', default_value='ns3::EmptyAttributeValue()')])
cls.add_method('SetPhyAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'v')])
return |
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
cls.add_method('Cleanup', 'void', [], is_static=True)
return |
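# Two-layer GNN classification head: final logits come from a learned weight matrix over the
# second inter-layer's embeddings, and the training loss combines the GNN cross-entropy with
# lambda_1 times the first layer's label-score loss.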
class TwoLayerRio(nn.Module):
def __init__(self, num_classes, inter1, inter2, lambda_1, last_label_scores):
super(TwoLayerRio, self).__init__()
self.inter1 = inter1
self.inter2 = inter2
self.xent = nn.CrossEntropyLoss()
self.weight = nn.Parameter(torch.FloatTensor(num_classes, inter2.embed_dim))
init.xavier_uniform_(self.weight)
self.lambda_1 = lambda_1
self.last_label_scores = last_label_scores
def forward(self, nodes, labels, train_flag=True):
label_scores_one = self.last_label_scores
(embeds2, label_scores_two) = self.inter2(nodes, labels, train_flag)
scores2 = self.weight.mm(embeds2)
return (scores2.t(), label_scores_one, label_scores_two)
def to_prob(self, nodes, labels, train_flag=True):
(gnn_logits2, label_logits_one, label_logits_two) = self.forward(nodes, labels, train_flag)
gnn_scores2 = torch.sigmoid(gnn_logits2)
label_scores_one = torch.sigmoid(label_logits_one)
label_scores_two = torch.sigmoid(label_logits_two)
return (gnn_scores2, label_scores_one, label_scores_two)
def loss(self, nodes, labels, train_flag=True):
(gnn_scores2, label_scores_one, label_scores_two) = self.forward(nodes, labels, train_flag)
label_loss_one = self.xent(label_scores_one, labels.squeeze())
label_loss_two = self.xent(label_scores_two, labels.squeeze())
gnn_loss2 = self.xent(gnn_scores2, labels.squeeze())
final_loss = (gnn_loss2 + (self.lambda_1 * label_loss_one))
return final_loss |
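# PyTorch Geometric InMemoryDataset for link-property-prediction benchmarks: resolves dataset
# metadata from master.csv (or a caller-supplied meta_dict), downloads and extracts the raw
# files on demand, and processes everything into one cached Data object.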
class PygLinkPropPredDataset(InMemoryDataset):
def __init__(self, name, root='dataset', transform=None, pre_transform=None, meta_dict=None):
self.name = name
if (meta_dict is None):
self.dir_name = '_'.join(name.split('-'))
if osp.exists(osp.join(root, (self.dir_name + '_pyg'))):
self.dir_name = (self.dir_name + '_pyg')
self.original_root = root
self.root = osp.join(root, self.dir_name)
master = pd.read_csv(os.path.join(os.path.dirname(__file__), 'master.csv'), index_col=0, keep_default_na=False)
if (not (self.name in master)):
error_mssg = 'Invalid dataset name {}.\n'.format(self.name)
error_mssg += 'Available datasets are as follows:\n'
error_mssg += '\n'.join(master.keys())
raise ValueError(error_mssg)
self.meta_info = master[self.name]
else:
self.dir_name = meta_dict['dir_path']
self.original_root = ''
self.root = meta_dict['dir_path']
self.meta_info = meta_dict
if (osp.isdir(self.root) and (not osp.exists(osp.join(self.root, (('RELEASE_v' + str(self.meta_info['version'])) + '.txt'))))):
print((self.name + ' has been updated.'))
if (input('Will you update the dataset now? (y/N)\n').lower() == 'y'):
shutil.rmtree(self.root)
self.download_name = self.meta_info['download_name']
self.task_type = self.meta_info['task type']
self.eval_metric = self.meta_info['eval metric']
self.is_hetero = (self.meta_info['is hetero'] == 'True')
self.binary = (self.meta_info['binary'] == 'True')
super(PygLinkPropPredDataset, self).__init__(self.root, transform, pre_transform)
(self.data, self.slices) = torch.load(self.processed_paths[0])
def get_edge_split(self, split_type=None):
if (split_type is None):
split_type = self.meta_info['split']
path = osp.join(self.root, 'split', split_type)
if os.path.isfile(os.path.join(path, 'split_dict.pt')):
return torch.load(os.path.join(path, 'split_dict.pt'))
train = replace_numpy_with_torchtensor(torch.load(osp.join(path, 'train.pt')))
valid = replace_numpy_with_torchtensor(torch.load(osp.join(path, 'valid.pt')))
test = replace_numpy_with_torchtensor(torch.load(osp.join(path, 'test.pt')))
return {'train': train, 'valid': valid, 'test': test}
@property
def raw_file_names(self):
if self.binary:
if self.is_hetero:
return ['edge_index_dict.npz']
else:
return ['data.npz']
elif self.is_hetero:
return ['num-node-dict.csv.gz', 'triplet-type-list.csv.gz']
else:
file_names = ['edge']
if (self.meta_info['has_node_attr'] == 'True'):
file_names.append('node-feat')
if (self.meta_info['has_edge_attr'] == 'True'):
file_names.append('edge-feat')
return [(file_name + '.csv.gz') for file_name in file_names]
@property
def processed_file_names(self):
return osp.join('geometric_data_processed.pt')
def download(self):
url = self.meta_info['url']
if decide_download(url):
path = download_url(url, self.original_root)
extract_zip(path, self.original_root)
os.unlink(path)
shutil.rmtree(self.root)
shutil.move(osp.join(self.original_root, self.download_name), self.root)
else:
print('Stop downloading.')
shutil.rmtree(self.root)
exit((- 1))
def process(self):
add_inverse_edge = (self.meta_info['add_inverse_edge'] == 'True')
if (self.meta_info['additional node files'] == 'None'):
additional_node_files = []
else:
additional_node_files = self.meta_info['additional node files'].split(',')
if (self.meta_info['additional edge files'] == 'None'):
additional_edge_files = []
else:
additional_edge_files = self.meta_info['additional edge files'].split(',')
if self.is_hetero:
data = read_heterograph_pyg(self.raw_dir, add_inverse_edge=add_inverse_edge, additional_node_files=additional_node_files, additional_edge_files=additional_edge_files, binary=self.binary)[0]
else:
data = read_graph_pyg(self.raw_dir, add_inverse_edge=add_inverse_edge, additional_node_files=additional_node_files, additional_edge_files=additional_edge_files, binary=self.binary)[0]
data = (data if (self.pre_transform is None) else self.pre_transform(data))
print('Saving...')
torch.save(self.collate([data]), self.processed_paths[0])
def __repr__(self):
return '{}()'.format(self.__class__.__name__) |
def get_optimizer(opt, model):
if (opt.optim == 'adam'):
optimizer = torch.optim.Adam(model.parameters(), opt.lr)
elif (opt.optim == 'sgd'):
print('Using SGD')
optimizer = torch.optim.SGD(model.parameters(), opt.lr, momentum=0.9, weight_decay=0.0001)
else:
assert 0, opt.optim
return optimizer |
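A minimal usage sketch for the optimizer factory above; `opt` only needs `optim` and `lr` attributes, so a `SimpleNamespace` with hypothetical values stands in for the real options object.
from types import SimpleNamespace
import torch.nn as nn
# Hypothetical options object; get_optimizer only reads opt.optim and opt.lr.
opt = SimpleNamespace(optim='sgd', lr=0.01)
model = nn.Linear(8, 2)
optimizer = get_optimizer(opt, model)  # prints 'Using SGD' and returns a torch.optim.SGD instance |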
def fromqpixmap(im):
from . import ImageQt
if (not ImageQt.qt_is_installed):
raise ImportError('Qt bindings are not installed')
return ImageQt.fromqpixmap(im) |
def distorted_bounding_box_crop(image, bbox, min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 1.0), max_attempts=100, scope=None):
with tf.name_scope((scope or 'distorted_bounding_box_crop')):
shape = tf.shape(image)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(shape, bounding_boxes=bbox, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=True)
(bbox_begin, bbox_size, _) = sample_distorted_bounding_box
(offset_y, offset_x, _) = tf.unstack(bbox_begin)
(target_height, target_width, _) = tf.unstack(bbox_size)
image = tf.image.crop_to_bounding_box(image, offset_y, offset_x, target_height, target_width)
return image |
def replace_xml_entities(text):
for (c, r) in xml_entities.items():
text = text.replace(c, r)
return text |
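A hedged sketch of how the helper above is meant to be used; the module-level `xml_entities` table it relies on is not shown in this snippet, so the mapping below is illustrative only.
# Illustrative stand-in for the module-level `xml_entities` dict the helper expects.
xml_entities = {'&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&apos;'}
print(replace_xml_entities('a < b & "c"'))  # a &lt; b &amp; &quot;c&quot; |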
class IntegerModFactory(UniqueFactory):
def get_object(self, version, key, extra_args):
out = super().get_object(version, key, extra_args)
category = extra_args.get('category', None)
if (category is not None):
out._refine_category_(category)
out._factory_data[3]['category'] = category
return out
def create_key_and_extra_args(self, order=0, is_field=False, category=None):
if is_field:
from sage.categories.fields import Fields
return (order, {'category': Fields()})
return (order, {})
def create_object(self, version, order, **kwds):
if isinstance(order, tuple):
(order, category) = order
kwds.setdefault('category', category)
if (order < 0):
order = (- order)
if (order == 0):
return integer_ring.IntegerRing(**kwds)
else:
return IntegerModRing_generic(order, **kwds) |
def encode_param_command(args, **kwargs):
in_files = [f for f in os.listdir(args.indir) if os.path.isfile(os.path.join(args.indir, f))]
logger.log(99, 'Loading parameters...')
for file_path in in_files:
key = urllib.parse.unquote(os.path.splitext(file_path)[0].replace('~', '/'))
logger.log(99, key)
load_param_in_txt(key, os.path.join(args.indir, file_path))
logger.log(99, 'Saving parameters...')
save_parameters(args.param)
logger.log(99, 'Encode Parameter Completed.')
return True |
def test_slice():
with goos.OptimizationPlan() as plan:
x = goos.Variable([[0, 1, 2, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19, 20], [21, 22, 23, 24, 25]])
t = goos.Slice(x, ['c', 'c'])
np.testing.assert_allclose(t.get().array, 13)
g = np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
np.testing.assert_allclose(t.get_grad([x])[0].array_grad, g)
t = goos.Slice(x, [[1, 4], 'c'])
np.testing.assert_allclose(t.get().array, [[8], [13], [18]])
g = np.array([[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]])
np.testing.assert_allclose(t.get_grad([x])[0].array_grad, g)
t = goos.Slice(x, [3, [1, 3]])
np.testing.assert_allclose(t.get().array, [[17, 18]])
g = np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 1, 0, 0], [0, 0, 0, 0, 0]])
np.testing.assert_allclose(t.get_grad([x])[0].array_grad, g)
t = goos.Slice(x, [3, None])
np.testing.assert_allclose(t.get().array, [[16, 17, 18, 19, 20]])
g = np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [0, 0, 0, 0, 0]])
np.testing.assert_allclose(t.get_grad([x])[0].array_grad, g) |
@pytest.mark.parametrize('ty,num', add_table)
@test_utils.test(arch=[ti.cpu, ti.cuda, ti.vulkan], debug=True)
def test_add_no_overflow(capfd, ty, num):
if (not supports_overflow(ti.lang.impl.current_cfg().arch)):
return
capfd.readouterr()
@ti.kernel
def foo() -> ty:
a = ty(num)
b = ty((num - 1))
return (a + b)
foo()
ti.sync()
captured = capfd.readouterr().out
assert ('Addition overflow detected' not in captured)
assert ('return a + b' not in captured) |
class UnusedParamTwoLinLayerNet(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Linear(10, 10, bias=False)
self.b = nn.Linear(10, 10, bias=False)
self.c = nn.Linear(5, 5, bias=False)
def forward(self, x):
a = self.a(x)
b = self.b(x)
return (a, b) |
def pt_repeat_n_times(niters):
for _ in range(niters):
for (input_tensor, repeat) in zip(input_tensors, repeats):
pt_repeat(input_tensor, repeat) |
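The benchmark loop above closes over `input_tensors`, `repeats`, and `pt_repeat`, none of which appear in the snippet; a minimal sketch of those assumed globals, with made-up shapes, might look like this.
import torch
# Hypothetical definitions for the globals the loop assumes.
def pt_repeat(input_tensor, repeat):
    return input_tensor.repeat(repeat)
input_tensors = [torch.randn(64), torch.randn(128)]
repeats = [3, 5]
pt_repeat_n_times(10)  # runs the repeat op 10 times over each (tensor, repeat) pair |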
def import_loader(opt):
dataset_name = (opt.model_task.upper() + 'Data')
dataset = getattr(import_module('data'), dataset_name)
if (opt.task == 'train'):
train_inp_path = opt.config['train']['train_inp']
train_gt_path = opt.config['train']['train_gt']
valid_inp_path = opt.config['train']['valid_inp']
valid_gt_path = opt.config['train']['valid_gt']
train_data = dataset(opt, train_inp_path, train_gt_path)
if (opt.model_task == 'sr'):
valid_data = dataset(opt, valid_inp_path, valid_gt_path, 'valid')
else:
valid_data = dataset(opt, valid_inp_path, valid_gt_path)
train_loader = data.DataLoader(train_data, batch_size=opt.config['train']['batch_size'], shuffle=True, num_workers=opt.config['train']['num_workers'], drop_last=True)
valid_loader = data.DataLoader(valid_data, batch_size=1, shuffle=False, num_workers=opt.config['train']['num_workers'], drop_last=False)
return (train_loader, valid_loader)
elif (opt.task == 'test'):
inp_test_path = opt.config['test']['test_inp']
gt_test_path = opt.config['test']['test_gt']
test_data = dataset(opt, inp_test_path, gt_test_path)
test_loader = data.DataLoader(test_data, batch_size=1, shuffle=False, num_workers=opt.config['test']['num_workers'], drop_last=False)
return test_loader
elif (opt.task == 'demo'):
inp_demo_path = opt.config['demo']['demo_inp']
demo_data = dataset(opt, inp_demo_path)
demo_loader = data.DataLoader(demo_data, batch_size=1, shuffle=False, num_workers=opt.config['demo']['num_workers'], drop_last=False)
return demo_loader
else:
raise ValueError('unknown task, please choose from [train, test, demo]') |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--train_corpus', default=None, type=str, required=True, help='The input train corpus.')
parser.add_argument('--bert_model', default=None, type=str, required=True, help='Bert pre-trained model selected in the list: bert-base-uncased, bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.')
parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints will be written.')
parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.')
parser.add_argument('--do_mask', action='store_true', help='Whether to use mask words in columns')
parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
parser.add_argument('--train_batch_size', default=32, type=int, help='Total batch size for training.')
parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
parser.add_argument('--num_train_epochs', default=40, type=int, help='Total number of training epochs to perform.')
parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
parser.add_argument('--on_memory', action='store_true', help='Whether to load train samples into memory or use disk')
parser.add_argument('--do_lower_case', action='store_true', help='Whether to lower case the input text. True for uncased models, False for cased models.')
parser.add_argument('--local_rank', type=int, default=(- 1), help='local_rank for distributed training on gpus')
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumualte before performing a backward/update pass.')
parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit float precision instead of 32-bit')
parser.add_argument('--loss_scale', type=float, default=0, help='Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n0 (default value): dynamic loss scaling.\nPositive power of 2: static loss scaling value.\n')
parser.add_argument('--logfile', type=str, default='log.txt')
args = parser.parse_args()
if ((args.local_rank == (- 1)) or args.no_cuda):
device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
if (args.gradient_accumulation_steps < 1):
raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps))
args.train_batch_size = (args.train_batch_size // args.gradient_accumulation_steps)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if (n_gpu > 0):
torch.cuda.manual_seed_all(args.seed)
if (not args.do_train):
raise ValueError('Training is currently the only implemented execution option. Please set `do_train`.')
if (os.path.exists(args.output_dir) and os.listdir(args.output_dir)):
raise ValueError('Output directory ({}) already exists and is not empty.'.format(args.output_dir))
if (not os.path.exists(args.output_dir)):
os.makedirs(args.output_dir)
log = Logger(os.path.join(args.output_dir, args.logfile), 'w')
log.put('device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}, num train epochs: {}'.format(device, n_gpu, bool((args.local_rank != (- 1))), args.fp16, args.num_train_epochs))
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
num_train_optimization_steps = None
if args.do_train:
log.put(('Loading Train Dataset: ' + args.train_corpus))
train_dataset = BERTDataset(args.train_corpus, tokenizer, seq_len=args.max_seq_length, corpus_lines=None, on_memory=args.on_memory)
num_train_optimization_steps = (int(((len(train_dataset) / args.train_batch_size) / args.gradient_accumulation_steps)) * args.num_train_epochs)
if (args.local_rank != (- 1)):
num_train_optimization_steps = (num_train_optimization_steps // torch.distributed.get_world_size())
model = BertForPreTraining.from_pretrained(args.bert_model)
if args.fp16:
model.half()
model.to(device)
if (args.local_rank != (- 1)):
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError('Please install apex to use distributed and fp16 training.')
model = DDP(model)
elif (n_gpu > 1):
model = torch.nn.DataParallel(model)
if args.do_train:
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
if args.fp16:
try:
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError('Please install apex to use distributed and fp16 training.')
optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0)
if (args.loss_scale == 0):
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion, t_total=num_train_optimization_steps)
else:
optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=num_train_optimization_steps)
global_step = 0
if args.do_train:
log.put('***** Running training *****')
log.put((' Num examples = ' + str(len(train_dataset))))
log.put((' Batch size = ' + str(args.train_batch_size)))
log.put((' Num steps = ' + str(num_train_optimization_steps)))
log.put((' Using MLM = ' + str(args.do_mask)))
if (args.local_rank == (- 1)):
train_sampler = RandomSampler(train_dataset)
else:
train_sampler = DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
model.train()
for epoch in range(args.num_train_epochs):
log.put(('\nEpoch:\t' + str(epoch)))
tr_loss = 0.0
(nb_tr_examples, nb_tr_steps) = (0, 0)
total_steps = len(train_dataloader)
step_check = int((len(train_dataloader) * 0.1))
for (step, batch) in enumerate(train_dataloader):
batch = tuple((t.to(device) for t in batch))
(input_ids, input_mask, segment_ids, lm_label_ids, col_label_ids) = batch
if (not args.do_mask):
lm_label_ids = None
loss = model(input_ids, segment_ids, input_mask, lm_label_ids, col_label_ids)
if (n_gpu > 1):
loss = loss.mean()
if (args.gradient_accumulation_steps > 1):
loss = (loss / args.gradient_accumulation_steps)
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if ((step % step_check) == 0):
log.put(('Finishing training for current epoch:\t' + str(round((step / total_steps), 3))))
mean_loss = ((tr_loss * args.gradient_accumulation_steps) / nb_tr_steps)
if (((step + 1) % args.gradient_accumulation_steps) == 0):
if args.fp16:
lr_this_step = (args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion))
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
epoch_loss = (tr_loss / len(train_dataloader))
log.put(('Train epoch loss:\t' + str(epoch_loss)))
if ((((epoch % 5) == 0) or (epoch == 2)) and (torch.distributed.get_rank() == 0)):
log.put(('** ** * Saving fine-tuned model for epoch ' + str(epoch)))
model_to_save = (model.module if hasattr(model, 'module') else model)
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME.replace('.bin', (('_' + str(epoch)) + '.bin')))
output_config_file = os.path.join(args.output_dir, CONFIG_NAME.replace('.json', (('_' + str(epoch)) + '.json')))
if args.do_train:
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir) |
def get_split_list(in_dim, child_num):
in_dim_list = ([(in_dim // child_num)] * child_num)
for _i in range((in_dim % child_num)):
in_dim_list[_i] += 1
return in_dim_list |
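Two worked cases for `get_split_list` above: the remainder is spread over the first children, so the parts differ by at most one.
assert get_split_list(10, 3) == [4, 3, 3]
assert get_split_list(8, 4) == [2, 2, 2, 2] |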
def cached_path(url_or_filename, cache_dir=None):
if (cache_dir is None):
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if ((sys.version_info[0] == 3) and isinstance(url_or_filename, Path)):
url_or_filename = str(url_or_filename)
if ((sys.version_info[0] == 3) and isinstance(cache_dir, Path)):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if (parsed.scheme in ('http', 'https', 's3')):
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
return url_or_filename
elif (parsed.scheme == ''):
raise EnvironmentError('file {} not found'.format(url_or_filename))
else:
raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename)) |
class CopyCheckTester(unittest.TestCase):
def setUp(self):
self.transformer_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir, 'models/bert/'))
check_copies.TRANSFORMER_PATH = self.transformer_dir
shutil.copy(os.path.join(git_repo_path, 'src/transformers/models/bert/modeling_bert.py'), os.path.join(self.transformer_dir, 'models/bert/modeling_bert.py'))
def tearDown(self):
check_copies.TRANSFORMER_PATH = 'src/transformers'
shutil.rmtree(self.transformer_dir)
def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
code = ((comment + f'''
class {class_name}(nn.Module):
''') + class_code)
if (overwrite_result is not None):
expected = ((comment + f'''
class {class_name}(nn.Module):
''') + overwrite_result)
mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
code = black.format_str(code, mode=mode)
fname = os.path.join(self.transformer_dir, 'new_code.py')
with open(fname, 'w', newline='\n') as f:
f.write(code)
if (overwrite_result is None):
self.assertTrue((len(check_copies.is_copy_consistent(fname)) == 0))
else:
check_copies.is_copy_consistent(f.name, overwrite=True)
with open(fname, 'r') as f:
self.assertEqual(f.read(), expected)
def test_find_code_in_transformers(self):
code = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead')
self.assertEqual(code, REFERENCE_CODE)
def test_is_copy_consistent(self):
self.check_copy_consistency('# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', (REFERENCE_CODE + '\n'))
self.check_copy_consistency('# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', REFERENCE_CODE)
self.check_copy_consistency('# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', re.sub('Bert', 'TestModel', REFERENCE_CODE))
long_class_name = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}', f'{long_class_name}LMPredictionHead', re.sub('Bert', long_class_name, REFERENCE_CODE))
self.check_copy_consistency('# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', REFERENCE_CODE, overwrite_result=re.sub('Bert', 'TestModel', REFERENCE_CODE))
def test_convert_to_localized_md(self):
localized_readme = check_copies.LOCALIZED_READMES['README_zh-hans.md']
md_list = '1. **[ALBERT]( (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations]( by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1. **[DistilBERT]( (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter]( by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2]( RoBERTa into [DistilRoBERTa]( Multilingual BERT into [DistilmBERT]( and a German version of DistilBERT.\n1. **[ELECTRA]( (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators]( by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.'
localized_md_list = '1. **[ALBERT]( ( Google Research and the Toyota Technological Institute at Chicago) [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations]( Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut \n'
converted_md_list_sample = '1. **[ALBERT]( ( Google Research and the Toyota Technological Institute at Chicago) [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations]( Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut \n1. **[DistilBERT]( ( HuggingFace) [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter]( Victor Sanh, Lysandre Debut and Thomas Wolf The same method has been applied to compress GPT2 into [DistilGPT2]( RoBERTa into [DistilRoBERTa]( Multilingual BERT into [DistilmBERT]( and a German version of DistilBERT.\n1. **[ELECTRA]( ( Google Research/Stanford University) [ELECTRA: Pre-training text encoders as discriminators rather than generators]( Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning \n'
(num_models_equal, converted_md_list) = check_copies.convert_to_localized_md(md_list, localized_md_list, localized_readme['format_model_list'])
self.assertFalse(num_models_equal)
self.assertEqual(converted_md_list, converted_md_list_sample)
(num_models_equal, converted_md_list) = check_copies.convert_to_localized_md(md_list, converted_md_list, localized_readme['format_model_list'])
self.assertTrue(num_models_equal)
link_changed_md_list = '1. **[ALBERT]( (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations]( by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
link_unchanged_md_list = '1. **[ALBERT]( ( Google Research and the Toyota Technological Institute at Chicago) [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations]( Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut \n'
converted_md_list_sample = '1. **[ALBERT]( ( Google Research and the Toyota Technological Institute at Chicago) [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations]( Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut \n'
(num_models_equal, converted_md_list) = check_copies.convert_to_localized_md(link_changed_md_list, link_unchanged_md_list, localized_readme['format_model_list'])
self.assertEqual(converted_md_list, converted_md_list_sample) |
@run_cmd.command('client')
@click.option('-d', '--discoverhost', required=False, help='Hostname for discovery services (reducer).')
@click.option('-p', '--discoverport', required=False, help='Port for discovery services (reducer).')
@click.option('--token', required=False, help='Set token provided by reducer if enabled')
@click.option('-n', '--name', required=False, default=('client' + str(uuid.uuid4())[:8]))
@click.option('-i', '--client_id', required=False)
@click.option('--local-package', is_flag=True, help='Enable local compute package')
@click.option('--force-ssl', is_flag=True, help='Force SSL/TLS for REST service')
@click.option('-u', '--dry-run', required=False, default=False)
@click.option('-s', '--secure', required=False, default=False)
@click.option('-pc', '--preshared-cert', required=False, default=False)
@click.option('-v', '--verify', is_flag=True, help='Verify SSL/TLS for REST service')
@click.option('-c', '--preferred-combiner', required=False, default=False)
@click.option('-va', '--validator', required=False, default=True)
@click.option('-tr', '--trainer', required=False, default=True)
@click.option('-in', '--init', required=False, default=None, help='Set to a filename to (re)init client from file state.')
@click.option('-l', '--logfile', required=False, default=None, help='Set logfile for client log to file.')
@click.option('--heartbeat-interval', required=False, default=2)
@click.option('--reconnect-after-missed-heartbeat', required=False, default=30)
@click.option('--verbosity', required=False, default='INFO', type=click.Choice(['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'], case_sensitive=False))
@click.pass_context
def client_cmd(ctx, discoverhost, discoverport, token, name, client_id, local_package, force_ssl, dry_run, secure, preshared_cert, verify, preferred_combiner, validator, trainer, init, logfile, heartbeat_interval, reconnect_after_missed_heartbeat, verbosity):
remote = (False if local_package else True)
config = {'discover_host': discoverhost, 'discover_port': discoverport, 'token': token, 'name': name, 'client_id': client_id, 'remote_compute_context': remote, 'force_ssl': force_ssl, 'dry_run': dry_run, 'secure': secure, 'preshared_cert': preshared_cert, 'verify': verify, 'preferred_combiner': preferred_combiner, 'validator': validator, 'trainer': trainer, 'init': init, 'logfile': logfile, 'heartbeat_interval': heartbeat_interval, 'reconnect_after_missed_heartbeat': reconnect_after_missed_heartbeat, 'verbosity': verbosity}
if init:
apply_config(config)
validate_client_config(config)
client = Client(config)
client.run() |
def get_number_of_leaves_from_tree(alidir):
stdout = get_command_stdout('tree-info {0}/tree 2>/dev/null | grep num-pdfs'.format(alidir))
parts = stdout.split()
assert (parts[0] == 'num-pdfs')
num_leaves = int(parts[1])
if (num_leaves == 0):
raise Exception('Number of leaves is 0')
return num_leaves |
@pytest.mark.parametrize('deriv_type', ('sigma', 'h', 'both'))
def test_adjoint(deriv_type):
n_layer = 4
np.random.seed(40)
log_cond = np.random.rand(n_layer)
log_thick = np.random.rand((n_layer - 1))
if (deriv_type != 'h'):
sigma_map = maps.ExpMap()
model = log_cond
sigma = None
else:
sigma = np.exp(log_cond)
sigma_map = None
if (deriv_type != 'sigma'):
h_map = maps.ExpMap()
model = log_thick
h = None
else:
h_map = None
h = np.exp(log_thick)
if (deriv_type == 'both'):
wire = maps.Wires(('sigma', n_layer), ('thick', (n_layer - 1)))
sigma_map = (sigma_map * wire.sigma)
h_map = (h_map * wire.thick)
model = np.r_[(log_cond, log_thick)]
survey = get_survey('volt', 'd', 'd')
simulation = dc.Simulation1DLayers(survey=survey, sigma=sigma, sigmaMap=sigma_map, thicknesses=h, thicknessesMap=h_map)
def J(v):
return simulation.Jvec(model, v)
def JT(v):
return simulation.Jtvec(model, v)
assert_isadjoint(J, JT, len(model), survey.nD) |
def save_gray_numpy(data, index=0):
while (len(data.shape) > 2):
data = data[0]
if ((data.max() <= 1) and (data.min() >= 0)):
data = (data * 255)
out_path = os.path.join(str(folder), ('%05d.png' % index))
cv2.imwrite(out_path, data)
print(('Saved debug image to ' + out_path)) |
def flops_analysis_options(output_dir):
options = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
options['select'] = ['float_ops', 'micros', 'device']
options['min_float_ops'] = 1
options['order_by'] = 'float_ops'
options['account_type_regexes'] = ['.*']
if output_dir:
options['dump_to_file'] = os.path.join(output_dir, 'flops.txt')
return ('scope', options) |
def find_usages(query_att, query_file, lst_key_att, key_file):
usages = []
query_str = get_string(query_file, query_att[0], query_att[1])
for key_att in lst_key_att:
key_str = get_string(key_file, key_att[0], key_att[1])
if (key_str == query_str):
usages.append(key_att)
return usages |
@click.command()
@click.option('--seed', default=1)
@click.option('--epochs', default=500)
@click.option('--batch_size', default=1024)
@wrap_experiment(snapshot_mode='all')
def mtppo_metaworld_ml1_push(ctxt, seed, epochs, batch_size):
set_seed(seed)
env = GarageEnv(normalize(mwb.ML1.get_train_tasks('push-v1')))
policy = GaussianMLPPolicy(env_spec=env.spec, hidden_nonlinearity=torch.tanh, output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec, hidden_nonlinearity=torch.tanh, output_nonlinearity=None)
algo = PPO(env_spec=env.spec, policy=policy, value_function=value_function, max_path_length=128, discount=0.99, gae_lambda=0.95, center_adv=True, lr_clip_range=0.2)
runner = LocalRunner(ctxt)
runner.setup(algo, env)
runner.train(n_epochs=epochs, batch_size=batch_size) |
class IterMeter(object):
def __init__(self):
self.val = 0
def step(self):
self.val += 1
def get(self):
return self.val |
def to_tensor(wrapped_func):
def func(*args, **kwargs):
result = wrapped_func(*args, **kwargs)
return {k: torch.tensor(v, dtype=torch.float) for (k, v) in result.items()}
return func |
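A small, hypothetical metric function to illustrate the `to_tensor` decorator above: the wrapped function returns a plain dict of Python numbers, and the wrapper converts every value into a float tensor.
import torch
@to_tensor
def compute_metrics(preds, targets):
    # Plain Python numbers; the decorator turns them into float tensors.
    correct = sum(int(p == t) for p, t in zip(preds, targets))
    return {'accuracy': correct / len(targets), 'count': len(targets)}
out = compute_metrics([1, 0, 1], [1, 1, 1])
print(out['accuracy'])  # tensor(0.6667) |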
def _test_func2d_nograd(x):
f = (((cos(((14.5 * x[0]) - 0.3)) + ((x[1] + 0.2) * x[1])) + ((x[0] + 0.2) * x[0])) + 1.)
return f |
class ImageClassifierCLI(CLI):
def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
super().add_arguments_to_parser(parser)
parser.link_arguments('data.num_classes', 'model.num_classes', apply_on='instantiate')
parser.link_arguments('data.image_shape', 'model.image_shape', apply_on='instantiate')
parser.set_defaults({'experiment': 'ecg', 'model.num_frequency_bands': 32, 'model.num_latents': 32, 'model.num_latent_channels': 128, 'model.encoder.num_layers': 3, 'model.encoder.num_self_attention_layers_per_block': 3, 'model.decoder.num_cross_attention_heads': 1, 'model.scorer': 'f1_macro'}) |
class TapasForMaskedLM():
def __init__(self, *args, **kwargs):
requires_pytorch(self)
def from_pretrained(self, *args, **kwargs):
requires_pytorch(self) |
def register_Ns3UanPhyListener_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::UanPhyListener const &', 'arg0')])
cls.add_method('NotifyCcaEnd', 'void', [], is_pure_virtual=True, is_virtual=True)
cls.add_method('NotifyCcaStart', 'void', [], is_pure_virtual=True, is_virtual=True)
cls.add_method('NotifyRxEndError', 'void', [], is_pure_virtual=True, is_virtual=True)
cls.add_method('NotifyRxEndOk', 'void', [], is_pure_virtual=True, is_virtual=True)
cls.add_method('NotifyRxStart', 'void', [], is_pure_virtual=True, is_virtual=True)
cls.add_method('NotifyTxStart', 'void', [param('ns3::Time', 'duration')], is_pure_virtual=True, is_virtual=True)
return |
def _do_python_eval_corloc(json_dataset, salt, output_dir='output'):
info = voc_info(json_dataset)
year = info['year']
anno_path = info['anno_path']
image_set_path = info['image_set_path']
devkit_path = info['devkit_path']
cachedir = os.path.join(devkit_path, 'annotations_cache')
corlocs = []
too_min_rates = []
use_07_metric = (True if (int(year) < 2010) else False)
logger.info(('VOC07 metric? ' + ('Yes' if use_07_metric else 'No')))
if (not os.path.isdir(output_dir)):
os.mkdir(output_dir)
for (_, cls) in enumerate(json_dataset.classes):
if (cls == '__background__'):
continue
filename = _get_voc_results_file_template(json_dataset, salt).format(cls)
(corloc, too_min_rate) = voc_eval_corloc(filename, anno_path, image_set_path, cls, cachedir, ovthresh=0.5, use_07_metric=use_07_metric)
corlocs += [corloc]
too_min_rates += [too_min_rate]
logger.info('CorLoc for {} = {:.4f}'.format(cls, corloc))
logger.info('too_min_rate for {} = {:.4f}'.format(cls, too_min_rate))
res_file = os.path.join(output_dir, (cls + '_corloc.pkl'))
save_object({'corloc': corloc}, res_file)
logger.info('Mean CorLoc = {:.4f}'.format(np.mean(corlocs)))
logger.info('Mean too_min_rate = {:.4f}'.format(np.mean(too_min_rates)))
logger.info('')
logger.info('Results:')
for corloc in corlocs:
logger.info('{:.3f}'.format(corloc))
logger.info('{:.3f}'.format(np.mean(corlocs)))
logger.info('')
logger.info('')
logger.info('')
logger.info('Results computed with the **unofficial** Python eval code.')
logger.info('Results should be very close to the official MATLAB code.')
logger.info('Use `./tools/reval.py --matlab ...` for your paper.')
logger.info('-- Thanks, The Management')
logger.info('') |
class HTTPVersionNotSupported(HTTPException):
code = 505
description = 'The server does not support the HTTP protocol version used in the request.' |
class MultiWozEvaluator(BaseEvaluator):
def __init__(self, data_name):
self.data_name = data_name
self.slot_dict = delex.prepareSlotValuesIndependent()
self.delex_dialogues = json.load(open('resources/multi-woz-2.1/delex.json', 'r'))
self.db = MultiWozDB()
self.labels = list()
self.hyps = list()
self.venues = json.load(open('resources/all_venues.json', 'r'))
def add_example(self, ref, hyp):
self.labels.append(ref)
self.hyps.append(hyp)
def _parseGoal(self, goal, d, domain):
goal[domain] = {}
goal[domain] = {'informable': [], 'requestable': [], 'booking': []}
if ('info' in d['goal'][domain]):
if (domain == 'train'):
if ('book' in d['goal'][domain]):
goal[domain]['requestable'].append('reference')
if ('reqt' in d['goal'][domain]):
if ('trainID' in d['goal'][domain]['reqt']):
goal[domain]['requestable'].append('id')
else:
if ('reqt' in d['goal'][domain]):
for s in d['goal'][domain]['reqt']:
if (s in ['phone', 'address', 'postcode', 'reference', 'id']):
goal[domain]['requestable'].append(s)
if ('book' in d['goal'][domain]):
goal[domain]['requestable'].append('reference')
goal[domain]['informable'] = d['goal'][domain]['info']
if ('book' in d['goal'][domain]):
goal[domain]['booking'] = d['goal'][domain]['book']
return goal
def _evaluateGeneratedDialogue(self, dialname, dial, goal, realDialogue, real_requestables, soft_acc=False):
requestables = ['phone', 'address', 'postcode', 'reference', 'id']
provided_requestables = {}
venue_offered = {}
domains_in_goal = []
for domain in goal.keys():
venue_offered[domain] = []
provided_requestables[domain] = []
domains_in_goal.append(domain)
m_targetutt = [turn['text'] for (idx, turn) in enumerate(realDialogue['log']) if ((idx % 2) == 1)]
pred_beliefs = dial['beliefs']
target_beliefs = dial['target_beliefs']
pred_responses = dial['responses']
for (t, (sent_gpt, sent_t)) in enumerate(zip(pred_responses, m_targetutt)):
for domain in goal.keys():
if (((('[' + domain) + '_name]') in sent_gpt) or ('_id' in sent_gpt)):
if (domain in ['restaurant', 'hotel', 'attraction', 'train']):
if (domain not in pred_beliefs):
venues = []
else:
pred_beliefs = remove_model_mismatch_and_db_data(dialname, target_beliefs, pred_beliefs[t], domain, t)
venues = self.db.queryResultVenues(domain, pred_beliefs[t][domain], real_belief=True)
if ((len(venue_offered[domain]) == 0) and venues):
venue_offered[domain] = venues
else:
flag = False
for ven in venues:
if (venue_offered[domain][0] == ven):
flag = True
break
if ((not flag) and venues):
venue_offered[domain] = venues
else:
venue_offered[domain] = (('[' + domain) + '_name]')
for requestable in requestables:
if (requestable == 'reference'):
if ((domain + '_reference') in sent_gpt):
if ('restaurant_reference' in sent_gpt):
if (realDialogue['log'][(t * 2)]['db_pointer'][(- 5)] == 1):
provided_requestables[domain].append('reference')
elif ('hotel_reference' in sent_gpt):
if (realDialogue['log'][(t * 2)]['db_pointer'][(- 3)] == 1):
provided_requestables[domain].append('reference')
elif ('train_reference' in sent_gpt):
if (realDialogue['log'][(t * 2)]['db_pointer'][(- 1)] == 1):
provided_requestables[domain].append('reference')
else:
provided_requestables[domain].append('reference')
elif ((((domain + '_') + requestable) + ']') in sent_gpt):
provided_requestables[domain].append(requestable)
for domain in goal.keys():
if ('info' in realDialogue['goal'][domain]):
if ('name' in realDialogue['goal'][domain]['info']):
venue_offered[domain] = (('[' + domain) + '_name]')
if (domain in ['taxi', 'police', 'hospital']):
venue_offered[domain] = (('[' + domain) + '_name]')
if ((domain == 'train') and ((not venue_offered[domain]) and ('id' not in goal['train']['requestable']))):
venue_offered[domain] = (('[' + domain) + '_name]')
"\n Given all inform and requestable slots\n we go through each domain from the user goal\n and check whether right entity was provided and\n all requestable slots were given to the user.\n The dialogue is successful if that's the case for all domains.\n "
stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0, 0], 'taxi': [0, 0, 0], 'hospital': [0, 0, 0], 'police': [0, 0, 0]}
match = 0
success = 0
for domain in goal.keys():
match_stat = 0
if (domain in ['restaurant', 'hotel', 'attraction', 'train']):
goal_venues = self.db.queryResultVenues(domain, goal[domain]['informable'], real_belief=True)
if ((type(venue_offered[domain]) is str) and ('_name' in venue_offered[domain])):
match += 1
match_stat = 1
elif ((len(venue_offered[domain]) > 0) and (venue_offered[domain][0] in goal_venues)):
match += 1
match_stat = 1
elif ((domain + '_name]') in venue_offered[domain]):
match += 1
match_stat = 1
stats[domain][0] = match_stat
stats[domain][2] = 1
if soft_acc:
match = (float(match) / len(goal.keys()))
elif (match == len(goal.keys())):
match = 1.0
else:
match = 0.0
if (match == 1.0):
for domain in domains_in_goal:
success_stat = 0
domain_success = 0
if (len(real_requestables[domain]) == 0):
success += 1
success_stat = 1
stats[domain][1] = success_stat
continue
for request in set(provided_requestables[domain]):
if (request in real_requestables[domain]):
domain_success += 1
if (domain_success >= len(real_requestables[domain])):
success += 1
success_stat = 1
stats[domain][1] = success_stat
if soft_acc:
success = (float(success) / len(real_requestables))
elif (success >= len(real_requestables)):
success = 1
else:
success = 0
return (success, match, stats)
def _evaluateRealDialogue(self, dialog, filename):
domains = ['restaurant', 'hotel', 'attraction', 'train', 'taxi', 'hospital', 'police']
requestables = ['phone', 'address', 'postcode', 'reference', 'id']
domains_in_goal = []
goal = {}
for domain in domains:
if dialog['goal'][domain]:
goal = self._parseGoal(goal, dialog, domain)
domains_in_goal.append(domain)
real_requestables = {}
provided_requestables = {}
venue_offered = {}
for domain in goal.keys():
provided_requestables[domain] = []
venue_offered[domain] = []
real_requestables[domain] = goal[domain]['requestable']
m_targetutt = [turn['text'] for (idx, turn) in enumerate(dialog['log']) if ((idx % 2) == 1)]
for t in range(len(m_targetutt)):
for domain in domains_in_goal:
sent_t = m_targetutt[t]
if (((domain + '_name') in sent_t) or ('_id' in sent_t)):
if (domain in ['restaurant', 'hotel', 'attraction', 'train']):
venues = self.db.queryResultVenues(domain, dialog['log'][((t * 2) + 1)])
if ((len(venue_offered[domain]) == 0) and venues):
venue_offered[domain] = random.sample(venues, 1)
else:
flag = False
for ven in venues:
if (venue_offered[domain][0] == ven):
flag = True
break
if ((not flag) and venues):
venue_offered[domain] = random.sample(venues, 1)
else:
venue_offered[domain] = (('[' + domain) + '_name]')
for requestable in requestables:
if (requestable == 'reference'):
if ((domain + '_reference') in sent_t):
if ('restaurant_reference' in sent_t):
if (dialog['log'][(t * 2)]['db_pointer'][(- 5)] == 1):
provided_requestables[domain].append('reference')
elif ('hotel_reference' in sent_t):
if (dialog['log'][(t * 2)]['db_pointer'][(- 3)] == 1):
provided_requestables[domain].append('reference')
elif ('train_reference' in sent_t):
if (dialog['log'][(t * 2)]['db_pointer'][(- 1)] == 1):
provided_requestables[domain].append('reference')
else:
provided_requestables[domain].append('reference')
elif (((domain + '_') + requestable) in sent_t):
provided_requestables[domain].append(requestable)
for domain in domains_in_goal:
if ('info' in dialog['goal'][domain]):
if ('name' in dialog['goal'][domain]['info']):
venue_offered[domain] = (('[' + domain) + '_name]')
if (domain in ['taxi', 'police', 'hospital']):
venue_offered[domain] = (('[' + domain) + '_name]')
if ((domain == 'train') and ((not venue_offered[domain]) and ('id' not in goal['train']['requestable']))):
venue_offered[domain] = (('[' + domain) + '_name]')
stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0, 0], 'taxi': [0, 0, 0], 'hospital': [0, 0, 0], 'police': [0, 0, 0]}
(match, success) = (0, 0)
for domain in goal.keys():
match_stat = 0
if (domain in ['restaurant', 'hotel', 'attraction', 'train']):
goal_venues = self.db.queryResultVenues(domain, dialog['goal'][domain]['info'], real_belief=True)
if ((type(venue_offered[domain]) is str) and ('_name' in venue_offered[domain])):
match += 1
match_stat = 1
elif ((len(venue_offered[domain]) > 0) and (venue_offered[domain][0] in goal_venues)):
match += 1
match_stat = 1
elif ((domain + '_name') in venue_offered[domain]):
match += 1
match_stat = 1
stats[domain][0] = match_stat
stats[domain][2] = 1
if (match == len(goal.keys())):
match = 1
else:
match = 0
if match:
for domain in domains_in_goal:
domain_success = 0
success_stat = 0
if (len(real_requestables[domain]) == 0):
success += 1
success_stat = 1
stats[domain][1] = success_stat
continue
for request in set(provided_requestables[domain]):
if (request in real_requestables[domain]):
domain_success += 1
if (domain_success >= len(real_requestables[domain])):
success += 1
success_stat = 1
stats[domain][1] = success_stat
if (success >= len(real_requestables)):
success = 1
else:
success = 0
return (goal, success, match, real_requestables, stats)
def _parse_entities(self, tokens):
entities = []
for t in tokens:
if (('[' in t) and (']' in t)):
entities.append(t)
return entities
def evaluateModel_gpt2(self, dialogues, real_dialogues=False, mode='valid'):
delex_dialogues = self.delex_dialogues
(successes, matches) = (0, 0)
total = 0
gen_stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0, 0], 'taxi': [0, 0, 0], 'hospital': [0, 0, 0], 'police': [0, 0, 0]}
sng_gen_stats = {'restaurant': [0, 0, 0], 'hotel': [0, 0, 0], 'attraction': [0, 0, 0], 'train': [0, 0, 0], 'taxi': [0, 0, 0], 'hospital': [0, 0, 0], 'police': [0, 0, 0]}
for (idx, (filename, dial)) in enumerate(dialogues.items()):
data = delex_dialogues[filename]
(goal, success, match, requestables, _) = self._evaluateRealDialogue(data, filename)
(success, match, stats) = self._evaluateGeneratedDialogue(filename, dial, goal, data, requestables, soft_acc=(mode == 'soft'))
successes += success
matches += match
total += 1
for domain in gen_stats.keys():
gen_stats[domain][0] += stats[domain][0]
gen_stats[domain][1] += stats[domain][1]
gen_stats[domain][2] += stats[domain][2]
if ('SNG' in filename):
for domain in gen_stats.keys():
sng_gen_stats[domain][0] += stats[domain][0]
sng_gen_stats[domain][1] += stats[domain][1]
sng_gen_stats[domain][2] += stats[domain][2]
if real_dialogues:
corpus = []
model_corpus = []
bscorer = BLEUScorer()
for dialogue in dialogues:
data = real_dialogues[dialogue]
(model_turns, corpus_turns) = ([], [])
for (idx, turn) in enumerate(data):
corpus_turns.append([turn])
for turn in dialogues[dialogue]['responses']:
model_turns.append([turn])
if (len(model_turns) == len(corpus_turns)):
corpus.extend(corpus_turns)
model_corpus.extend(model_turns)
else:
raise ValueError('Wrong amount of turns')
model_corpus_len = []
for turn in model_corpus:
if (turn[0] == ''):
model_corpus_len.append(True)
else:
model_corpus_len.append(False)
if all(model_corpus_len):
print('no model response')
model_corpus = corpus
blue_score = bscorer.score(model_corpus, corpus)
else:
blue_score = 0.0
report = ''
report += ('{} Corpus Matches : {:2.2f}%'.format(mode, ((matches / float(total)) * 100)) + '\n')
report += ('{} Corpus Success : {:2.2f}%'.format(mode, ((successes / float(total)) * 100)) + '\n')
report += ('{} Corpus BLEU : {:2.4f}%'.format(mode, blue_score) + '\n')
report += ('Total number of dialogues: %s ' % total)
print(report)
return (report, (successes / float(total)), (matches / float(total))) |
def get_sentence_markup(sentence, layer, markup):
doc_name = sentence.document.name
if ((doc_name not in markup) or (layer not in markup[doc_name]) or (sentence.position not in markup[doc_name][layer]) or (markup[doc_name][layer][sentence.position] == None)):
return []
return sorted(markup[doc_name][layer][sentence.position], key=(lambda x: x.char_start), reverse=0) |
class Function_beta(GinacFunction):
def __init__(self):
GinacFunction.__init__(self, 'beta', nargs=2, latex_name='\\operatorname{B}', conversions=dict(maxima='beta', mathematica='Beta', maple='Beta', sympy='beta', fricas='Beta', giac='Beta'))
def _method_arguments(self, x, y):
return [x, y] |
class StackLayers(nn.Module):
def __init__(self, num_block_layers, dropped_mixed_ops, softmax_temp=1.0):
super(StackLayers, self).__init__()
if (num_block_layers != 0):
self.stack_layers = nn.ModuleList()
for i in range(num_block_layers):
self.stack_layers.append(MixedOp(dropped_mixed_ops[i], softmax_temp))
else:
self.stack_layers = None
def forward(self, x, alphas, stack_index, stack_sub_obj):
if (self.stack_layers is not None):
count_sub_obj = 0
for (stack_layer, alpha, stack_idx, layer_sub_obj) in zip(self.stack_layers, alphas, stack_index, stack_sub_obj):
(x, sub_obj) = stack_layer(x, alpha, stack_idx, layer_sub_obj)
count_sub_obj += sub_obj
return (x, count_sub_obj)
else:
return (x, 0) |
def test_RecordArray_getitem():
array = ak.highlevel.Array([{'x': 0.0, 'y': []}, {'x': 1.1, 'y': [1]}, {'x': 2.2, 'y': [2, 2]}, {'x': 3.3, 'y': [3, 3, 3]}, {'x': 4.4, 'y': [4, 4, 4, 4]}], check_valid=True)
def f1(x, i):
return x[i]
assert (ak.operations.to_list(f1(array, 3)) == {'x': 3.3, 'y': [3, 3, 3]})
assert (ak.operations.to_list(f1(array, 2)) == {'x': 2.2, 'y': [2, 2]})
assert (ak.operations.to_list(f1(array, 1)) == {'x': 1.1, 'y': [1]})
def f2(x, i1, i2):
return x[i1:i2]
assert (ak.operations.to_list(f2(array, 1, 4)) == [{'x': 1.1, 'y': [1]}, {'x': 2.2, 'y': [2, 2]}, {'x': 3.3, 'y': [3, 3, 3]}])
array = ak.highlevel.Array([[{'x': 0.0, 'y': []}, {'x': 1.1, 'y': [1]}, {'x': 2.2, 'y': [2, 2]}], [], [{'x': 3.3, 'y': [3, 3, 3]}, {'x': 4.4, 'y': [4, 4, 4, 4]}]], check_valid=True)
def f3(x, i, j):
return x[i][j]
assert (ak.operations.to_list(f3(array, 2, (- 2))) == {'x': 3.3, 'y': [3, 3, 3]}) |
def _run_task(test_template: Callable, tasks_queue: Queue, events_queue: Queue, generator_done: threading.Event, checks: Iterable[CheckFunction], targets: Iterable[Target], data_generation_methods: Iterable[DataGenerationMethod], settings: hypothesis.settings, generation_config: GenerationConfig, seed: (int | None), results: TestResultSet, stateful: (Stateful | None), stateful_recursion_limit: int, headers: (dict[(str, Any)] | None)=None, **kwargs: Any) -> None:
as_strategy_kwargs = {}
if (headers is not None):
as_strategy_kwargs['headers'] = {key: value for (key, value) in headers.items() if (key.lower() != 'user-agent')}
def _run_tests(maker: Callable, recursion_level: int=0) -> None:
if (recursion_level > stateful_recursion_limit):
return
for _result in maker(test_template, settings=settings, generation_config=generation_config, seed=seed, as_strategy_kwargs=as_strategy_kwargs):
(_operation, test) = _result.ok()
feedback = Feedback(stateful, _operation)
for _event in run_test(_operation, test, checks, data_generation_methods, targets, results, recursion_level=recursion_level, feedback=feedback, headers=headers, **kwargs):
events_queue.put(_event)
_run_tests(feedback.get_stateful_tests, (recursion_level + 1))
with capture_hypothesis_output():
while True:
try:
result = tasks_queue.get(timeout=0.001)
except queue.Empty:
if generator_done.is_set():
break
continue
if isinstance(result, Ok):
operation = result.ok()
test_function = create_test(operation=operation, test=test_template, settings=settings, seed=seed, data_generation_methods=list(data_generation_methods), generation_config=generation_config, as_strategy_kwargs=as_strategy_kwargs)
items = Ok((operation, test_function))
_run_tests((lambda *_, **__: (items,)))
else:
for event in handle_schema_error(result.err(), results, data_generation_methods, 0):
events_queue.put(event) |
def build_regularization_map(volume, threshold, rw0, rw1):
data = np.array(volume, copy=False)
regmap = np.zeros(data.shape, dtype=np.float32)
regmap = ((rw0 * (data < threshold)) + (rw1 * (data >= threshold))).astype(np.float32)
regmap = pydeform.Volume(regmap)
regmap.copy_meta_from(volume)
return regmap |
def _is_packed_list(list_value):
return (_is_value(list_value) and (list_value.node().kind() == 'prim::ListConstruct')) |
def _assemble_arrayl(lines, stretch=None):
return LatexExpr(((((('' if generate_real_LaTeX else '%notruncate\n') + ('' if (stretch is None) else ('\\renewcommand{\\arraystretch}{%f}\n' % stretch))) + '\\begin{array}{l}\n') + '\\\\\n'.join(lines)) + '\n\\end{array}')) |
def parse_lexicon(line: str) -> Tuple[(str, List[str])]:
line = line.replace('\t', ' ')
(word, *phonemes) = line.split()
return (word, phonemes) |
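A quick sketch of the lexicon line format `parse_lexicon` above expects: a word followed by its phoneme sequence, separated by whitespace (the entry here is illustrative).
word, phonemes = parse_lexicon('HELLO HH AH L OW')
print(word)      # 'HELLO'
print(phonemes)  # ['HH', 'AH', 'L', 'OW'] |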
def plot_sqlite_db(sqliteConnection: Engine, analyze: bool=False):
db_name = os.path.splitext(os.path.basename(sqliteConnection.url.database))[0]
schema_name = []
schema_name.append(db_name)
if analyze:
sqliteConnection.execute('ANALYZE')
try:
version_sql = pd.read_sql('select sqlite_version();', sqliteConnection)
index = pd.read_sql("SELECT * FROM sqlite_master WHERE type = 'index'", sqliteConnection)
table_sql = pd.read_sql("select type, tbl_name as table_name, sql from sqlite_master where type = 'table' AND tbl_name not like 'sqlite_%';", sqliteConnection)
table_row_sql = pd.read_sql("select DISTINCT tbl_name AS table_name, CASE WHEN stat is null then 0 else cast(stat as INT) END row_count\n from sqlite_master m\n LEFT JOIN sqlite_stat1 stat on m.tbl_name = stat.tbl\n where m.type='table'\n and m.tbl_name not like 'sqlite_%'\n order by 1", sqliteConnection)
all_cols = pd.read_sql("SELECT tbl_name as table_name, p.name as col_name, p.type as type,\n CASE WHEN `notnull` = 0 THEN 'False'\n ELSE 'True' END AS attnotnull, dflt_value as `default`, pk, sql\n FROM\n sqlite_master AS m\n JOIN\n pragma_table_info(m.name) AS p\n WHERE tbl_name not like 'sqlite_%'\n ORDER BY\n m.name,\n p.cid", sqliteConnection)
view_sql = pd.read_sql("select type, tbl_name as view_name, sql AS definition from sqlite_master where type = 'view' AND tbl_name not like 'sqlite_%';", sqliteConnection)
fk_sql = pd.read_sql('SELECT \'foreign key\' AS constraint_type, tbl_name as table_name, `from` AS col_name,\n `table` AS ref_table, `to` AS ref_col, sql AS constraint_def, on_update AS "update_rule", on_delete AS "delete_rule"\n FROM\n sqlite_master AS m\n JOIN\n pragma_foreign_key_list(m.name) AS p WHERE m.type = \'table\'', sqliteConnection)
pk_sql = pd.read_sql("SELECT DISTINCT 'primary key' AS constraint_type, tbl_name as table_name\n ,group_concat(p.name) OVER (\n PARTITION BY tbl_name) AS col_name, sql AS constraint_def\n FROM\n sqlite_master AS m\n JOIN\n pragma_table_info(m.name) AS p\n WHERE tbl_name not like 'sqlite_%' AND pk != 0\n ORDER BY\n m.name,\n p.cid", sqliteConnection)
uk_sql = pd.read_sql("SELECT DISTINCT 'unique key' AS constraint_type, tbl_name as table_name, p.name as col_name, sql AS constraint_def\n FROM\n sqlite_master AS m\n JOIN\n pragma_index_list(m.name) AS p WHERE m.type = 'table' AND `unique` = 1 AND origin not in ('pk', 'fk')", sqliteConnection)
(pk_sql['ref_table'], pk_sql['ref_col'], uk_sql['ref_table'], uk_sql['ref_col']) = (None, None, None, None)
except OperationalError:
raise Exception("Cannot read statistics from the database. Please run 'analyze' in the database to collect the statistics first, or set analyze=True to allow us to do this (note that 'analyze' usually collects the statistics and stores the result in the database)")
pk_sql = pk_sql[['constraint_type', 'table_name', 'col_name', 'ref_table', 'ref_col', 'constraint_def']]
uk_sql = uk_sql[['constraint_type', 'table_name', 'col_name', 'ref_table', 'ref_col', 'constraint_def']]
pk_fk = pd.concat([pk_sql, fk_sql, uk_sql]).reset_index(drop=True)
table_list = list(table_sql['table_name'])
view_list = list(view_sql['view_name'])
overview_dict = {}
overview_dict['table_schema'] = dict([(x, db_name) for x in table_list])
overview_dict['num_of_schemas'] = 1
overview_dict['schema_names'] = schema_name
overview_dict['num_of_tables'] = int(len(table_list))
overview_dict['table_names'] = table_list
overview_dict['num_of_views'] = int(len(view_list))
overview_dict['view_names'] = view_list
overview_dict['connection_url'] = sqliteConnection.url
overview_dict['view_schema'] = dict([(x, db_name) for x in view_list])
overview_dict['tables_no_index'] = list(table_sql[(~ table_sql['table_name'].isin(set(pk_sql['table_name'])))]['table_name'])
overview_dict['num_of_pk'] = int(len(pk_sql))
overview_dict['num_of_fk'] = int(len(fk_sql))
overview_dict['num_of_uk'] = int(len(uk_sql))
overview_dict['product_version'] = version_sql.values[0][0]
table_dict = {}
for i in table_list:
indices = {}
table_indexes = index.loc[(index['tbl_name'] == str(i))]
for (idx, row) in table_indexes.iterrows():
current_index = row.loc['name']
indices[current_index] = {}
index_type = row.loc['type']
if row.loc['sql']:
col_name = row.loc['sql'].split('(', 1)[1].split(' ', 1)[0].strip()[:(- 1)]
else:
col_name = None
new_index = {}
indices[current_index]['Column_name'] = col_name
indices[current_index]['Index_type'] = index_type
temp = OrderedDict()
temp_cols = all_cols[(all_cols['table_name'] == i)].drop(columns=['table_name', 'pk', 'sql']).to_dict(orient='records')
for j in temp_cols:
temp[j['col_name']] = {}
element = j.pop('col_name')
temp[element] = j
temp[element]['children'] = list(pk_fk[((pk_fk['ref_table'] == i) & (pk_fk['ref_col'] == element))]['table_name'])
temp[element]['parents'] = list(pk_fk[(((pk_fk['table_name'] == i) & (pk_fk['col_name'] == element)) & (pk_fk['constraint_type'] == 'foreign key'))]['ref_table'])
temp['num_of_parents'] = len(pk_fk[((pk_fk['table_name'] == i) & (pk_fk['constraint_type'] == 'foreign key'))])
temp['num_of_children'] = len(pk_fk[(pk_fk['ref_table'] == i)])
temp['num_of_rows'] = int(table_row_sql[(table_row_sql['table_name'] == i)]['row_count'].values[0])
temp['num_of_cols'] = len(all_cols[(all_cols['table_name'] == i)])
temp['constraints'] = {}
temp_pk_fk = pk_fk[(pk_fk['table_name'] == i)].drop(columns=['table_name']).to_dict(orient='records')
(fk_counter, uk_counter) = (1, 1)
for j in temp_pk_fk:
if (j['constraint_type'] == 'primary key'):
element = (i + '_pkey')
temp['constraints'][element] = {}
elif (j['constraint_type'] == 'foreign key'):
element = ((i + '_fkey') + str(fk_counter))
temp['constraints'][element] = {}
fk_counter += 1
elif (j['constraint_type'] == 'unique key'):
element = ((i + '_ukey') + str(uk_counter))
temp['constraints'][element] = {}
uk_counter += 1
temp['constraints'][element] = j
temp['indices'] = indices
table_dict[i] = temp
view_dict = {}
for i in view_list:
temp = {}
temp_cols = all_cols[(all_cols['table_name'] == i)].drop(columns=['table_name', 'pk', 'sql']).to_dict(orient='records')
for j in temp_cols:
temp[j['col_name']] = {}
element = j.pop('col_name')
temp[element] = j
temp['num_of_cols'] = len(temp_cols)
temp['definition'] = view_sql[(view_sql['view_name'] == i)]['definition'].values[0]
view_dict[i] = temp
return (overview_dict, table_dict, view_dict) |
class Callback(EntryBase):
def __init__(self, j):
super().__init__(j, 'callback')
self.return_value_type = None
self.params = []
if ('parameters' in j):
for x in j['parameters']:
field = Field(x)
if (field.name.snake_case == ''):
self.return_value_type = field.type
else:
self.params += [field] |
def check_database_table(db_name):
conn = sqlite3.connect(db_name)
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table';")
if (len(c.fetchall()) == 0):
create_bot_test_database(db_name) |
def load_fvd_model(device):
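# Load a 400-class InceptionI3d network in eval mode, with pretrained weights ('i3d_pretrained_400.pt') expected next to this file; used as the FVD feature extractor.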
i3d = InceptionI3d(400, in_channels=3).to(device)
current_dir = os.path.dirname(os.path.abspath(__file__))
i3d_path = os.path.join(current_dir, 'i3d_pretrained_400.pt')
i3d.load_state_dict(torch.load(i3d_path, map_location=device))
i3d.eval()
return i3d |
def rerank(model_file, ctx_file, rnk_file, score=False):
pstree = cacb.CACBInfer()
pstree.load(model_file)
output_file = open((rnk_file + '_CACB') + ('.f' if score else '.gen'), 'w')
begin = True
# Pair each context line with its candidate line and rerank the candidates.
for (num_line, (ctx_line, rnk_line)) in enumerate(zip(open(ctx_file), open(rnk_file))):
suffix = ctx_line.strip().split('\t')
candidates = rnk_line.strip().split('\t')
(candidates, scores) = pstree.rerank(suffix, candidates, no_normalize=score)
if (not score):
reranked = [x[0] for x in sorted(zip(candidates, scores), key=operator.itemgetter(1), reverse=True)]
print('\t'.join(reranked), file=output_file)
else:
if begin:
print('CACB', file=output_file)
begin = False
for s in scores:
print(s, file=output_file)
output_file.close() |
class TFAutoModelForSequenceClassification(object):
def __init__(self):
raise EnvironmentError('TFAutoModelForSequenceClassification is designed to be instantiated using the `TFAutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` or `TFAutoModelForSequenceClassification.from_config(config)` methods.')
@classmethod
def from_config(cls, config):
for (config_class, model_class) in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError('Unrecognized configuration class {} for this kind of TFAutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()))))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
config = kwargs.pop('config', None)
if (not isinstance(config, PretrainedConfig)):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
for (config_class, model_class) in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError('Unrecognized configuration class {} for this kind of TFAutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys())))) |
def list_fnames():
base_path = os.path.dirname(__file__)
return [os.path.join(base_path, fname) for fname in os.listdir(base_path) if fname.endswith('.conf')] |
def get_plot_font_size(font_size: Optional[int], figure_size: Tuple[(int, int)]) -> int:
if (font_size is None):
font_size = 10
if (max(figure_size) >= 256):
font_size = 12
if (max(figure_size) >= 512):
font_size = 15
return font_size |
def test_bytearray():
array = ak.contents.NumpyArray(np.frombuffer(b'hellothere', 'u1'), parameters={'__array__': 'byte'})
assert (ak.operations.to_json(array, convert_bytes=bytes.decode) == '"hellothere"') |
class Conv2dBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
super().__init__()
self.m = conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=True, weight_init='kaiming')
def forward(self, x):
x = self.m(x)
return F.relu(x) |
class InstrWorld(object):
sheet_name = 'Instr World'
def __init__(self, reg_list, columns, writer, split=False):
self.reg_list = reg_list
self.columns = columns
self.writer = writer
self.split = split
self.index = 1
def write(self, out_file):
df = pd.DataFrame(self.reg_list, columns=self.columns, index=None)
if self.split:
out_file = out_file.replace('xlsx', 'csv')
df.to_csv(out_file, index=False)
else:
df.to_excel(self.writer, sheet_name=self.sheet_name, index=False, startrow=4, engine='xlsxwriter')
@classmethod
def set_style(cls, out_file, frozen=True):
wb = load_workbook(out_file)
ws = wb[cls.sheet_name]
if frozen:
_cell = ws.cell(6, 1)
ws.freeze_panes = _cell
wb.save(out_file) |
class TensorFlowBenchmarkArguments(metaclass=DummyObject):
_backends = ['tf']
def __init__(self, *args, **kwargs):
requires_backends(self, ['tf']) |
def simGetJointTargetVelocity(jointHandle):
vel = ffi.new('float *')
lib.simGetJointTargetVelocity(jointHandle, vel)
return vel[0] |
def probable_pivot_columns(A):
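# Probabilistic pivot detection: reduce A modulo a random prime and take the pivot columns of the reduction (correct with high probability).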
p = ZZ.random_element(10007, 46000).next_prime()
return A._reduce(p).pivots() |
def build_dict(imgs, wtoi, params):
wtoi['<eos>'] = 0
count_imgs = 0
refs_words = []
refs_idxs = []
for img in imgs:
if ((params['split'] == img['split']) or ((params['split'] == 'train') and (img['split'] == 'restval')) or (params['split'] == 'all')):
ref_words = []
ref_idxs = []
for sent in img['sentences']:
tmp_tokens = (sent['tokens'] + ['<eos>'])
tmp_tokens = [(_ if (_ in wtoi) else 'UNK') for _ in tmp_tokens]
ref_words.append(' '.join(tmp_tokens))
ref_idxs.append(' '.join([str(wtoi[_]) for _ in tmp_tokens]))
refs_words.append(ref_words)
refs_idxs.append(ref_idxs)
count_imgs += 1
print('total imgs:', count_imgs)
ngram_words = compute_doc_freq(create_crefs(refs_words))
ngram_idxs = compute_doc_freq(create_crefs(refs_idxs))
return (ngram_words, ngram_idxs, count_imgs) |
class ROIBoxHead(torch.nn.Module):
def __init__(self, cfg, in_channels):
super(ROIBoxHead, self).__init__()
self.feature_extractor = make_roi_box_feature_extractor(cfg, in_channels)
self.predictor = make_roi_box_predictor(cfg, self.feature_extractor.out_channels)
self.post_processor = make_roi_box_post_processor(cfg)
self.loss_evaluator = make_roi_box_loss_evaluator(cfg)
if cfg.MODEL.ROI_BOX_HEAD.FREEZE_FEATURE_EXTRACTOR:
for p in self.feature_extractor.parameters():
p.requires_grad = False
def forward(self, features, proposals, targets=None):
if self.training:
with torch.no_grad():
proposals = self.loss_evaluator.subsample(proposals, targets)
x = self.feature_extractor(features, proposals)
(class_logits, box_regression) = self.predictor(x)
if (not self.training):
result = self.post_processor((class_logits, box_regression), proposals)
return (x, result, {})
(loss_classifier, loss_box_reg) = self.loss_evaluator([class_logits], [box_regression])
return (x, proposals, dict(loss_classifier=loss_classifier, loss_box_reg=loss_box_reg)) |
def vgg_16(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.5, spatial_squeeze=True, scope='vgg_16', fc_conv_padding='VALID', global_pool=False):
with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:
end_points_collection = (sc.original_name_scope + '_end_points')
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d], outputs_collections=end_points_collection):
net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
net = slim.max_pool2d(net, [2, 2], scope='pool3')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
net = slim.max_pool2d(net, [2, 2], scope='pool4')
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout6')
net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if global_pool:
net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
end_points['global_pool'] = net
if num_classes:
net = slim.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout7')
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='fc8')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[(sc.name + '/fc8')] = net
return (net, end_points) |
def _check_executable(cmd):
return subprocess.call(f'which {cmd}', shell=True) == 0 |
def unfold_dict_recursively(_dict, run_no, num_training_runs):
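# Flatten a nested dict into a single flat row: nested dicts are recursed into, lists whose length equals num_training_runs are indexed at run_no, and everything else is copied as-is.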
row = {}
for entry in _dict:
if (type(_dict[entry]) == dict):
row.update(unfold_dict_recursively(_dict[entry], run_no, num_training_runs))
elif ((type(_dict[entry]) == list) and (len(_dict[entry]) == num_training_runs)):
row[entry] = _dict[entry][run_no]
else:
row[entry] = _dict[entry]
return row |
@register_task('wsc')
class WSCTask(FairseqTask):
@staticmethod
def add_args(parser):
parser.add_argument('data', metavar='DIR', help='path to data directory; we load <split>.jsonl')
parser.add_argument('--init-token', type=int, default=None, help='add token at the beginning of each batch item')
def __init__(self, args, vocab):
super().__init__(args)
self.vocab = vocab
self.mask = vocab.add_symbol('<mask>')
self.bpe = encoders.build_bpe(args)
self.tokenizer = encoders.build_tokenizer(args)
if (args.bpe == 'gpt2'):
self.leading_space = True
self.trailing_space = False
else:
self.leading_space = False
self.trailing_space = True
@classmethod
def load_dictionary(cls, filename):
dictionary = Dictionary.load(filename)
dictionary.add_symbol('<mask>')
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert (args.criterion == 'wsc'), 'Must set --criterion=wsc'
vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
print('| dictionary: {} types'.format(len(vocab)))
return cls(args, vocab)
def binarize(self, s: str, append_eos: bool=False):
if (self.tokenizer is not None):
s = self.tokenizer.encode(s)
if (self.bpe is not None):
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(s, append_eos=append_eos, add_if_not_exist=False).long()
if (self.args.init_token is not None):
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens
def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space):
toks = self.binarize(((((prefix + leading_space) + txt) + trailing_space) + suffix), append_eos=True)
mask = torch.zeros_like(toks, dtype=torch.bool)
mask_start = len(self.binarize(prefix))
mask_size = len(self.binarize((leading_space + txt)))
mask[mask_start:(mask_start + mask_size)] = 1
return (toks, mask)
def load_dataset(self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs):
if (data_path is None):
data_path = os.path.join(self.args.data, (split + '.jsonl'))
if (not os.path.exists(data_path)):
raise FileNotFoundError('Cannot find data: {}'.format(data_path))
query_tokens = []
query_masks = []
query_lengths = []
candidate_tokens = []
candidate_masks = []
candidate_lengths = []
labels = []
for (sentence, pronoun_span, query, label) in wsc_utils.jsonl_iterator(data_path):
prefix = sentence[:pronoun_span.start].text
suffix = sentence[pronoun_span.end:].text_with_ws
leading_space = (' ' if sentence[:pronoun_span.start].text_with_ws.endswith(' ') else '')
trailing_space = (' ' if pronoun_span.text_with_ws.endswith(' ') else '')
cand_spans = wsc_utils.filter_noun_chunks(wsc_utils.extended_noun_chunks(sentence), exclude_pronouns=True, exclude_query=query, exact_match=False)
if (query is not None):
(query_toks, query_mask) = self.binarize_with_mask(query, prefix, suffix, leading_space, trailing_space)
query_len = len(query_toks)
else:
(query_toks, query_mask, query_len) = (None, None, 0)
query_tokens.append(query_toks)
query_masks.append(query_mask)
query_lengths.append(query_len)
(cand_toks, cand_masks) = ([], [])
for cand_span in cand_spans:
(toks, mask) = self.binarize_with_mask(cand_span.text, prefix, suffix, leading_space, trailing_space)
cand_toks.append(toks)
cand_masks.append(mask)
cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad())
cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0)
assert (cand_toks.size() == cand_masks.size())
candidate_tokens.append(cand_toks)
candidate_masks.append(cand_masks)
candidate_lengths.append(cand_toks.size(1))
labels.append(label)
query_lengths = np.array(query_lengths)
query_tokens = ListDataset(query_tokens, query_lengths)
query_masks = ListDataset(query_masks, query_lengths)
candidate_lengths = np.array(candidate_lengths)
candidate_tokens = ListDataset(candidate_tokens, candidate_lengths)
candidate_masks = ListDataset(candidate_masks, candidate_lengths)
labels = ListDataset(labels, ([1] * len(labels)))
dataset = {'id': IdDataset(), 'query_tokens': query_tokens, 'query_masks': query_masks, 'candidate_tokens': candidate_tokens, 'candidate_masks': candidate_masks, 'labels': labels, 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(query_tokens, reduce=True)}
nested_dataset = NestedDictionaryDataset(dataset, sizes=[query_lengths])
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(query_tokens))
dataset = SortDataset(nested_dataset, sort_order=[shuffle])
if return_only:
return dataset
self.datasets[split] = dataset
return self.datasets[split]
def build_dataset_for_inference(self, sample_json):
with tempfile.NamedTemporaryFile(buffering=0) as h:
h.write((json.dumps(sample_json) + '\n').encode('utf-8'))
dataset = self.load_dataset('disambiguate_pronoun', data_path=h.name, return_only=True)
return dataset
def disambiguate_pronoun(self, model, sentence, use_cuda=False):
sample_json = wsc_utils.convert_sentence_to_json(sentence)
dataset = self.build_dataset_for_inference(sample_json)
sample = dataset.collater([dataset[0]])
if use_cuda:
sample = utils.move_to_cuda(sample)
def get_masked_input(tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask.bool()] = self.mask
return masked_tokens
def get_lprobs(tokens, mask):
(logits, _) = model(src_tokens=get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=(- 1), dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze((- 1))).squeeze((- 1))
mask = mask.type_as(scores)
scores = ((scores * mask).sum(dim=(- 1)) / mask.sum(dim=(- 1)))
return scores
cand_lprobs = get_lprobs(sample['candidate_tokens'][0], sample['candidate_masks'][0])
if (sample['query_tokens'][0] is not None):
query_lprobs = get_lprobs(sample['query_tokens'][0].unsqueeze(0), sample['query_masks'][0].unsqueeze(0))
return ((query_lprobs >= cand_lprobs).all().item() == 1)
else:
best_idx = cand_lprobs.argmax().item()
full_cand = sample['candidate_tokens'][0][best_idx]
mask = sample['candidate_masks'][0][best_idx]
toks = full_cand[mask.bool()]
return self.bpe.decode(self.source_dictionary.string(toks)).strip()
@property
def source_dictionary(self):
return self.vocab
@property
def target_dictionary(self):
return self.vocab |
class fx2mlir(object):
def __init__(self, submodule_name: str, args: Namespace, bwd_graph: bool):
self.work_dir = submodule_name.split('_')[0]
tmp = ('bwd' if bwd_graph else 'fwd')
self.model_name = f'{submodule_name}_{tmp}'
self.args = args
self.bwd = bwd_graph
self.bmodel_path = None
self.ctx = Context()
self.ctx.allow_unregistered_dialects = True
loc = Location.unknown(self.ctx)
self.ctx.__enter__()
loc.__enter__()
self.weight_file = f'graph_for_jit_{self.model_name}.npz'
self.input_nodes = []
self.output_nodes = []
self.output_dtypes = []
self.return_none_count = 0
self.operands = dict()
self.weights_data = dict()
self.load_weight = dict()
self.const_val = dict()
self.op_factory = {'convolution': (lambda node: self.convert_base_conv_op(node)), 'convolution_backward': (lambda node: self.convert_backward_conv_op(node)), 'permute': (lambda node: self.convert_permute_op(node)), 'relu': (lambda node: self.convert_relu_op(node)), 'max_pool2d_with_indices': (lambda node: self.convert_maxpool2d_with_indices_op(node)), 'add': (lambda node: self.convert_add_op(node)), 'mul': (lambda node: self.convert_mul_op(node)), 'view': (lambda node: self.convert_reshape_op(node)), '_unsafe_view': (lambda node: self.convert_reshape_op(node)), 'reshape': (lambda node: self.convert_reshape_op(node)), 'mm': (lambda node: self.convert_matmul_op(node)), 'bmm': (lambda node: self.convert_matmul_op(node)), 'matmul': (lambda node: self.convert_matmul_op(node)), 'squeeze': (lambda node: self.convert_squeeze_op(node)), 'unsqueeze': (lambda node: self.convert_unsqueeze_op(node)), 'getitem': (lambda node: self.convert_getitem_op(node)), 'to': (lambda node: self.convert_to_op(node)), 'cat': (lambda node: self.convert_concat_op(node)), 'sum': (lambda node: self.convert_sum_op(node)), 'mean': (lambda node: self.convert_mean_op(node)), 'clone': (lambda node: self.convert_clone_op(node)), '_native_batch_norm_legit_functional': (lambda node: self.convert_batch_norm_op(node)), 'native_batch_norm_backward': (lambda node: self.convert_batch_norm_backward_op(node)), 'full': (lambda node: self.convert_full_op(node)), 'arange': (lambda node: self.convert_arange_op(node)), 'scalar_tensor': (lambda node: self.convert_scalar_tensor_op(node)), 'slice': (lambda node: self.convert_slice_op(node)), 'embedding': (lambda node: self.convert_embedding_op(node)), 'ne': (lambda node: self.convert_compare_op(node, 'NotEqual')), 'where': (lambda node: self.convert_where_op(node)), '_to_copy': (lambda node: self.convert_copy_op(node)), 'var': (lambda node: self.convert_var_op(node)), 'div': (lambda node: self.convert_div_op(node)), 'rsqrt': (lambda node: self.convert_rsqrt_op(node)), 'sub': (lambda node: self.convert_sub_op(node)), 'addmm': (lambda node: self.convert_addmm_op(node)), 'split': (lambda node: self.convert_split_op(node)), 'expand': (lambda node: self.convert_expand_op(node)), 'amax': (lambda node: self.convert_amax_op(node, False)), 'exp': (lambda node: self.convert_math_op(node, 'exp')), 'erf': (lambda node: self.convert_math_op(node, 'erf')), 'select': (lambda node: self.convert_select_op(node)), 'log': (lambda node: self.convert_math_op(node, 'log')), 'gather': (lambda node: self.convert_gather_op(node)), 'neg': (lambda node: self.convert_neg_op(node)), 't': (lambda node: self.convert_transpose_op(node)), 'native_layer_norm': (lambda node: self.convert_layer_norm_op(node)), 'native_layer_norm_backward': (lambda node: self.convert_layer_norm_backward_op(node)), 'transpose': (lambda node: self.convert_transpose_op(node)), '_softmax': (lambda node: self.convert_softmax_op(node, log=False)), '_log_softmax': (lambda node: self.convert_softmax_op(node, log=True)), 'nll_loss_forward': (lambda node: self.convert_nllloss_op(node)), 'constant': (lambda node: self.convert_constant(node)), 'broadcast_in_dim': (lambda node: self.convert_broadcast_op(node)), 'threshold_backward': (lambda node: self.convert_threshold_backward_op(node)), '_softmax_backward_data': (lambda node: self.convert_softmax_backward_data_op(node)), 'embedding_dense_backward': (lambda node: self.convert_embedding_dense_backward_op(node))}
self.mlir_type = {'F32': F32Type.get(), 'F16': F16Type.get(), 'BF16': BF16Type.get()}
def convert_a_op(self, node):
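# Lower a single FX node into its own MLIR module, feed it random reference inputs, compile to a bmodel, and optionally compare top/tpu/bmodel outputs when args.cmp is set.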
print(f'convert_a_op, node.name:', node.name, 'target:', node.target, 'args:', node.args, 'users:', list(node.users.keys()), 'kwargs:', node.kwargs, 'val:', (node.meta['val'] if ('val' in node.meta) else 'None'), 'tensor_meta:', (node.meta['tensor_meta'] if ('tensor_meta' in node.meta) else 'None'))
print('in shapes:', [list(i.meta['val'].shape) for i in node.args if isinstance(i, torch.fx.Node)])
op_type = torch.typename(node.target).split('.')[(- 1)]
if (op_type not in self.op_factory):
print(f'{op_type} not in op_factory')
return (None, None)
in_args_txt_list = []
in_ref_data = {}
for (i, arg) in enumerate(node.args):
if isinstance(arg, torch.fx.Node):
self.input_nodes.append([i, arg])
shape = list(arg.meta['val'].size())
shape = ([1] if (shape == []) else shape)
if (('val' in arg.meta) and (arg.meta['val'].dtype == torch.int64)):
in_ref_data[arg.name] = torch.randint(0, 10, shape)
else:
in_ref_data[arg.name] = torch.randn(shape)
in_args_txt_list.append('%args{}: {} loc(unknown)'.format(i, RankedTensorType.get(shape, F32Type.get()).__str__()))
np.savez(f'in_ref_data_a_op_{node.name}.npz', **in_ref_data)
if isinstance(node.meta['val'], (tuple, list)):
self.output_dtypes = [i.dtype for i in node.meta['val'] if (i is not None)]
else:
self.output_dtypes.append(node.meta['val'].dtype)
output_txt = []
for (shape, dtype) in zip(self.get_output_shapes(node), self.get_output_dtypes(node)):
output_txt.append(self.get_tensor_type(shape, dtype).__str__())
output_str = ', '.join(output_txt)
if (len(output_txt) > 1):
output_str = '({})'.format(output_str)
self.createMlirModuleAndInput(', '.join(in_args_txt_list), output_str)
self.op_factory.get(op_type, (lambda x: NoneAndRaise(x)))(node)
operands = []
if isinstance(self.operands[node], list):
operands.extend(self.operands[node])
else:
operands.append(self.operands[node])
return_op = Operation.create('func.return', operands=operands, results=[])
self.insert_point.insert(return_op)
mlir_txt = self.mlir_module.operation.get_asm(enable_debug_info=True)
mlir_file = 'out.mlir'
mlir_origin = mlir_file.replace('.mlir', '_origin.mlir', 1)
with open(mlir_origin, 'w') as f:
f.write(mlir_txt)
self.WeightToNpz(self.weight_file)
mlir_opt_for_top(mlir_origin, mlir_file)
print('Save mlir file: {}'.format(mlir_file))
if self.args.cmp:
tensors = mlir_inference(in_ref_data, mlir_file, True)
print('out num:', len(tensors))
np.savez('ref_data.npz', **tensors)
del tensors
free_mlir_module()
gc.collect()
tpu_ir = ('tpu_' + mlir_file)
self.bmodel_path = (tpu_ir + '.bmodel')
mlir_lowering(mlir_file, tpu_ir, 'F32', self.args.chip)
if self.args.cmp:
tensors = mlir_inference(in_ref_data, tpu_ir, True)
np.savez('tpu_ir_out_data.npz', **tensors)
del tensors
free_mlir_module()
gc.collect()
f32_blobs_compare('tpu_ir_out_data.npz', 'ref_data.npz', '0.99,0.99')
mlir_to_model(tpu_ir, self.bmodel_path, ('final_' + mlir_file))
if self.args.cmp:
tensors = bmodel_inference(self.bmodel_path, in_ref_data)
np.savez('bmodel_out_data.npz', **tensors)
del tensors
gc.collect()
f32_blobs_compare('bmodel_out_data.npz', 'ref_data.npz', '0.99,0.99')
print('jit compile ok')
return (TpuMlirModule(self.bmodel_path, self.output_dtypes, self.return_none_count), list(in_ref_data.values()))
def convert(self, module):
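# Lower a full FX graph: dump it to disk, declare placeholder inputs, convert every call node via op_factory, emit the return op, then compile (and optionally verify) the bmodel.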
print(' starting parsing...')
module.to_folder(f'fx_graph_dumped_{self.model_name}', self.model_name)
with open(f'fx_graph_dumped_{self.model_name}/input_shape', 'w+') as fd:
for (i, node) in enumerate(module.graph.nodes):
if (node.op == 'placeholder'):
fd.write(f"{node.name}*{list(node.meta['val'].size())}*{node.meta['val'].dtype}\n")
in_ref_data = {}
for (i, node) in enumerate(module.graph.nodes):
if (node.op == 'placeholder'):
shape = list(node.meta['val'].size())
print(f'>>> {i}th op, placeholder:', node.name, 'shape:', shape, 'val:', node.meta['val'])
self.input_nodes.append([i, node])
tmp = (np.random.rand(*shape) if (node.meta['val'].dtype == torch.float32) else np.random.randint(0, 1, shape))
if (node.meta['val'].dtype == torch.bool):
tmp = tmp.astype(np.bool_)
in_ref_data[((node.name + '_weight_or_param') if node.meta['tensor_meta'].requires_grad else node.name)] = tmp
np.savez(f'in_ref_data_{self.model_name}.npz', **in_ref_data)
self.input_nodes = sorted(self.input_nodes, key=(lambda x: x[0]), reverse=False)
in_args_txt_list = []
for node in self.input_nodes:
shape = list(node[1].meta['val'].size())
shape = ([1] if (shape == []) else shape)
in_args_txt_list.append('%args{}: {} loc(unknown)'.format(node[0], RankedTensorType.get(shape, F32Type.get()).__str__()))
first_call_op = True
for (i, node) in enumerate(module.graph.nodes):
if ((node.op == 'call_module') or (node.op == 'call_method') or (node.op == 'call_function')):
print(f'>>>> {i}th op, new op start:', node.name, 'val:', (node.meta['val'] if ('val' in node.meta) else 'None'), 'tensor_meta:', (node.meta['tensor_meta'] if ('tensor_meta' in node.meta) else 'None'))
if first_call_op:
output_args_txt = self.parseOutputNode([i for i in module.graph.nodes if ((i.op == 'output') and (len(i.args) > 0))][0])
self.createMlirModuleAndInput(', '.join(in_args_txt_list), output_args_txt)
first_call_op = False
op_type = torch.typename(node.target).split('.')[(- 1)]
print(f'{i}th op, node.name:', node.name, 'target:', node.target, 'op_type:', op_type, 'args:', node.args, 'users:', list(node.users.keys()), 'kwargs:', node.kwargs)
self.op_factory.get(op_type, (lambda x: NoneAndRaise(x)))(node)
return_op = list()
for (idx, _name) in enumerate(self.output_nodes):
if (_name is not None):
return_op.append(self.operands[_name])
else:
self.return_none_count += 1
self.create_return_op(return_op)
mlir_txt = self.mlir_module.operation.get_asm(enable_debug_info=True)
mlir_file = f'out_{self.model_name}.mlir'
mlir_origin = mlir_file.replace('.mlir', '_origin.mlir', 1)
with open(mlir_origin, 'w') as f:
f.write(mlir_txt)
self.WeightToNpz(self.weight_file)
mlir_opt_for_top(mlir_origin, mlir_file)
print('Save mlir file: {}'.format(mlir_file))
if self.args.cmp:
tensors = mlir_inference(in_ref_data, mlir_file, True)
print('out num:', len(tensors))
np.savez('ref_data.npz', **tensors)
del tensors
free_mlir_module()
gc.collect()
tpu_ir = ('tpu_' + mlir_file)
self.bmodel_path = os.path.join(self.work_dir, (tpu_ir + '.bmodel'))
mlir_lowering(mlir_file, tpu_ir, 'F32', self.args.chip)
if self.args.cmp:
tensors = mlir_inference(in_ref_data, tpu_ir, True)
np.savez('tpu_ir_out_data.npz', **tensors)
del tensors
free_mlir_module()
gc.collect()
f32_blobs_compare('tpu_ir_out_data.npz', 'ref_data.npz', '0.99,0.99')
mlir_to_model(tpu_ir, self.bmodel_path, ('final_' + mlir_file))
if self.args.cmp:
tensors = bmodel_inference(self.bmodel_path, in_ref_data)
np.savez('bmodel_out_data.npz', **tensors)
del tensors
gc.collect()
f32_blobs_compare('bmodel_out_data.npz', 'ref_data.npz', '0.99,0.99')
print('jit compile ok, start cmp')
mlir_mod = TpuMlirModule(self.bmodel_path, self.output_dtypes, self.return_none_count)
return mlir_mod
def parseOutputNode(self, node):
assert (node.op == 'output')
self.output_nodes = node.args[0]
output_shapes = [list(i.meta['tensor_meta'].shape) for i in node.args[0] if (i is not None)]
output_shapes = [([1] if (i == []) else i) for i in output_shapes]
self.output_dtypes = [i.meta['val'].dtype for i in node.args[0] if (i is not None)]
assert (len(output_shapes) == len(self.output_dtypes))
output_txt = ','.join([f'{self.get_tensor_type(shape, self.get_dtype(dtype)).__str__()}' for (shape, dtype) in zip(output_shapes, self.output_dtypes)])
if (len(output_shapes) > 1):
output_txt = '({})'.format(output_txt)
return output_txt
def createMlirModuleAndInput(self, in_args_txt, output_args_txt):
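# Parse a skeleton module containing a Placeholder.Op and a return, then erase both so real ops can be inserted at the entry block; an input op is created for every placeholder argument.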
num_output = len(output_args_txt.split(','))
result_var_name = '%1'
result_types = output_args_txt
if (num_output > 1):
result_var_name = ','.join([f'%1#{var_id}' for var_id in range(num_output)])
result_types = output_args_txt[1:(- 1)]
main_func = '\n module attributes {{sym_name = "{name}", module.weight_file= "{weight_file}", module.platform="TORCH", module.state="{state}", module.chip="{chip}", module.train="{train}"}} {{\n func.func ({args}) -> {output} {{\n %0 = "top.None"() : () -> none loc(unknown)\n %1:{last_output_num} = "Placeholder.Op"() : () -> {output}\n return {result_var} : {result_types}\n }} loc(unknown)\n }} loc(unknown)\n '.format(name=self.model_name, weight_file=self.weight_file, state=State.TOP_F32, chip='ALL', train='true', args=in_args_txt, last_output_num=num_output, result_var=result_var_name, result_types=result_types, output=output_args_txt)
print(f'''main_func:
{main_func}
main_func end''')
self.mlir_module = Module.parse(main_func, self.ctx)
func = self.mlir_module.body.operations[0]
entry_block = func.regions[0].blocks[0]
self.insert_point = InsertionPoint(entry_block)
self.none_op = entry_block.operations[0].operation.results[0]
entry_block.operations[2].operation.erase()
entry_block.operations[1].operation.erase()
for (node, arg) in zip(self.input_nodes, entry_block.arguments):
self.create_input_op(node[1], arg)
def get_loc(self, names):
if isinstance(names, str):
return Location.fused([Location.name(names)], context=self.ctx)
elif isinstance(names, list):
return Location.fused([Location.name(n) for n in names], context=self.ctx)
elif isinstance(names, torch.fx.Node):
return Location.fused([Location.name(names.name)], context=self.ctx)
else:
raise RuntimeError('Unknown names:{}'.format(names))
def convert_scalar_param(self, scalar):
assert isinstance(scalar, (int, float))
return np.atleast_1d(scalar).astype(np.float32)
def get_tensor_type(self, output_shapes, type=None):
if (type is None):
type = F32Type.get()
if (output_shapes == []):
return UnrankedTensorType.get(type)
if (output_shapes is None):
return NoneType.get()
if isinstance(output_shapes, tuple):
output_shapes = list(output_shapes)
assert isinstance(output_shapes, list)
assert (len(output_shapes) > 0)
if ((not isinstance(output_shapes[0], list)) and (output_shapes[0] is not None)):
return RankedTensorType.get(tuple(output_shapes), type)
out_types = []
if isinstance(type, list):
for (s, t) in zip(output_shapes, type):
if (s == []):
out_types.append(UnrankedTensorType.get(t))
elif (s is None):
out_types.append(NoneType.get())
else:
out_types.append(RankedTensorType.get(tuple(s), t))
else:
for s in output_shapes:
if (s == []):
out_types.append(UnrankedTensorType.get(type))
elif (s is None):
out_types.append(NoneType.get())
else:
out_types.append(RankedTensorType.get(tuple(s), type))
return out_types
def get_dtype(self, type1):
return F32Type.get()
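# NOTE: the early return above forces every dtype to F32, so the mapping below is unreachable.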
dtype = None
if (type1 == torch.float16):
dtype = F16Type.get()
elif (type1 == torch.float32):
dtype = F32Type.get()
elif (type1 == torch.int64):
dtype = IntegerType.get_signless(64)
elif (type1 == torch.int32):
dtype = IntegerType.get_signless(32)
return dtype
def get_output_dtypes(self, node):
dtypes = []
if ('val' in node.meta):
if isinstance(node.meta['val'], (tuple, list)):
dtypes = [(i.dtype if (i is not None) else None) for i in node.meta['val']]
else:
dtypes.append(node.meta['val'].dtype)
else:
dtypes.append(torch.float16)
dtypes = [(self.get_dtype(i) if (i is not None) else None) for i in dtypes]
return dtypes
def get_output_shapes(self, node, exclude_num=0):
shapes = []
if isinstance(node.meta['val'], (tuple, list)):
shapes = [(list(i.size()) if (i is not None) else None) for i in node.meta['val']]
else:
shapes.append(list(node.meta['val'].size()))
shapes = [([1] if ((i is not None) and (i == [])) else i) for i in shapes]
for _ in range(exclude_num):
shapes.pop()
return shapes
def create_input_op(self, node, func_arg):
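# In the forward graph, inputs whose tensor_meta has requires_grad are named with a '_weight_or_param' suffix and, when their dtype is not float32, routed through a WeightReorderOp; everything else becomes a plain InputOp.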
init_args = {}
output_shapes = self.get_output_shapes(node)
if ((not self.bwd) and ('tensor_meta' in node.meta) and node.meta['tensor_meta'].requires_grad):
init_args['loc'] = self.get_loc(f'{node.name}_weight_or_param')
init_args['ip'] = self.insert_point
init_args['input'] = func_arg
init_args['output'] = RankedTensorType.get(output_shapes[0], F32Type.get())
input_op = top.InputOp(**init_args).output
if (node.meta['tensor_meta'].dtype != torch.float32):
new_op2 = top.WeightReorderOp(*self.get_tensor_type(output_shapes, F32Type.get()), input_op, loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op2
return
self.operands[node] = input_op
else:
init_args['loc'] = self.get_loc(node)
init_args['ip'] = self.insert_point
init_args['input'] = func_arg
init_args['output'] = RankedTensorType.get(output_shapes[0], F32Type.get())
input_op = top.InputOp(**init_args)
self.operands[node] = input_op.output
def convert_permute_op(self, node):
op = self.operands[node.args[0]]
order = node.args[1]
dtype = self.get_output_dtypes(node)
new_op = top.PermuteOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op, order=order, loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op
def convert_relu_op(self, node):
op = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
new_op = top.ReluOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op, loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op
def convert_full_op(self, node):
if (node.args[0] == []):
self.operands[node] = self.create_weight_op(f'fullOp_{node.name}', node.args[1])
else:
dtype = self.get_output_dtypes(node)
op0 = self.operands[node.args[0]]
new_op = top.ConstantFillOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op0, value=node.args[1], loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op
def convert_scalar_tensor_op(self, node):
self.operands[node] = self.create_weight_op(f'scalar_tensorOp_{node.name}', node.args[0])
def convert_div_op(self, node):
if (node.args[0] in self.operands):
in1 = self.operands[node.args[0]]
else:
in1 = self.create_weight_op(f'divOp_{node.name}_input1', node.args[0])
if (node.args[1] in self.operands):
in2 = self.operands[node.args[1]]
else:
in2 = self.create_weight_op(f'divOp_{node.name}_input2', node.args[1])
dtype = self.get_output_dtypes(node)
new_op = top.DivOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), [in1, in2], loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_broadcast_op(self, node):
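# NOTE: this definition is shadowed by the later convert_broadcast_op defined further down in this class.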
op0 = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
shape_input = node.args[1]
dimension = node.args[2]
new_op = top.BroadcastOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op0, shape_input, dimension, loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_softmax_backward_data_op(self, node):
grad_output = self.operands[node.args[0]]
output = self.operands[node.args[1]]
dtype = self.get_output_dtypes(node)
new_op = top.SoftmaxBwdOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), grad_output, output, dim=node.args[2], loc=self.get_loc(node.name), ip=self.insert_point).grad_input
self.operands[node] = new_op
def convert_threshold_backward_op(self, node):
grad_out = self.operands[node.args[0]]
shape_z = list(node.args[1].meta['val'].size())
dtype = self.get_output_dtypes(node)
self_ = self.operands[node.args[1]]
threshold = node.args[2]
shape = list(node.meta['val'].size())
x_is_const = False
y_is_const = True
x_const_val = y_const_val = threshold
new_op = top.WhereOp(*self.get_tensor_type([shape], dtype), self_, grad_out, self.none_op, x_is_const=x_is_const, y_is_const=y_is_const, x_const_val=x_const_val, y_const_val=y_const_val, loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_add_op(self, node):
op0 = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
shape = self.get_output_shapes(node)
if isinstance(node.args[1], torch.fx.Node):
op1 = self.operands[node.args[1]]
new_op = top.AddOp(*self.get_tensor_type(shape, dtype), [op0, op1], do_relu=False, coeff=(np.atleast_1d(node.kwargs['alpha']).astype(np.float32) if ('alpha' in node.kwargs) else None), loc=self.get_loc(node), ip=self.insert_point).output
else:
op1 = np.atleast_1d(node.args[1]).astype(np.float32)
new_op = top.AddConstOp(*self.get_tensor_type(shape, dtype), op0, const_val=op1, do_relu=False, loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op
def convert_mul_op(self, node):
op0 = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
shape = self.get_output_shapes(node)
if isinstance(node.args[1], torch.fx.Node):
op1 = self.operands[node.args[1]]
new_op = top.MulOp(*self.get_tensor_type(shape, dtype), [op0, op1], do_relu=False, loc=self.get_loc(node), ip=self.insert_point).output
else:
op1 = node.args[1]
new_op = top.MulConstOp(*self.get_tensor_type(shape, dtype), op0, op1, loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op
def convert_reshape_op(self, node):
in_op = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
new_op = top.ReshapeOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), in_op, shape=node.args[1], loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op
def convert_concat_op(self, node):
operands = list()
for name in node.args[0]:
op = self.operands[name]
operands.append(op)
axis = node.args[1]
dtype = self.get_output_dtypes(node)
new_op = top.ConcatOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), operands, axis=axis, loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op
def convert_maxpool2d_with_indices_op(self, node):
op = self.operands[node.args[0]]
kernel_shape = node.args[1]
strides = node.args[2]
pads = node.args[3]
dilation = [1, 1]
ceil_mode = False
assert (np.array(dilation) == 1).all()
pads = (pads + pads)
dtype = self.get_output_dtypes(node)
outputs = top.MaxPoolWithMaskOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op, kernel_shape=kernel_shape, strides=strides, pads=pads, ceil_mode=ceil_mode, loc=self.get_loc(node), ip=self.insert_point)
self.operands[node] = [outputs.output, outputs.mask]
def convert_matmul_op(self, node):
op0 = self.operands[node.args[0]]
op1 = self.operands[node.args[1]]
dtype = self.get_output_dtypes(node)
new_op = top.MatMulOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op0, op1, self.none_op, do_relu=False, loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op
def convert_embedding_dense_backward_op(self, node):
grad_output = self.operands[node.args[0]]
indices = self.operands[node.args[1]]
dtype = self.get_output_dtypes(node)
new_op = top.EmbDenseBwdOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), grad_output, indices, num_weights=node.args[2], padding_idx=node.args[3], scale_grad_by_freq=node.args[4], loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op
def convert_squeeze_op(self, node):
op0 = self.operands[node.args[0]]
axes = node.args[1]
if isinstance(axes, int):
axes = [axes]
dtype = self.get_output_dtypes(node)
new_op = top.SqueezeOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op0, axes=axes, loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op
def convert_base_conv_op(self, node):
op = self.operands[node.args[0]]
strides = node.args[3]
dilations = node.args[5]
group = node.args[8]
kernel_shape = self.get_output_shapes(node.args[1])[0]
kernel_shape = kernel_shape[2:]
pads = node.args[4]
pads = (pads + pads)
filter_op = self.operands[node.args[1]]
if (node.args[2] is not None):
bias_op = self.operands[node.args[2]]
else:
bias_op = self.none_op
dtype = self.get_output_dtypes(node)
new_op = top.ConvOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op, filter_op, bias_op, kernel_shape=kernel_shape, strides=strides, dilations=dilations, pads=pads, group=group, do_relu=False, loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op
def convert_backward_conv_op(self, node):
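# Convolution backward: grad_weight comes either from ConvBwdWeightOp or from a convolution of the transposed input with the transposed grad_output; grad_input comes from a deconvolution of grad_output with the transposed weight; the bias gradient slot is returned as None.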
grad_out = self.operands[node.args[0]]
input = self.operands[node.args[1]]
weight = self.operands[node.args[2]]
kernel_shape = list(node.args[2].meta['val'].size())
kernel_shape = kernel_shape[2:]
bias_sizes = node.args[3]
strides = node.args[4]
dilations = node.args[6]
transposed = node.args[7]
output_padding = node.args[8]
output_mask = node.args[(- 1)]
group = node.args[(- 2)]
pads = node.args[5]
pads = (pads + pads)
grad_input = grad_weight = grad_bias = self.none_op
shape0 = ([] if (node.meta['val'][0] == None) else list(node.meta['val'][0].size()))
shape1 = ([] if (node.meta['val'][1] == None) else list(node.meta['val'][1].size()))
shape2 = ([] if (node.meta['val'][2] == None) else list(node.meta['val'][2].size()))
dtype = self.get_output_dtypes(node)
bias_op = self.none_op
if output_mask[1]:
shape = list(node.args[0].meta['val'].size())
if (shape[2] > 56):
input_shape = list(node.args[1].meta['val'].size())
grad_out_shape = list(node.args[0].meta['val'].size())
transposed_grad_weight = top.ConvBwdWeightOp(*self.get_tensor_type([shape1], [dtype[1]]), input, grad_out, group, input_shape, grad_out_shape, kernel_shape, strides, dilations, pads, output_mask[(- 1)], loc=self.get_loc((node.name + '_grad_weight')), ip=self.insert_point).output
else:
(shape[0], shape[1]) = (shape[1], shape[0])
transposed_gradout = top.TransposeOp(*self.get_tensor_type([shape], dtype), grad_out, 0, 1, loc=self.get_loc((node.name + '_transposed_gradout')), ip=self.insert_point).output
shape = list(node.args[1].meta['val'].size())
(shape[0], shape[1]) = (shape[1], shape[0])
transposed_input = top.TransposeOp(*self.get_tensor_type([shape], dtype), input, 0, 1, loc=self.get_loc((node.name + '_transposed_input')), ip=self.insert_point).output
grad_weight_kernel_shape = list(node.args[0].meta['val'].size())
grad_weight_kernel_shape = grad_weight_kernel_shape[2:]
grad_weight_shape = shape1
(grad_weight_shape[0], grad_weight_shape[1]) = (grad_weight_shape[1], grad_weight_shape[0])
if ((pads[0] > 0) and (strides[0] > 1)):
new_strides = [1, 1]
new_pads = copy.deepcopy(pads)
input_shape = list(node.args[1].meta['val'].size())
pad_cal = (grad_weight_shape[2] - ((pads[0] + input_shape[2]) - (strides[0] * (grad_weight_kernel_shape[0] - 1))))
(new_pads[2], new_pads[3]) = (pad_cal, pad_cal)
grad_weight = top.ConvOp(*self.get_tensor_type([grad_weight_shape], dtype), transposed_input, transposed_gradout, bias_op, kernel_shape=grad_weight_kernel_shape, strides=new_strides, dilations=strides, pads=new_pads, group=group, do_relu=False, loc=self.get_loc((node.name + '_grad_weight')), ip=self.insert_point).output
else:
grad_weight = top.ConvOp(*self.get_tensor_type([grad_weight_shape], dtype), transposed_input, transposed_gradout, bias_op, kernel_shape=grad_weight_kernel_shape, strides=strides, dilations=strides, pads=pads, group=group, do_relu=False, loc=self.get_loc((node.name + '_grad_weight')), ip=self.insert_point).output
temp_shape = shape1
(temp_shape[0], temp_shape[1]) = (temp_shape[1], temp_shape[0])
transposed_grad_weight = top.TransposeOp(*self.get_tensor_type([temp_shape], dtype), grad_weight, 0, 1, loc=self.get_loc((node.name + '_transposed_grad_weight')), ip=self.insert_point).output
if output_mask[0]:
transposed_weight_shape = shape1
(transposed_weight_shape[0], transposed_weight_shape[1]) = (transposed_weight_shape[1], transposed_weight_shape[0])
transposed_weight = top.TransposeOp(*self.get_tensor_type([transposed_weight_shape], dtype), weight, 0, 1, loc=self.get_loc((node.name + '_transposed_weight_2')), ip=self.insert_point).output
grad_input_kernel_shape = list(node.args[0].meta['val'].size())[(- 1)]
grad_input_output_shape = list(node.args[1].meta['val'].size())[(- 1)]
output_padding = (((grad_input_output_shape - (strides[0] * (grad_input_kernel_shape - 1))) + (2 * pads[0])) - kernel_shape[0])
output_padding = ([output_padding] * 2)
grad_input = top.DeconvOp(*self.get_tensor_type([shape0], dtype), grad_out, transposed_weight, bias_op, kernel_shape=kernel_shape, strides=strides, pads=pads, group=group, dilations=dilations, output_padding=output_padding, do_relu=False, loc=self.get_loc((node.name + '_grad_input')), ip=self.insert_point).output
self.operands[node] = [grad_input, transposed_grad_weight, None]
def convert_sum_op(self, node):
op0 = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
in_shape = list(node.args[0].meta['val'].size())
new_op = top.ReduceOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op0, axes=(node.args[1] if (len(node.args) > 1) else tuple(range(len(in_shape)))), keepdims=(node.args[2] if (len(node.args) > 2) else False), mode=StringAttr.get('ReduceSum'), loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op
def convert_mean_op(self, node):
op0 = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
in_shape = list(node.args[0].meta['val'].size())
new_op = top.ReduceOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op0, axes=(sorted(node.args[1]) if (len(node.args) > 1) else tuple(range(len(in_shape)))), keepdims=(node.args[2] if (len(node.args) > 2) else False), mode=StringAttr.get('ReduceMean'), loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op
def convert_getitem_op(self, node):
self.operands[node] = self.operands[node.args[0]][node.args[1]]
def convert_to_op(self, node):
op0 = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
shape = self.get_output_shapes(node)
new_op = top.CastOp(*self.get_tensor_type(shape, dtype), op0, loc=self.get_loc(node), ip=self.insert_point).output
new_op2 = top.WeightReorderOp(*self.get_tensor_type(shape, dtype), new_op, loc=self.get_loc(f'{node}_weightReorder'), ip=self.insert_point).output
self.operands[node] = new_op2
def create_weight_op(self, name, arg, data_type='F32'):
arg_t = self.convert_scalar_param(arg)
arg_shape = list(arg_t.shape)
if (name in self.load_weight):
(_op, _shape, _type) = self.load_weight[name]
if ((_shape != arg_shape) or (_type != data_type)):
raise RuntimeError('{} weight conflict'.format(name))
return _op
tensor_type = RankedTensorType.get(arg_shape, self.mlir_type[data_type])
op = Operation.create('top.Weight', results=[tensor_type], loc=Location.fused([Location.name(name)]))
self.insert_point.insert(op)
result = op.results[0]
self.load_weight[name] = (result, arg_shape, data_type)
self.weights_data[name] = arg_t
return result
def convert_arange_op(self, node):
dtype = self.get_output_dtypes(node)
if (node.args[0] in self.operands):
start = self.operands[node.args[0]]
else:
start = self.create_weight_op(f'arangeOp_{node.name}_start', node.args[0])
if (node.args[1] in self.operands):
end = self.operands[node.args[1]]
else:
end = self.create_weight_op(f'arangeOp_{node.name}_end', node.args[1])
if (len(node.args) > 2):
step = self.create_weight_op(f'arangeOp_{node.name}_step', node.args[2])
else:
step = self.none_op
new_op = top.ArangeOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), start, end, step, loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_slice_op(self, node):
op0 = self.operands[node.args[0]]
axis = self.create_weight_op(f'sliceOp_{node.name}_axis', node.args[1])
start = self.create_weight_op(f'sliceOp_{node.name}_start', node.args[2])
end = self.create_weight_op(f'sliceOp_{node.name}_end', node.args[3])
if (len(node.args) > 4):
step = self.create_weight_op(f'sliceOp_{node.name}_step', node.args[4])
else:
step = self.create_weight_op(f'sliceOp_{node.name}_step', 1)
dtype = self.get_output_dtypes(node)
new_op = top.SliceAxisOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op0, axis, start, step, end, loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_embedding_op(self, node):
dtype = self.get_output_dtypes(node)
if (node.args[0] in self.operands):
weight = self.operands[node.args[0]]
else:
weight = self.create_weight_op(f'embeddingOp_{node.name}_input1', node.args[0])
if (node.args[1] in self.operands):
indices = self.operands[node.args[1]]
else:
indices = self.create_weight_op(f'embeddingOp_{node.name}_input2', node.args[1])
new_op = top.GatherOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), weight, indices, axis=0, loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_compare_op(self, node, mode):
assert (mode in ('Equal', 'Greater', 'GreaterOrEqual', 'Less', 'LessOrEqual', 'NotEqual'))
op0 = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
if (node.args[1] in self.operands):
op1 = self.operands[node.args[1]]
else:
op1 = self.create_weight_op(f'compareOp_{node.name}_input1', node.args[1])
new_op = top.CompareOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op0, op1, mode=StringAttr.get(mode), loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_where_op(self, node):
op0 = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
x_is_const = y_is_const = False
x_const_val = y_const_val = 0
if (node.args[1] in self.operands):
op1 = self.operands[node.args[1]]
else:
x_is_const = True
op1 = self.none_op
x_const_val = node.args[1]
if (node.args[2] in self.operands):
op2 = self.operands[node.args[2]]
else:
y_is_const = True
op2 = self.none_op
y_const_val = node.args[2]
new_op = top.WhereOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op0, op1, op2, x_is_const=x_is_const, y_is_const=y_is_const, x_const_val=x_const_val, y_const_val=y_const_val, loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_copy_op(self, node):
op0 = self.operands[node.args[0]]
self.operands[node] = op0
def convert_rsqrt_op(self, node):
dtype = self.get_output_dtypes(node)
op0 = self.operands[node.args[0]]
shape = self.get_output_shapes(node)
length = 1
for n in shape[0]:
length *= n
new_op = top.RsqrtOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op0, length, loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_sub_op(self, node, is_reverse=False):
dtype = self.get_output_dtypes(node)
shape = self.get_output_shapes(node)
op0 = self.operands[node.args[0]]
if isinstance(node.args[1], torch.fx.Node):
op1 = self.operands[node.args[1]]
new_op = top.SubOp(*self.get_tensor_type(shape, dtype), [op0, op1], is_reverse=is_reverse, loc=self.get_loc(node.name), ip=self.insert_point).output
else:
op1 = node.args[1]
new_op = top.SubConstOp(*self.get_tensor_type(shape, dtype), op0, op1, is_reverse=is_reverse, loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op
def convert_addmm_op(self, node):
dtype = self.get_output_dtypes(node)
shape = self.get_output_shapes(node)
op0 = self.operands[node.args[0]]
mat1_op = self.operands[node.args[1]]
mat2_op = self.operands[node.args[2]]
if (len(node.args) == 5):
beta = self.const_val[node.args[3]]
alpha = self.const_val[node.args[4]]
else:
beta = 1.0
alpha = 1.0
mm_op = top.MatMulOp(*self.get_tensor_type(shape, dtype), mat1_op, mat2_op, self.none_op, do_relu=False, loc=self.get_loc((node.name + '_mm')), ip=self.insert_point).output
new_op = top.AddOp(*self.get_tensor_type(shape, dtype), [op0, mm_op], coeff=[beta, alpha], loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_split_op(self, node):
dtype = self.get_output_dtypes(node)
op0 = self.operands[node.args[0]]
axis = node.args[2]
split_size = node.args[1]
if isinstance(split_size, int):
num = len(node.meta['val'])
split_size = ([split_size] * num)
else:
num = len(split_size)
names = [((node.name + '_') + str(i)) for i in range(num)]
output = top.SplitOp(self.get_tensor_type(self.get_output_shapes(node), dtype), op0, axis, num, split_size=split_size, loc=self.get_loc(names), ip=self.insert_point)
self.operands[node] = output.outputs
def convert_expand_op(self, node):
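# expand is lowered as a broadcast multiply: fill a tensor of the target shape with 1.0 (ConstantFillOp) and multiply the input into it.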
dtype = self.get_output_dtypes(node)
shape = self.get_output_shapes(node)
opI = self.operands[node.args[0]]
if (isinstance(node.args[1], torch.fx.Node) and (node.args[1] in self.operands)):
opS = self.operands[node.args[1]]
else:
opS = self.create_weight_op(f'expandOp_{node.name}_input1', node.args[1])
new_cf = top.ConstantFillOp(*self.get_tensor_type(shape, dtype), opS, value=1.0, loc=self.get_loc((node.name + '_size')), ip=self.insert_point).output
new_exp = top.MulOp(*self.get_tensor_type(shape, dtype), [opI, new_cf], do_relu=False, loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_exp
def convert_expand_op2(self, node):
dtype = self.get_output_dtypes(node)
opI = self.operands[node.args[0]]
new_exp = top.ExpandOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), opI, shape=node.args[1], loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_exp
def convert_broadcast_op(self, node):
dtype = self.get_output_dtypes(node)
op = self.operands[node.args[0]]
repeat_shape = node.args[1]
axis = node.args[2]
unsqueeze_shape = ([1] * len(repeat_shape))
unsqueeze_axis = list(range(len(repeat_shape)))
for idx in range(len(repeat_shape)):
if (idx in axis):
unsqueeze_axis.remove(idx)
unsqueeze_shape[idx] = repeat_shape[idx]
unsqueeze_op = top.UnsqueezeOp(*self.get_tensor_type([unsqueeze_shape], dtype), op, unsqueeze_axis, loc=self.get_loc((node.name + '_unsqueeze')), ip=self.insert_point).output
new_op = top.ExpandOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), unsqueeze_op, shape=repeat_shape, loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_amax_op(self, node, index):
dtype = self.get_output_dtypes(node)
op0 = self.operands[node.args[0]]
dim = node.args[1][0]
keepdims = node.args[2]
select_last_index = True
new_op = top.ArgOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), *self.get_tensor_type(self.get_output_shapes(node), dtype), input=op0, axis=dim, keepdims=keepdims, mode=StringAttr.get('ArgMax'), select_last_index=select_last_index, loc=self.get_loc(node.name), ip=self.insert_point)
if index:
out_ops = [new_op.values, new_op.indices]
else:
out_ops = new_op.values
self.operands[node] = out_ops
def convert_gather_op(self, node):
op0 = self.operands[node.args[0]]
axis = node.args[1]
op1 = self.operands[node.args[2]]
dtype = self.get_output_dtypes(node)
new_op = top.GatherElementsOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op0, op1, axis=axis, loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_neg_op(self, node):
op0 = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
new_op = top.MulConstOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op0, const_val=(- 1), loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_transpose_op(self, node):
op0 = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
no_dims = (len(node.args) == 1)
dim0 = (node.args[1] if (not no_dims) else 0)
dim1 = (node.args[2] if (not no_dims) else 1)
new_op = top.TransposeOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op0, dim0, dim1, loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_batch_norm_op(self, node):
dtype = self.get_output_dtypes(node)
op0 = self.operands[node.args[0]]
weight = self.operands[node.args[1]]
bias = self.operands[node.args[2]]
mean = self.operands[node.args[3]]
var = self.operands[node.args[4]]
momentum = node.args[6]
eps = node.args[7]
out = top.BatchNormTrainOp(*self.get_tensor_type(self.get_output_shapes(node, 2), dtype), op0, mean=mean, variance=var, gamma=weight, beta=bias, epsilon=eps, momentum=momentum, loc=self.get_loc(node), ip=self.insert_point)
self.operands[node] = [out.output, out.mean_out, out.variance_out, mean, var]
def convert_batch_norm_backward_op(self, node):
grad_out = self.operands[node.args[0]]
input = self.operands[node.args[1]]
weight = self.operands[node.args[2]]
mean = self.operands[node.args[5]]
invstd = self.operands[node.args[6]]
eps = node.args[8]
output_mask = node.args[(- 1)]
dtype = self.get_output_dtypes(node)
gradinput = gradweight = gradbias = self.none_op
out = top.BatchNormBwdOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), grad_out, input, weight, mean, invstd, epsilon=eps, loc=self.get_loc([(node.name + 'grad_input'), (node.name + 'grad_weight'), (node.name + 'grad_bias')]), ip=self.insert_point)
if output_mask[2]:
gradbias = out.bias_grad
if output_mask[1]:
gradweight = out.weight_grad
if output_mask[0]:
gradinput = out.grad_in
self.operands[node] = [gradinput, gradweight, gradbias]
def convert_layer_norm_backward_op(self, node):
grad_out = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
assert (node.args[1] in self.operands)
input = self.operands[node.args[1]]
normalized_shape = node.args[2]
assert (node.args[3] in self.operands)
mean = self.operands[node.args[3]]
assert (node.args[4] in self.operands)
rstd = self.operands[node.args[4]]
weight_opt = (self.operands[node.args[5]] if (node.args[5] in self.operands) else self.none_op)
bias_opt = (self.operands[node.args[6]] if (node.args[6] in self.operands) else self.none_op)
out = top.LayerNormBwdOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), grad_out, input, mean, rstd, weight_opt, bias_opt, normalized_shape=normalized_shape, loc=self.get_loc([(node.name + '_grad_input'), (node.name + '_grad_weight'), (node.name + '_grad_bias')]), ip=self.insert_point)
self.operands[node] = [out.grad_input, out.grad_weight, out.grad_bias]
def convert_layer_norm_op(self, node):
op0 = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
shape0 = list(node.meta['val'][0].size())
shape1 = list(node.meta['val'][1].size())
shape2 = list(node.meta['val'][2].size())
normalized_shape = node.args[1]
assert (node.args[2] in self.operands)
scale_opd = self.operands[node.args[2]]
assert (node.args[3] in self.operands)
bias_opd = self.operands[node.args[3]]
eps = node.args[4]
axis = np.atleast_1d((- len(normalized_shape))).astype(np.int32)
out = top.LayerNormTrainOp(*self.get_tensor_type([shape0, shape1, shape2], dtype), op0, scale_opd, bias_opd, normalized_shape=normalized_shape, axis=axis, eps=eps, loc=self.get_loc([node.name, (node.name + '_Mean'), (node.name + '_Rstd')]), ip=self.insert_point)
new_op = out.output
mean = out.mean
rstd = out.variance
self.operands[node] = [new_op, mean, rstd]
def convert_softmax_op(self, node, log):
op0 = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
shape = list(node.meta['val'].size())
in_dim_len = len(list(node.args[0].meta['val'].size()))
tmp = ((node.args[1] + in_dim_len) if (node.args[1] < 0) else node.args[1])
dim = np.atleast_1d(tmp).astype(np.int32)
new_op = top.SoftmaxOp(*self.get_tensor_type([shape], dtype), op0, axis=dim, log=log, loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_nllloss_op(self, node):
op0 = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
shape = list(node.meta['val'][0].size())
shape = ([1] if (shape == []) else shape)
op1 = self.operands[node.args[1]]
if (node.args[2] is not None):
weight = self.create_weight_op(f'nlllossOp_{node.name}_input1', node.args[2])
else:
weight = self.none_op
reduction = node.args[3]
ignore_index = np.atleast_1d(node.args[4]).astype(np.int32)
new_op = top.NLLlossOp(*self.get_tensor_type([shape], dtype), *self.get_tensor_type([shape], dtype), op0, op1, weight, reduction, ignore_index, loc=self.get_loc(node.name), ip=self.insert_point)
self.operands[node] = [new_op.output, new_op.total_weight]
def convert_var_op(self, node):
op0 = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
reduce_list = node.args[1]
correction = node.kwargs['correction']
new_op = top.VarianceOp(*self.get_tensor_type(self.get_output_shapes(node), dtype), op0, reduce_list, correction, loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_select_op(self, node):
pass
def convert_math_op(self, node, mode):
assert (mode in ['cos', 'cosh', 'sin', 'sinh', 'tan', 'tanh', 'exp', 'erf', 'log'])
op0 = self.operands[node.args[0]]
dtype = self.get_output_dtypes(node)
shape = list(node.meta['val'].size())
new_op = getattr(top, ('%sOp' % mode.capitalize()))(*self.get_tensor_type([shape], dtype), op0, loc=self.get_loc(node.name), ip=self.insert_point).output
self.operands[node] = new_op
def convert_clone_op(self, node):
assert (len(node.args) == 1)
self.operands[node] = self.operands[node.args[0]]
def convert_unsqueeze_op(self, node):
op0 = self.operands[node.args[0]]
axis = node.args[1]
dtype = self.get_output_dtypes(node)
shape = list(node.meta['val'].size())
new_op = top.UnsqueezeOp(*self.get_tensor_type([shape], dtype), op0, axes=[axis], loc=self.get_loc(node), ip=self.insert_point).output
self.operands[node] = new_op
def create_return_op(self, Operands):
return_op = Operation.create('func.return', operands=Operands, results=[])
self.insert_point.insert(return_op)
return return_op
def WeightToNpz(self, weight_file):
tensor_npz = {}
for name in self.weights_data:
tensor_npz[name] = self.weights_data[name]
np.savez(weight_file, **tensor_npz) |
class HardMishJitAutoFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return hard_mish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return hard_mish_jit_bwd(x, grad_output) |
class AmpProblemTest(unittest.TestCase):
def setUp(self):
H0 = (50000.0, 90.0, 0.0)
M = np.array([45.0, 90.0])
chi_e = 0.05
[xx, yy] = np.meshgrid(np.linspace((- 200), 200, 50), np.linspace((- 200), 200, 50))
b = 100
A = 50
zz = (A * np.exp(((- 0.5) * (((xx / b) ** 2.0) + ((yy / b) ** 2.0)))))
topo = np.c_[(mkvc(xx), mkvc(yy), mkvc(zz))]
xr = np.linspace((- 100.0), 100.0, 20)
yr = np.linspace((- 100.0), 100.0, 20)
(X, Y) = np.meshgrid(xr, yr)
Z = ((A * np.exp(((- 0.5) * (((X / b) ** 2.0) + ((Y / b) ** 2.0))))) + 10)
rxLoc = np.c_[(mkvc(X.T), mkvc(Y.T), mkvc(Z.T))]
receiver_list = magnetics.receivers.Point(rxLoc)
srcField = magnetics.sources.SourceField(receiver_list=[receiver_list], parameters=H0)
survey = magnetics.survey.Survey(srcField)
h = [5, 5, 5]
padDist = (np.ones((3, 2)) * 100)
mesh = mesh_builder_xyz(rxLoc, h, padding_distance=padDist, depth_core=100, mesh_type='tree')
mesh = refine_tree_xyz(mesh, topo, method='surface', octree_levels=[4, 4], finalize=True)
actv = active_from_xyz(mesh, topo)
nC = int(actv.sum())
M_xyz = utils.mat_utils.dip_azimuth2cartesian((np.ones(nC) * M[0]), (np.ones(nC) * M[1]))
ind = utils.model_builder.get_indices_block(np.r_[((- 20), (- 20), (- 10))], np.r_[(20, 20, 25)], mesh.gridCC)[0]
model = np.zeros(mesh.nC)
model[ind] = chi_e
model = model[actv]
idenMap = maps.IdentityMap(nP=nC)
simulation = magnetics.Simulation3DIntegral(survey=survey, mesh=mesh, chiMap=idenMap, ind_active=actv, store_sensitivities='forward_only')
simulation.M = M_xyz
synthetic_data = simulation.dpred(model)
nD = rxLoc.shape[0]
std = 5
synthetic_data += (np.random.randn(nD) * std)
wd = (np.ones(nD) * std)
data_object = data.Data(survey, dobs=synthetic_data, noise_floor=wd)
surf = utils.model_utils.surface_layer_index(mesh, topo)
nC = np.count_nonzero(surf)
mstart = (np.ones(nC) * 0.0001)
idenMap = maps.IdentityMap(nP=nC)
simulation = magnetics.simulation.Simulation3DIntegral(mesh=mesh, survey=survey, chiMap=idenMap, ind_active=surf, store_sensitivities='ram')
simulation.model = mstart
reg = regularization.Sparse(mesh, indActive=surf, mapping=maps.IdentityMap(nP=nC), alpha_z=0)
reg.mref = np.zeros(nC)
opt = optimization.ProjectedGNCG(maxIter=10, lower=(- np.inf), upper=np.inf, maxIterLS=5, maxIterCG=5, tolCG=0.001)
dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_object)
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)
betaest = directives.BetaEstimate_ByEig(beta0_ratio=2)
IRLS = directives.Update_IRLS(f_min_change=0.001, minGNiter=1, beta_tol=0.1, max_irls_iterations=5)
update_Jacobi = directives.UpdatePreconditioner()
inv = inversion.BaseInversion(invProb, directiveList=[betaest, IRLS, update_Jacobi])
print('Solving for Equivalent Source')
mrec = inv.run(mstart)
receiver_list = magnetics.receivers.Point(rxLoc, components=['bx', 'by', 'bz'])
srcField = magnetics.sources.SourceField(receiver_list=[receiver_list], parameters=H0)
surveyAmp = magnetics.survey.Survey(srcField)
simulation = magnetics.simulation.Simulation3DIntegral(mesh=mesh, survey=surveyAmp, chiMap=idenMap, ind_active=surf, is_amplitude_data=True, store_sensitivities='forward_only')
bAmp = simulation.fields(mrec)
nC = int(actv.sum())
idenMap = maps.IdentityMap(nP=nC)
mstart = (np.ones(nC) * 0.0001)
simulation = magnetics.simulation.Simulation3DIntegral(survey=surveyAmp, mesh=mesh, chiMap=idenMap, ind_active=actv, is_amplitude_data=True)
data_obj = data.Data(survey, dobs=bAmp, noise_floor=wd)
reg = regularization.Sparse(mesh, indActive=actv, mapping=idenMap)
reg.norms = [1, 0, 0, 0]
reg.mref = np.zeros(nC)
dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_obj)
opt = optimization.ProjectedGNCG(maxIter=10, lower=0.0, upper=1.0, maxIterLS=5, maxIterCG=5, tolCG=0.001)
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)
betaest = directives.BetaEstimate_ByEig(beta0_ratio=1)
IRLS = directives.Update_IRLS(max_irls_iterations=5, f_min_change=0.001, minGNiter=1, coolingRate=1, beta_search=False)
update_SensWeight = directives.UpdateSensitivityWeights()
update_Jacobi = directives.UpdatePreconditioner()
self.inv = inversion.BaseInversion(invProb, directiveList=[update_SensWeight, betaest, IRLS, update_Jacobi])
self.mstart = mstart
self.model = model
self.sim = simulation
def test_mag_inverse(self):
mrec_Amp = self.inv.run(self.mstart)
residual = (np.linalg.norm((mrec_Amp - self.model)) / np.linalg.norm(self.model))
self.assertTrue((residual < 1.0))
def tearDown(self):
if (self.sim.store_sensitivities == 'disk'):
try:
shutil.rmtree(self.sim.sensitivity_path)
except FileNotFoundError:
pass |
def clip_gradient(optimizer, grad_clip):
assert (grad_clip > 0), 'gradient clip value must be greater than 0'
for group in optimizer.param_groups:
for param in group['params']:
if (param.grad is None):
continue
param.grad.data.clamp_((- grad_clip), grad_clip) |
def _chi(state: State, action):
c_p = state.current_player
tar_p = ((c_p + 3) % 4)
tar = state._target
state = _accept_riichi(state)
meld = Meld.init(action, tar, src=jnp.int32(3))
state = _append_meld(state, meld, c_p)
hand = state._hand.at[c_p].set(Hand.chi(state._hand[c_p], tar, action))
is_menzen = state._is_menzen.at[c_p].set(FALSE)
legal_action_mask = jnp.zeros(NUM_ACTION, dtype=jnp.bool_)
legal_action_mask = legal_action_mask.at[:34].set((hand[c_p] > 0))
river = state._river.at[(tar_p, (state._n_river[tar_p] - 1))].set((state._river[(tar_p, (state._n_river[tar_p] - 1))] | jnp.uint8(128)))
return state.replace(_target=jnp.int8((- 1)), _is_menzen=is_menzen, _hand=hand, legal_action_mask=legal_action_mask, _river=river) |
def parse_xml(tree):
documents = []
root = tree.getroot()
for document in root:
if (document.tag != 'document'):
raise ValueError('Unexpected orchid xml layout: {}'.format(document.tag))
paragraphs = []
for paragraph in document:
if (paragraph.tag != 'paragraph'):
raise ValueError('Unexpected orchid xml layout: {} under {}'.format(paragraph.tag, document.tag))
sentences = []
for sentence in paragraph:
if (sentence.tag != 'sentence'):
raise ValueError('Unexpected orchid xml layout: {} under {}'.format(sentence.tag, paragraph.tag))
if (sentence.attrib['line_num'] in skipped_lines):
continue
words = []
for (word_idx, word) in enumerate(sentence):
if (word.tag != 'word'):
raise ValueError('Unexpected orchid xml layout: {} under {}'.format(word.tag, sentence.tag))
word = word.attrib['surface']
word = escape_sequences.get(word, word)
if (word == '<space>'):
if (word_idx == 0):
raise ValueError('Space character was the first token in a sentence: {}'.format(sentence.attrib['line_num']))
else:
words[(- 1)] = (words[(- 1)][0], True)
continue
if ((len(word) > 1) and (word[0] == '<') and (word not in allowed_sequences)):
raise ValueError('Unknown escape sequence {}'.format(word))
words.append((word, False))
if (len(words) == 0):
continue
words[(- 1)] = (words[(- 1)][0], True)
sentences.append(words)
paragraphs.append(sentences)
documents.append(paragraphs)
return documents |
def _edge_matcher(digraph, nxpattern, node_pred, edge_pred):
pedge = next(iter(nxpattern.edges))
pu = nxpattern.nodes[pedge[0]]
pv = nxpattern.nodes[pedge[1]]
if (edge_pred is None):
for (u, v) in digraph.edges:
if (node_pred(digraph.nodes[u], pu) and node_pred(digraph.nodes[v], pv)):
if (u is v):
continue
(yield {u: pedge[0], v: pedge[1]})
else:
for (u, v) in digraph.edges:
if (node_pred(digraph.nodes[u], pu) and node_pred(digraph.nodes[v], pv) and edge_pred(digraph.edges[(u, v)], nxpattern.edges[pedge])):
if (u is v):
continue
(yield {u: pedge[0], v: pedge[1]}) |
class sfftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name': 'sfftw threads', 'libs': ['srfftw_threads', 'sfftw_threads'], 'includes': ['sfftw_threads.h', 'srfftw_threads.h'], 'macros': [('SCIPY_SFFTW_THREADS_H', None)]}] |
def mobilenet_v1_arg_scope(is_training=True, weight_decay=4e-05, stddev=0.09, regularize_depthwise=False, batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
batch_norm_params = {'center': True, 'scale': True, 'decay': batch_norm_decay, 'epsilon': batch_norm_epsilon}
if (is_training is not None):
batch_norm_params['is_training'] = is_training
weights_init = tf.truncated_normal_initializer(stddev=stddev)
regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
if regularize_depthwise:
depthwise_regularizer = regularizer
else:
depthwise_regularizer = None
with slim.arg_scope([slim.conv2d, slim.separable_conv2d], weights_initializer=weights_init, activation_fn=tf.nn.relu6, normalizer_fn=slim.batch_norm):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
with slim.arg_scope([slim.separable_conv2d], weights_regularizer=depthwise_regularizer) as sc:
return sc |
class TilingStrategyDummy(TilingStrategy):
def get_number_of_spatial_sample_per_image(self):
return 1
def get_window(self, idx):
return None |
class TensorDataset(torch.utils.data.Dataset):
def __init__(self, images, labels, transform=None):
self.images = images.detach().cpu().float()
self.targets = labels.detach().cpu()
self.transform = transform
def __getitem__(self, index):
sample = self.images[index]
if (self.transform is not None):
sample = self.transform(sample)
target = self.targets[index]
return (sample, target)
def __len__(self):
return self.images.shape[0] |