code stringlengths 101 5.91M |
|---|
class C51(DQN):
    """Categorical DQN network head (C51).

    Instead of one scalar Q-value per action, the network emits a categorical
    distribution over ``num_atoms`` support points for every action.
    """

    def __init__(self, c: int, h: int, w: int, action_shape: Sequence[int],
                 num_atoms: int = 51,
                 device: Union[str, int, torch.device] = 'cpu') -> None:
        self.action_num = np.prod(action_shape)
        # The wrapped DQN produces one logit per (action, atom) pair.
        super().__init__(c, h, w, [self.action_num * num_atoms], device)
        self.num_atoms = num_atoms

    def forward(self, x: Union[np.ndarray, torch.Tensor],
                state: Optional[Any] = None,
                info: Optional[Dict[str, Any]] = None) -> Tuple[torch.Tensor, Any]:
        """Map an observation batch to per-action atom probabilities.

        Returns a tensor of shape (batch, action_num, num_atoms) whose last
        axis sums to 1, plus the (pass-through) recurrent state.
        """
        logits, state = super().forward(x)
        # Normalize each action's atom logits into a probability distribution.
        probs = logits.view(-1, self.num_atoms).softmax(dim=-1)
        probs = probs.view(-1, self.action_num, self.num_atoms)
        return probs, state
def WithParams(t, p):
    """Return a new tactic equivalent to `t`, but configured with the
    parameter set `p` (wraps Z3's `using-params` tactic combinator).

    `t` may be anything `_to_tactic` accepts (tactic object or name).
    """
    t = _to_tactic(t, None)
    return Tactic(Z3_tactic_using_params(t.ctx.ref(), t.tactic, p.params), t.ctx)
def test_compound_open(model=None):
    """A compound OpenConstituent('ROOT', 'S') followed by three shifts and a
    close must yield both bracketing levels: ROOT -> S -> three leaves."""
    if model is None:
        model = SimpleModel()
    state = build_initial_state(model)[0]

    open_transition = parse_transitions.OpenConstituent('ROOT', 'S')
    assert open_transition.is_legal(state, model)
    shift = parse_transitions.Shift()
    close_transition = parse_transitions.CloseConstituent()

    state = open_transition.apply(state, model)
    for _ in range(3):
        state = shift.apply(state, model)
    state = close_transition.apply(state, model)

    tree = model.get_top_constituent(state.constituents)
    # Outer bracket from the compound open.
    assert tree.label == 'ROOT'
    assert len(tree.children) == 1
    # Inner bracket wrapping the shifted words.
    tree = tree.children[0]
    assert tree.label == 'S'
    assert len(tree.children) == 3
    for child, word in zip(tree.children, ('Unban', 'Mox', 'Opal')):
        assert child.children[0].label == word
# NOTE(review): `_data_model` appeared as a bare expression statement above the
# class — almost certainly a decorator whose '@' was lost in transcription;
# restored as a decorator. Confirm against the upstream source.
@_data_model
class BufferBinding():
    """One entry of a binding table: pairs a binding slot index with the
    buffer bound to it, parsed from a JSON dict with 'binding' and 'buffer'
    keys."""

    def __init__(self, j: Dict[str, Any]) -> None:
        binding = j['binding']
        buffer = j['buffer']
        # Binding slot number within the descriptor set.
        self.binding: int = int(binding)
        # The bound buffer, parsed from its own JSON sub-object.
        self.buffer: Buffer = Buffer(buffer)
class AutoModel():
    """Factory that instantiates the correct concrete base-model class for a
    given configuration or pretrained checkpoint.

    Not meant to be constructed directly — use `from_config` or
    `from_pretrained`.
    """

    def __init__(self):
        raise EnvironmentError('AutoModel is designed to be instantiated using the `AutoModel.from_pretrained(pretrained_model_name_or_path)` or `AutoModel.from_config(config)` methods.')

    # NOTE(review): the two docstring-helper calls below appeared as bare
    # statements before each def — decorator expressions whose '@' was lost in
    # transcription. They and the missing @classmethod (both methods take
    # `cls` and use `cls.__name__`) have been restored; confirm upstream.
    @classmethod
    @_list_option_in_docstrings(MODEL_MAPPING, use_model_types=False)
    def from_config(cls, config):
        """Instantiate a base model from a configuration object.

        Weights are randomly initialized (nothing is loaded from disk).
        Raises ValueError when the config type is not in MODEL_MAPPING.
        """
        if type(config) in MODEL_MAPPING.keys():
            return MODEL_MAPPING[type(config)](config)
        raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_MAPPING.keys()))))

    @classmethod
    @_list_option_in_docstrings(MODEL_MAPPING)
    @_start_docstrings('Instantiate one of the base model classes of the library from a pretrained model.', AUTO_MODEL_PRETRAINED_DOCSTRING)
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Instantiate and load a base model from a pretrained checkpoint.

        A `config` kwarg may be supplied; otherwise the config is resolved via
        AutoConfig.from_pretrained (unused kwargs are forwarded to the model).
        Raises ValueError when the resolved config type is not in MODEL_MAPPING.
        """
        config = kwargs.pop('config', None)
        if not isinstance(config, PretrainedConfig):
            config, kwargs = AutoConfig.from_pretrained(pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs)
        if type(config) in MODEL_MAPPING.keys():
            return MODEL_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_MAPPING.keys()))))
def register_Ns3EpcX2SapSecondaryHandoverParams_methods(root_module, cls):
    """Register constructors and public attributes of
    ns3::EpcX2Sap::SecondaryHandoverParams on its PyBindGen class wrapper.
    (Auto-generated binding boilerplate.)"""
    # default constructor
    cls.add_constructor([])
    # copy constructor
    cls.add_constructor([param('ns3::EpcX2Sap::SecondaryHandoverParams const &', 'arg0')])
    cls.add_instance_attribute('imsi', 'uint64_t', is_const=False)
    cls.add_instance_attribute('oldCellId', 'uint16_t', is_const=False)
    cls.add_instance_attribute('targetCellId', 'uint16_t', is_const=False)
    return
class PointNet2ClsSsg(nn.Module):
    """PointNet++ classification network with single-scale grouping (SSG):
    three set-abstraction stages followed by a three-layer MLP head that
    outputs class log-probabilities."""

    def __init__(self, num_classes=40):
        super(PointNet2ClsSsg, self).__init__()
        # Hierarchical feature extraction; the last stage groups all points
        # into a single global feature vector.
        self.sa1 = PointNetSetAbstraction(npoint=512, radius=0.2, nsample=32,
                                          in_channel=3, mlp=[64, 64, 128],
                                          group_all=False)
        self.sa2 = PointNetSetAbstraction(npoint=128, radius=0.4, nsample=64,
                                          in_channel=128 + 3, mlp=[128, 128, 256],
                                          group_all=False)
        self.sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None,
                                          in_channel=256 + 3, mlp=[256, 512, 1024],
                                          group_all=True)
        # Classification head: 1024 -> 512 -> 256 -> num_classes.
        self.fc1 = nn.Linear(1024, 512)
        self.bn1 = nn.BatchNorm1d(512)
        self.drop1 = nn.Dropout(0.4)
        self.fc2 = nn.Linear(512, 256)
        self.bn2 = nn.BatchNorm1d(256)
        self.drop2 = nn.Dropout(0.4)
        self.fc3 = nn.Linear(256, num_classes)

    def forward(self, xyz):
        """Classify a batch of point clouds; returns log-probabilities of
        shape (batch, num_classes)."""
        batch = xyz.shape[0]
        coords, feats = self.sa1(xyz, None)
        coords, feats = self.sa2(coords, feats)
        _, feats = self.sa3(coords, feats)
        out = feats.view(batch, 1024)
        out = self.drop1(F.relu(self.bn1(self.fc1(out))))
        out = self.drop2(F.relu(self.bn2(self.fc2(out))))
        return F.log_softmax(self.fc3(out), -1)
def build_generic_retinanet_model(model, add_conv_body_func, freeze_conv_body=False):
    """Assemble a RetinaNet model: conv backbone plus FPN RetinaNet heads,
    replicated per device by optim.build_data_parallel_model.

    Returns the (mutated) model helper. Loss gradients are attached only in
    training mode.

    NOTE(review): `freeze_conv_body` is accepted but never read here —
    presumably honored inside `add_conv_body_func`; confirm at the call site.
    """
    def _single_gpu_build_func(model):
        # Build the per-GPU graph: backbone, heads, and (if training) losses.
        (blobs, dim, spatial_scales) = add_conv_body_func(model)
        if (not model.train):
            # Keep a backbone-only net around for inference-time feature use.
            model.conv_body_net = model.net.Clone('conv_body_net')
        retinanet_heads.add_fpn_retinanet_outputs(model, blobs, dim, spatial_scales)
        if model.train:
            loss_gradients = retinanet_heads.add_fpn_retinanet_losses(model)
        return (loss_gradients if model.train else None)
    optim.build_data_parallel_model(model, _single_gpu_build_func)
    return model
def _squared_error(trajectory1: np.ndarray, trajectory2: np.ndarray) -> np.ndarray:
    """Per-step squared Euclidean error between two trajectories (the shorter
    one is padded first), summed over the last (coordinate) axis."""
    trajectory1, trajectory2 = pad_shorter_trajectory(trajectory1, trajectory2)
    diff = trajectory1 - trajectory2
    return np.square(diff).sum(axis=-1)
class AMR(object):
    """In-memory AMR (Abstract Meaning Representation) graph.

    Stores parallel per-node lists: `nodes` (variable names), `node_values`
    (concepts), `relations` ([relation, target-node] pairs) and `attributes`
    ([attribute, constant] pairs). The first node is the root.

    Fixes vs. the previous revision:
    - `get_amr_line` and `parse_AMR_line` are utilities that take no
      `self`/`cls`; they are now marked @staticmethod (matching upstream
      smatch) so they can be called on instances as well as on the class.
    - In `parse_AMR_line`, quoted attribute values were appended as
      `[[v2[0]], value]` (relation name wrapped in a list), inconsistent with
      every other branch and with `get_triples`, which expects `l[0]` to be a
      string; now `[v2[0], value]`.
    """

    def __init__(self, node_list=None, node_value_list=None, relation_list=None, attribute_list=None):
        """Build an AMR from parallel per-node lists (all shallow-copied)."""
        if node_list is None:
            self.nodes = []
            self.root = None
        else:
            # Copy so later mutation of the caller's list cannot affect us.
            self.nodes = node_list[:]
            if len(node_list) != 0:
                self.root = node_list[0]
            else:
                self.root = None
        if node_value_list is None:
            self.node_values = []
        else:
            self.node_values = node_value_list[:]
        if relation_list is None:
            self.relations = []
        else:
            self.relations = relation_list[:]
        if attribute_list is None:
            self.attributes = []
        else:
            self.attributes = attribute_list[:]

    def rename_node(self, prefix):
        """Rename every node to `prefix + position`, remapping relation
        targets accordingly (attribute constants are left untouched)."""
        node_map_dict = {}
        for i in range(0, len(self.nodes)):
            node_map_dict[self.nodes[i]] = prefix + str(i)
        for i, v in enumerate(self.nodes):
            self.nodes[i] = node_map_dict[v]
        # Relation entries are [name, target-node]; remap the targets.
        for node_relations in self.relations:
            for i, l in enumerate(node_relations):
                node_relations[i][1] = node_map_dict[l[1]]

    def get_triples(self):
        """Return (instance_triples, attribute_triples, relation_triples),
        each triple being (label, source, value)."""
        instance_triple = []
        relation_triple = []
        attribute_triple = []
        for i in range(len(self.nodes)):
            instance_triple.append(('instance', self.nodes[i], self.node_values[i]))
            for l in self.relations[i]:
                relation_triple.append((l[0], self.nodes[i], l[1]))
            for l in self.attributes[i]:
                attribute_triple.append((l[0], self.nodes[i], l[1]))
        return (instance_triple, attribute_triple, relation_triple)

    def get_triples2(self):
        """Like get_triples, but attributes are folded into the relation list,
        returning only (instance_triples, relation_triples)."""
        instance_triple = []
        relation_triple = []
        for i in range(len(self.nodes)):
            instance_triple.append(('instance', self.nodes[i], self.node_values[i]))
            for l in self.relations[i]:
                relation_triple.append((l[0], self.nodes[i], l[1]))
            for l in self.attributes[i]:
                relation_triple.append((l[0], self.nodes[i], l[1]))
        return (instance_triple, relation_triple)

    def __str__(self):
        """Human-readable dump: one section per node with its value,
        relations and attributes."""
        lines = []
        for i in range(len(self.nodes)):
            lines.append('Node ' + str(i) + ' ' + self.nodes[i])
            lines.append('Value: ' + self.node_values[i])
            lines.append('Relations:')
            for relation in self.relations[i]:
                lines.append('Node ' + relation[1] + ' via ' + relation[0])
            for attribute in self.attributes[i]:
                lines.append('Attribute: ' + attribute[0] + ' value ' + attribute[1])
        return '\n'.join(lines)

    def __repr__(self):
        return self.__str__()

    def output_amr(self):
        """Print this AMR to the module's DEBUG_LOG stream."""
        print(self.__str__(), file=DEBUG_LOG)

    @staticmethod
    def get_amr_line(input_f):
        """Read one AMR from a file object: consume lines until a blank line
        follows some content, skipping '#' comment lines; return the AMR
        joined into a single string ('' at end of input)."""
        cur_amr = []
        has_content = False
        for line in input_f:
            line = line.strip()
            if line == '':
                if not has_content:
                    # Leading blank lines: keep scanning for the next AMR.
                    continue
                else:
                    # Blank line after content terminates this AMR.
                    break
            if line.strip().startswith('#'):
                continue
            else:
                has_content = True
                cur_amr.append(line.strip())
        return ''.join(cur_amr)

    @staticmethod
    def parse_AMR_line(line):
        """Parse one PENMAN-notation AMR string into an AMR object.

        Returns None (after printing to ERROR_LOG) on any format error.
        Inverse relations ('-of', except a few lexical exceptions) and 'mod'
        are normalized to their canonical direction.
        """
        # Relations ending in '-of' that are NOT inverses and must be kept.
        exceptions = set(['prep-on-behalf-of', 'prep-out-of', 'consist-of'])

        def update_triple(node_relation_dict, u, r, v):
            # Normalize: 'X-of' is the inverse of 'X'; 'mod' inverts 'domain'.
            if r.endswith('-of') and not (r in exceptions):
                node_relation_dict[v].append((r[:-3], u))
            elif r == 'mod':
                node_relation_dict[v].append(('domain', u))
            else:
                node_relation_dict[u].append((r, v))

        # Parser states: 0 between tokens, 1 reading a node name, 2 reading a
        # relation (and possibly its literal value), 3 reading a node value.
        state = 0
        stack = []               # nesting of node names
        cur_charseq = []         # characters of the token being read
        node_dict = {}           # node name -> node value
        node_name_list = []      # node names in first-seen order
        node_relation_dict1 = defaultdict(list)  # relations to known nodes
        node_relation_dict2 = defaultdict(list)  # relations to literals/late nodes
        cur_relation_name = ''
        in_quote = False
        for i, c in enumerate(line.strip()):
            if c == ' ':
                # Spaces separate relation name from value while in state 2.
                if state == 2:
                    cur_charseq.append(c)
                continue
            if c == '"':
                # Closing quote inserts a '_' placeholder; quotes themselves
                # are not kept in the token.
                if in_quote:
                    cur_charseq.append('_')
                in_quote = not in_quote
            elif c == '(':
                if in_quote:
                    cur_charseq.append(c)
                    continue
                if state == 2:
                    # Start of a child node: the buffered text is the relation.
                    if cur_relation_name != '':
                        print('Format error when processing ', line[0:i + 1], file=ERROR_LOG)
                        return None
                    cur_relation_name = ''.join(cur_charseq).strip()
                    cur_charseq[:] = []
                state = 1
            elif c == ':':
                if in_quote:
                    cur_charseq.append(c)
                    continue
                if state == 3:
                    # Finish the pending node value before the next relation.
                    node_value = ''.join(cur_charseq)
                    cur_charseq[:] = []
                    cur_node_name = stack[-1]
                    node_dict[cur_node_name] = node_value
                elif state == 2:
                    # Previous relation had a literal value: "name value".
                    temp_attr_value = ''.join(cur_charseq)
                    cur_charseq[:] = []
                    parts = temp_attr_value.split()
                    if len(parts) < 2:
                        print('Error in processing; part len < 2', line[0:i + 1], file=ERROR_LOG)
                        return None
                    relation_name = parts[0].strip()
                    relation_value = parts[1].strip()
                    if len(stack) == 0:
                        print('Error in processing', line[:i], relation_name, relation_value, file=ERROR_LOG)
                        return None
                    # Unknown target: may be a literal or a forward reference.
                    if relation_value not in node_dict:
                        update_triple(node_relation_dict2, stack[-1], relation_name, relation_value)
                    else:
                        update_triple(node_relation_dict1, stack[-1], relation_name, relation_value)
                state = 2
            elif c == '/':
                if in_quote:
                    cur_charseq.append(c)
                    continue
                if state == 1:
                    # Buffered text is the new node's name.
                    node_name = ''.join(cur_charseq)
                    cur_charseq[:] = []
                    if node_name in node_dict:
                        print('Duplicate node name ', node_name, ' in parsing AMR', file=ERROR_LOG)
                        return None
                    stack.append(node_name)
                    node_name_list.append(node_name)
                    if cur_relation_name != '':
                        # Link the new node to its parent on the stack.
                        update_triple(node_relation_dict1, stack[-2], cur_relation_name, node_name)
                        cur_relation_name = ''
                else:
                    print('Error in parsing AMR', line[0:i + 1], file=ERROR_LOG)
                    return None
                state = 3
            elif c == ')':
                if in_quote:
                    cur_charseq.append(c)
                    continue
                if len(stack) == 0:
                    print('Unmatched parenthesis at position', i, 'in processing', line[0:i + 1], file=ERROR_LOG)
                    return None
                if state == 2:
                    # Trailing "relation value" pair inside the closing node.
                    temp_attr_value = ''.join(cur_charseq)
                    cur_charseq[:] = []
                    parts = temp_attr_value.split()
                    if len(parts) < 2:
                        print('Error processing', line[:i + 1], temp_attr_value, file=ERROR_LOG)
                        return None
                    relation_name = parts[0].strip()
                    relation_value = parts[1].strip()
                    if relation_value not in node_dict:
                        update_triple(node_relation_dict2, stack[-1], relation_name, relation_value)
                    else:
                        update_triple(node_relation_dict1, stack[-1], relation_name, relation_value)
                elif state == 3:
                    # Node value ends at the closing parenthesis.
                    node_value = ''.join(cur_charseq)
                    cur_charseq[:] = []
                    cur_node_name = stack[-1]
                    node_dict[cur_node_name] = node_value
                stack.pop()
                cur_relation_name = ''
                state = 0
            else:
                cur_charseq.append(c)
        # Assemble the per-node parallel lists.
        node_value_list = []
        relation_list = []
        attribute_list = []
        for v in node_name_list:
            if v not in node_dict:
                print('Error: Node name not found', v, file=ERROR_LOG)
                return None
            else:
                node_value_list.append(node_dict[v])
            node_rel_list = []
            node_attr_list = []
            if v in node_relation_dict1:
                for v1 in node_relation_dict1[v]:
                    node_rel_list.append([v1[0], v1[1]])
            if v in node_relation_dict2:
                for v2 in node_relation_dict2[v]:
                    # Quoted values are constants -> attributes (strip quotes).
                    if v2[1][0] == '"' and v2[1][-1] == '"':
                        node_attr_list.append([v2[0], v2[1][1:-1]])
                    # Forward references resolved by now -> real relations.
                    elif v2[1] in node_dict:
                        node_rel_list.append([v2[0], v2[1]])
                    else:
                        node_attr_list.append([v2[0], v2[1]])
            relation_list.append(node_rel_list)
            attribute_list.append(node_attr_list)
        # Mark the root node with the conventional TOP attribute.
        attribute_list[0].append(['TOP', 'top'])
        result_amr = AMR(node_name_list, node_value_list, relation_list, attribute_list)
        return result_amr
class OBJReconverter():
    """Rebuilds a solid body (via OpenCascade/pythonOCC) from sketch faces
    parsed out of an OBJ-style intermediate representation, and emits a
    textual encoding of the sketch geometry alongside it ('v' vertex lines
    plus 'l'/'a'/'c' curve records referencing vertex indices)."""

    def __init__(self):
        # De-duplication table "tag:x..y.." -> [x, y]; insertion order defines
        # the vertex indices handed out by save_vertex.
        self.vertex_dict = OrderedDict()
        self.PRECISION = 1e-05
        self.eps = 1e-07
        self.x_axis = gp_Dir(1.0, 0.0, 0.0)

    def convert_curve(self, curve):
        """Translate an in-memory 2D curve (type 'circle'/'line'/'arc') into
        the JSON dict format consumed by parse_curve; z is always 0 because
        the curve lives in the sketch plane."""
        json_curve = {}
        if (curve.type == 'circle'):
            json_curve['type'] = 'Circle3D'
            json_curve['center_point'] = {'x': curve.center[0], 'y': curve.center[1], 'z': 0}
            json_curve['radius'] = curve.radius
        if (curve.type == 'line'):
            json_curve['type'] = 'Line3D'
            json_curve['start_point'] = {'x': curve.start[0], 'y': curve.start[1], 'z': 0}
            json_curve['end_point'] = {'x': curve.end[0], 'y': curve.end[1], 'z': 0}
        if (curve.type == 'arc'):
            json_curve['type'] = 'Arc3D'
            json_curve['start_point'] = {'x': curve.start[0], 'y': curve.start[1], 'z': 0}
            json_curve['end_point'] = {'x': curve.end[0], 'y': curve.end[1], 'z': 0}
            json_curve['mid_point'] = {'x': curve.mid[0], 'y': curve.mid[1], 'z': 0}
            json_curve['center_point'] = {'x': curve.center[0], 'y': curve.center[1], 'z': 0}
        json_curve['is_outer'] = curve.is_outer
        return json_curve

    def convert_vertices(self):
        """Serialize every recorded vertex as an OBJ 'v x y' line, in
        insertion order (matching save_vertex indices)."""
        vertex_strings = ''
        for pt in self.vertex_dict.values():
            vertex_string = f'''v {pt[0]} {pt[1]}
'''
            vertex_strings += vertex_string
        return vertex_strings

    def parse_obj(self, faces, meta_info):
        """Stitch each loop's curve endpoints into a closed chain, then build
        and extrude the resulting profile faces.

        Returns (solid, curve_strings, curve_count).
        NOTE(review): assumes meta_info carries 't_orig'/'t_x'/'t_y'/'t_z',
        'set_op' and 'extrude_value' keys — confirm against the producer.
        """
        for face in faces:
            for loop in face:
                if (len(loop) > 1):
                    # Snap each curve's endpoint onto the start of the next
                    # curve so consecutive curves share an exact vertex.
                    for (idx, curve) in enumerate(loop[:(- 1)]):
                        next_curve = np.vstack([loop[(idx + 1)].start, loop[(idx + 1)].end])
                        diff1 = np.sum(np.abs((curve.start - next_curve)), 1)
                        diff2 = np.sum(np.abs((curve.end - next_curve)), 1)
                        if ((min(diff2) == 0) or (min(diff1) == 0)):
                            # Already exactly connected.
                            continue
                        assert ((min(diff1) < 0.001) or (min(diff2) < 0.001))
                        if (min(diff1) > min(diff2)):
                            # curve.end is the shared point.
                            min_idx = np.argmin(diff2)
                            if (min_idx == 0):
                                loop[(idx + 1)].start_idx = curve.end_idx
                                loop[(idx + 1)].start = curve.end
                            else:
                                loop[(idx + 1)].end_idx = curve.end_idx
                                loop[(idx + 1)].end = curve.end
                        else:
                            # curve.start is the shared point.
                            min_idx = np.argmin(diff1)
                            if (min_idx == 0):
                                loop[(idx + 1)].start_idx = curve.start_idx
                                loop[(idx + 1)].start = curve.start
                            else:
                                loop[(idx + 1)].end_idx = curve.start_idx
                                loop[(idx + 1)].end = curve.start
                    # Close the chain: snap the last curve back onto the first.
                    shared_idx = list(set([loop[(- 2)].start_idx, loop[(- 2)].end_idx]).intersection(set([loop[(- 1)].start_idx, loop[(- 1)].end_idx])))
                    assert (len(shared_idx) >= 1)
                    if (len(shared_idx) == 2):
                        # Two shared endpoints can only happen for a 2-curve loop.
                        assert (len(loop) == 2)
                    else:
                        if (shared_idx[0] == loop[(- 1)].start_idx):
                            do_start = False
                        else:
                            do_start = True
                        start_curve = np.vstack([loop[0].start, loop[0].end])
                        if do_start:
                            diff = np.sum(np.abs((loop[(- 1)].start - start_curve)), 1)
                        else:
                            diff = np.sum(np.abs((loop[(- 1)].end - start_curve)), 1)
                        assert (min(diff) < 0.001)
                        min_idx = np.argmin(diff)
                        if (min_idx == 0):
                            if do_start:
                                loop[(- 1)].start_idx = loop[0].start_idx
                                loop[(- 1)].start = loop[0].start
                            else:
                                loop[(- 1)].end_idx = loop[0].start_idx
                                loop[(- 1)].end = loop[0].start
                        elif do_start:
                            loop[(- 1)].start_idx = loop[0].end_idx
                            loop[(- 1)].start = loop[0].end
                        else:
                            loop[(- 1)].end_idx = loop[0].end_idx
                            loop[(- 1)].end = loop[0].end
        # Convert the stitched loops into the JSON profile structure.
        extrusion = {}
        extrusion['profiles'] = []
        for face in faces:
            profile = {}
            profile['loops'] = []
            for loop in face:
                pl = {}
                pl['profile_curves'] = []
                for curve in loop:
                    pl['profile_curves'].append(self.convert_curve(curve))
                profile['loops'].append(pl)
            extrusion['profiles'].append(profile)
        # Sketch plane placement from the metadata transform.
        sketch = {}
        transform = {}
        transform['origin'] = {'x': meta_info['t_orig'][0], 'y': meta_info['t_orig'][1], 'z': meta_info['t_orig'][2]}
        transform['x_axis'] = {'x': meta_info['t_x'][0], 'y': meta_info['t_x'][1], 'z': meta_info['t_x'][2]}
        transform['y_axis'] = {'x': meta_info['t_y'][0], 'y': meta_info['t_y'][1], 'z': meta_info['t_y'][2]}
        transform['z_axis'] = {'x': meta_info['t_z'][0], 'y': meta_info['t_z'][1], 'z': meta_info['t_z'][2]}
        sketch['transform'] = transform
        extrude_params = {}
        extrude_params['extrude_type'] = meta_info['set_op']
        extrude_params['extrude_values'] = meta_info['extrude_value']
        # Build one face per profile, fuse them, then extrude.
        all_faces = []
        curve_strings = ''
        curve_count = 0
        for profile in extrusion['profiles']:
            (ref_face, face, curve_string, c_count) = self.parse_sketch(sketch, profile)
            curve_strings += curve_string
            curve_count += c_count
            all_faces.append(face)
        plane_face = all_faces[0]
        for face in all_faces[1:]:
            plane_face = self.my_op(plane_face, face, 'fuse')
        solid = self.extrude_face(ref_face, plane_face, extrude_params)
        return (solid, curve_strings, curve_count)

    def my_op(self, big, small, op_name):
        """Boolean op ('cut'/'fuse'/'common') between two shapes with the
        class fuzzy tolerance applied."""
        if (op_name == 'cut'):
            op = BRepAlgoAPI_Cut(big, small)
        elif (op_name == 'fuse'):
            op = BRepAlgoAPI_Fuse(big, small)
        elif (op_name == 'common'):
            op = BRepAlgoAPI_Common(big, small)
        op.SetFuzzyValue(self.PRECISION)
        op.Build()
        return op.Shape()

    def build_body(self, face, normal, value):
        """Extrude `face` by `value` along `normal` (linear prism)."""
        extrusion_vec = gp_Vec(normal).Multiplied(value)
        make_prism = BRepPrimAPI_MakePrism(face, extrusion_vec)
        make_prism.Build()
        prism = make_prism.Prism()
        return prism.Shape()

    def extrudeBasedOnType(self, face, normal, distance):
        """Extrude between two signed offsets distance=[small, large] along
        `normal`, combining prisms so the solid spans exactly that interval."""
        if (not (distance[0] < distance[1])):
            raise Exception('incorrect distance')
        large_value = distance[1]
        small_value = distance[0]
        if (large_value == 0):
            # Entirely on the negative side of the sketch plane.
            return self.build_body(face, (- normal), (- small_value))
        elif (small_value == 0):
            # Entirely on the positive side.
            return self.build_body(face, normal, large_value)
        elif (np.sign(large_value) == np.sign(small_value)):
            # Same side but offset: subtract the nearer prism from the farther.
            if (large_value < 0):
                body1 = self.build_body(face, (- normal), (- small_value))
                body2 = self.build_body(face, (- normal), (- large_value))
                return self.my_op(body1, body2, 'cut')
            else:
                assert (large_value > 0)
                body1 = self.build_body(face, normal, small_value)
                body2 = self.build_body(face, normal, large_value)
                return self.my_op(body2, body1, 'cut')
        else:
            # Straddles the sketch plane: fuse the two half-prisms.
            assert (np.sign(large_value) != np.sign(small_value))
            body1 = self.build_body(face, normal, large_value)
            body2 = self.build_body(face, (- normal), (- small_value))
            return self.my_op(body1, body2, 'fuse')

    def extrude_face(self, ref_face, face, extrude_params):
        """Extrude `face` using the plane normal of `ref_face` and the signed
        extent pair in extrude_params['extrude_values']."""
        distance = extrude_params['extrude_values']
        surf = BRepAdaptor_Surface(ref_face).Plane()
        normal = surf.Axis().Direction()
        extruded_shape = self.extrudeBasedOnType(face, normal, distance)
        return extruded_shape

    def parse_sketch(self, sketch, profile):
        """Build the profile's face: outer loop face minus inner loop faces.

        Returns (reference outer face, final face, textual encoding,
        curve count). Raises on invalid geometry.
        """
        transform = get_transform(sketch['transform'])
        outer_facelist = []
        inner_facelist = []
        curve_count = 0
        outer_string = []
        inner_string = []
        plane = create_sketch_plane(sketch['transform'])
        for (idx, pl) in enumerate(profile['loops']):
            (loop, curve_string, num_curve) = self.parse_loop(pl['profile_curves'], transform)
            face_builder = BRepBuilderAPI_MakeFace(plane, loop)
            if (not face_builder.IsDone()):
                raise Exception('face builder not done')
            face = face_builder.Face()
            # Heal orientation issues before validity checking.
            fixer = ShapeFix_Face(face)
            fixer.SetPrecision(self.PRECISION)
            fixer.FixOrientation()
            analyzer = BRepCheck_Analyzer(fixer.Face())
            if (not analyzer.IsValid()):
                raise Exception('face check failed')
            curve_count += num_curve
            # The first curve's flag marks the whole loop as outer/inner.
            if pl['profile_curves'][0]['is_outer']:
                outer_facelist.append(fixer.Face())
                outer_string.append(curve_string)
            else:
                inner_facelist.append(fixer.Face())
                inner_string.append(curve_string)
        assert (len(outer_facelist) > 0)
        final_face = outer_facelist[0]
        for face in outer_facelist[1:]:
            final_face = self.my_op(final_face, face, 'fuse')
        for face in inner_facelist:
            # Inner loops are holes.
            final_face = self.my_op(final_face, face, 'cut')
        assert (len(outer_string) == 1)
        out_str = ''
        in_str = ''
        for c_str in outer_string:
            out_str += (('out\n' + c_str) + '\n')
        for c_str in inner_string:
            in_str += (('in\n' + c_str) + '\n')
        final_str = (('face\n' + out_str) + in_str)
        return (outer_facelist[0], final_face, final_str, curve_count)

    def parse_loop(self, profile_loop, transform):
        """Assemble one loop's curves into a closed wire.

        Returns (wire, concatenated curve records, curve count).
        """
        topo_wire = BRepBuilderAPI_MakeWire()
        curve_strings = ''
        curve_count = 0
        for profile_curve in profile_loop:
            (curve_edge, curve_string) = self.parse_curve(profile_curve, transform)
            topo_wire.Add(curve_edge)
            if (not topo_wire.IsDone()):
                raise Exception('wire builder not done')
            curve_string += '\n'
            curve_count += 1
            curve_strings += curve_string
        # Force the wire closed within the class precision.
        fixer = ShapeFix_Wire()
        fixer.Load(topo_wire.Wire())
        fixer.SetPrecision(self.PRECISION)
        fixer.FixClosed()
        fixer.Perform()
        return (fixer.Wire(), curve_strings, curve_count)

    def parse_curve(self, curve, transform):
        """Dispatch a JSON curve dict to the matching edge builder."""
        if (curve['type'] == 'Line3D'):
            return self.create_line(curve, transform)
        elif (curve['type'] == 'Circle3D'):
            return self.create_circle(curve, transform)
        elif (curve['type'] == 'Arc3D'):
            return self.create_arc(curve, transform)
        else:
            raise Exception('unknown curve type')

    def create_line(self, line, transform):
        """Build a line edge; returns (edge, 'l start_idx end_idx') record."""
        start = create_point(line['start_point'], transform)
        end = create_point(line['end_point'], transform)
        if (start.Distance(end) == 0):
            raise Exception('start/end point same location')
        topo_edge = BRepBuilderAPI_MakeEdge(start, end)
        # '+ 0.0' normalizes -0.0 to 0.0 so vertex keys de-duplicate cleanly.
        star_idx = self.save_vertex((line['start_point']['x'] + 0.0), (line['start_point']['y'] + 0.0), 'p')
        end_idx = self.save_vertex((line['end_point']['x'] + 0.0), (line['end_point']['y'] + 0.0), 'p')
        curve_string = f'l {star_idx} {end_idx}'
        return (topo_edge.Edge(), curve_string)

    def create_arc(self, arc, transform):
        """Build a three-point arc edge; returns (edge,
        'a start mid center end') record."""
        start = create_point(arc['start_point'], transform)
        mid = create_point(arc['mid_point'], transform)
        end = create_point(arc['end_point'], transform)
        arc_occ = GC_MakeArcOfCircle(start, mid, end).Value()
        topo_edge = BRepBuilderAPI_MakeEdge(arc_occ)
        start_idx = self.save_vertex((arc['start_point']['x'] + 0.0), (arc['start_point']['y'] + 0.0), 'p')
        end_idx = self.save_vertex((arc['end_point']['x'] + 0.0), (arc['end_point']['y'] + 0.0), 'p')
        center_idx = self.save_vertex((arc['center_point']['x'] + 0.0), (arc['center_point']['y'] + 0.0), 'p')
        mid_idx = self.save_vertex((arc['mid_point']['x'] + 0.0), (arc['mid_point']['y'] + 0.0), 'p')
        curve_string = f'a {start_idx} {mid_idx} {center_idx} {end_idx}'
        return (topo_edge.Edge(), curve_string)

    def create_circle(self, circle, transform):
        """Build a full-circle edge; returns (edge, 'c center_idx radius_idx')
        record. The radius is saved as an 'r'-tagged pseudo-vertex."""
        center = create_point(circle['center_point'], transform)
        radius = circle['radius']
        normal = create_unit_vec({'x': 0.0, 'y': 0.0, 'z': 1.0}, transform)
        ref_vector3d = self.x_axis.Transformed(transform)
        axis = gp_Ax2(center, normal, ref_vector3d)
        gp_circle = gp_Circ(axis, abs(float(radius)))
        topo_edge = BRepBuilderAPI_MakeEdge(gp_circle)
        center_idx = self.save_vertex((circle['center_point']['x'] + 0.0), (circle['center_point']['y'] + 0.0), 'p')
        radius_idx = self.save_vertex((abs(float(radius)) + 0.0), 0, 'r')
        curve_string = f'c {center_idx} {radius_idx}'
        return (topo_edge.Edge(), curve_string)

    def save_vertex(self, h_x, h_y, text):
        """Intern (h_x, h_y) under tag `text` and return its stable index;
        duplicate keys return the index of the existing entry."""
        unique_key = f'{text}:x{h_x}y{h_y}'
        index = 0
        for key in self.vertex_dict.keys():
            if (unique_key == key):
                return index
            index += 1
        self.vertex_dict[unique_key] = [h_x, h_y]
        return index
def __compare_weight_handler__(compare, weight, weight_type):
    """Validate a weight dict and install it on a Compare object.

    :param compare: Compare object being configured
    :param weight: mapping from class/benchmark name to a numeric weight
    :param weight_type: one of 'class_weight', 'class_benchmark_weight',
        'overall_benchmark_weight' — selects the expected key set and the
        error/warning messages
    :return: None
    :raises pycmCompareError: if `weight` is not a dict, or its keys do not
        exactly match the expected set for `weight_type`
    """
    valid_dict = {'class_weight': compare.classes, 'class_benchmark_weight': CLASS_BENCHMARK_SCORE_DICT.keys(), 'overall_benchmark_weight': OVERALL_BENCHMARK_SCORE_DICT.keys()}
    error_dict = {'class_weight': COMPARE_CLASS_WEIGHT_ERROR, 'class_benchmark_weight': COMPARE_CLASS_BENCHMARK_WEIGHT_ERROR, 'overall_benchmark_weight': COMPARE_OVERALL_BENCHMARK_WEIGHT_ERROR}
    warning_dict = {'class_weight': COMPARE_CLASS_WEIGHT_WARNING, 'class_benchmark_weight': COMPARE_CLASS_BENCHMARK_WEIGHT_WARNING, 'overall_benchmark_weight': COMPARE_OVERALL_BENCHMARK_WEIGHT_WARNING}
    if (weight is None):
        # No weight supplied: leave the Compare object untouched.
        return None
    if (not isinstance(weight, dict)):
        raise pycmCompareError(error_dict[weight_type])
    if (set(weight.keys()) == set(valid_dict[weight_type])):
        # All values must be numeric and must not sum to zero; otherwise the
        # weight is silently ignored with a warning rather than an error.
        if (all([isfloat(x) for x in weight.values()]) and (sum(weight.values()) != 0)):
            setattr(compare, weight_type, weight)
        else:
            warn(warning_dict[weight_type], RuntimeWarning)
    else:
        raise pycmCompareError(error_dict[weight_type])
class SPSA(WrappedOptimizerBase):
    """Adapter exposing Qiskit's SPSA optimizer through the project's
    WrappedOptimizerBase interface.

    All tuning knobs are read once from `options` in __init__ and forwarded
    verbatim to `qiskit_optimizers.SPSA` inside `minimize`.
    """

    def __init__(self, options: dict = None, callback=default_callback):
        super().__init__()
        self.set_callback(callback)
        if (options is None):
            options = {}
        self.options = options
        # SPSA hyper-parameters; defaults mirror Qiskit's own defaults.
        self.maxiter = options.get('maxiter', 100)
        self.blocking = options.get('blocking', False)
        self.allowed_increase = options.get('allowed_increase', None)
        self.trust_region = options.get('trust_region', False)
        self.learning_rate = options.get('learning_rate', None)
        self.perturbation = options.get('perturbation', None)
        self.last_avg = options.get('last_avg', 1)
        self.resamplings = options.get('resamplings', 1)
        self.perturbation_dims = options.get('perturbation_dims', None)
        self.second_order = options.get('second_order', False)
        self.regularization = options.get('regularization', None)
        self.hessian_delay = options.get('hessian_delay', 0)
        self.lse_solver = options.get('lse_solver', None)
        self.initial_hessian = options.get('initial_hessian', None)
        # Kept separately because Qiskit's SPSA takes the raw callable.
        self.callback = callback

    def minimize(self, fun: callable, x0: np.ndarray, grad: callable = None, bounds=None) -> OptimizerResult:
        """Run SPSA from `x0` and convert Qiskit's result into the project's
        OptimizerResult (only x, nit and fun are copied over)."""
        spsa = qiskit_optimizers.SPSA(maxiter=self.maxiter, blocking=self.blocking, allowed_increase=self.allowed_increase, trust_region=self.trust_region, learning_rate=self.learning_rate, perturbation=self.perturbation, last_avg=self.last_avg, resamplings=self.resamplings, perturbation_dims=self.perturbation_dims, second_order=self.second_order, regularization=self.regularization, hessian_delay=self.hessian_delay, lse_solver=self.lse_solver, initial_hessian=self.initial_hessian, callback=self.callback)
        result_qiskit = spsa.minimize(fun=fun, x0=x0, jac=grad, bounds=bounds)
        result = OptimizerResult()
        result.x = result_qiskit.x
        result.nit = result_qiskit.nit
        result.fun = result_qiskit.fun
        return result
# NOTE(review): `_args(...)` appeared as a bare statement above the def — the
# torch.onnx `parse_args`-style decorator with its '@' lost in transcription.
# As a bare call its result was discarded and the symbolic was never wrapped;
# restored as a decorator. Confirm against upstream.
@_args('v', 'i', 'v', 'v', 'v', 'v')
def zeros_like(g, input, dtype=None, layout=None, device=None, pin_memory=False, memory_format=None):
    """ONNX symbolic for ``torch.zeros_like``: emit Shape(input) followed by
    ConstantOfShape with a zero of the requested dtype.

    `layout`, `device`, `pin_memory` and `memory_format` are accepted for
    signature compatibility only and are ignored — ONNX has no equivalent.
    """
    shape = g.op('Shape', input)
    if dtype is None:
        # Scalar-type index 6 == float32, the export-time default.
        dtype = 6
    return g.op('ConstantOfShape', shape,
                value_t=torch.tensor([0], dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
def run_resnet50_epoch(train_model, batch_size, epoch_size, skip_first_n_iter=0):
    """Run one epoch of the Caffe2 ResNet-50 benchmark and collect stats.

    :param train_model: data-parallel model helper whose net is already created
    :param batch_size: images processed per iteration
    :param epoch_size: images per epoch (iterations = epoch_size / batch_size)
    :param skip_first_n_iter: warm-up iterations excluded from timing totals
    :return: (train_examples, train_time, accuracy, loss) — timing covers only
        non-skipped iterations; accuracy/loss are read from the workspace after
        the last iteration
    """
    epoch_iters = int((epoch_size / batch_size))
    # Blob names are prefixed with the first device, e.g. 'gpu_0'.
    prefix = '{}_{}'.format(train_model._device_prefix, train_model._devices[0])
    train_time = 0.0
    train_examples = 0
    for i in range(epoch_iters):
        # First iteration may compile/allocate, so allow a much longer deadline.
        timeout = (600.0 if (i == 0) else 60.0)
        with timeout_guard.CompleteInTimeOrDie(timeout):
            t1 = time.time()
            workspace.RunNet(train_model.net.Proto().name)
            t2 = time.time()
            dt = (t2 - t1)
        if (i >= skip_first_n_iter):
            train_time += dt
            train_examples += batch_size
        fmt = 'Finished iteration {}/{} ({:.2f} images/sec)'
        print(fmt.format((i + 1), epoch_iters, (batch_size / dt)))
    accuracy = workspace.FetchBlob((prefix + '/accuracy'))
    loss = workspace.FetchBlob((prefix + '/loss'))
    # Sanity check against divergence.
    assert (loss < 40), 'Exploded gradients'
    return (train_examples, train_time, accuracy, loss)
def resolve_cache_dir(env_variable='MMF_CACHE_DIR', default='mmf'):
    """Resolve (and create if needed) MMF's cache directory.

    Resolution order: the `env_variable` override, else `<torch cache>/<default>`
    where the torch cache home comes from torch.hub (or TORCH_HOME /
    XDG_CACHE_HOME when torch.hub is unavailable). If the directory cannot be
    created for permission reasons, falls back to `.mmf_cache` under the MMF
    root. Returns the resolved path.
    """
    try:
        from torch.hub import _get_torch_home
        torch_cache_home = _get_torch_home()
    except ImportError:
        # Mirror torch.hub's own default resolution without importing it.
        xdg_cache = os.getenv('XDG_CACHE_HOME', '~/.cache')
        torch_cache_home = os.path.expanduser(
            os.getenv('TORCH_HOME', os.path.join(xdg_cache, 'torch')))
    cache_path = os.getenv(env_variable, os.path.join(torch_cache_home, default))
    if not PathManager.exists(cache_path):
        try:
            PathManager.mkdirs(cache_path)
        except PermissionError:
            # No write access to the shared cache: use a project-local one.
            cache_path = os.path.join(get_mmf_root(), '.mmf_cache')
            PathManager.mkdirs(cache_path)
    return cache_path
def parse_args():
    """Parse command-line arguments for NER fine-tuning with accelerate.

    Fixes two user-facing help-string typos from the previous revision:
    '--pad_to_max_lenght' -> '--pad_to_max_length' and 'returner' ->
    'returned'. Returns the parsed argparse.Namespace; creates --output_dir
    if given. Raises ValueError when neither a task name nor a train/
    validation file is supplied.
    """
    parser = argparse.ArgumentParser(description='Finetune a transformers model on a text classification task (NER) with accelerate library')
    parser.add_argument('--dataset_name', type=str, default=None, help='The name of the dataset to use (via the datasets library).')
    parser.add_argument('--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).')
    parser.add_argument('--train_file', type=str, default=None, help='A csv or a json file containing the training data.')
    parser.add_argument('--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
    parser.add_argument('--text_column_name', type=str, default=None, help='The column name of text to input in the file (a csv or JSON file).')
    parser.add_argument('--label_column_name', type=str, default=None, help='The column name of label to input in the file (a csv or JSON file).')
    parser.add_argument('--max_length', type=int, default=128, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_length` is passed.')
    parser.add_argument('--pad_to_max_length', action='store_true', help='If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.')
    parser.add_argument('--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=True)
    parser.add_argument('--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name')
    parser.add_argument('--tokenizer_name', type=str, default=None, help='Pretrained tokenizer name or path if not the same as model_name')
    parser.add_argument('--per_device_train_batch_size', type=int, default=4, help='Batch size (per device) for the training dataloader.')
    parser.add_argument('--per_device_eval_batch_size', type=int, default=8, help='Batch size (per device) for the evaluation dataloader.')
    parser.add_argument('--learning_rate', type=float, default=0.0001, help='Initial learning rate (after the potential warmup period) to use.')
    parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay to use.')
    parser.add_argument('--num_train_epochs', type=int, default=3, help='Total number of training epochs to perform.')
    parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--lr_scheduler_type', type=SchedulerType, default='linear', help='The scheduler type to use.', choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'])
    parser.add_argument('--num_warmup_steps', type=int, default=0, help='Number of steps for the warmup in the lr scheduler.')
    parser.add_argument('--output_dir', type=str, default=None, help='Where to store the final model.')
    parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.')
    parser.add_argument('--model_type', type=str, default=None, help='Model type to use if training from scratch.', choices=MODEL_TYPES)
    parser.add_argument('--label_all_tokens', action='store_true', help='Setting labels of all special tokens to -100 and thus PyTorch will ignore them.')
    parser.add_argument('--return_entity_level_metrics', action='store_true', help='Indication whether entity level metrics are to be returned.')
    parser.add_argument('--task_name', type=str, default='ner', choices=['ner', 'pos', 'chunk'], help='The name of the task.')
    parser.add_argument('--debug', action='store_true', help='Activate debug mode and run training only with a subset of data.')
    parser.add_argument('--label_schema', type=str, default='BIO')
    parser.add_argument('--label_list', type=str, default=None, help='Path of label list.')
    args = parser.parse_args()
    # NOTE(review): task_name defaults to 'ner', so this first condition can
    # never be true as written — kept for parity with the upstream script.
    if (args.task_name is None) and (args.train_file is None) and (args.validation_file is None):
        raise ValueError('Need either a task name or a training/validation file.')
    else:
        if args.train_file is not None:
            extension = args.train_file.split('.')[-1]
            assert (extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
        if args.validation_file is not None:
            extension = args.validation_file.split('.')[-1]
            assert (extension in ['csv', 'json']), '`validation_file` should be a csv or a json file.'
    if args.output_dir is not None:
        os.makedirs(args.output_dir, exist_ok=True)
    return args
def get_run_config(params_dict: DictConfig) -> Generator[(DictConfig, None, None)]:
    """Yield one run configuration per point in the sweep's cartesian product.

    The sweep dict is flattened to dotted keys; every combination of the
    per-key value lists becomes a fresh ``DictConfig``.
    """
    flat_params = flatten_sweep_params(params_dict)
    names = list(flat_params.keys())
    for combo in itertools.product(*convert_to_tuple(flat_params.values())):
        run_config = DictConfig({})
        for name, value in zip(names, combo):
            run_config[name] = value
        yield run_config
class Launcher(TmuxLauncher):
    """Tmux launcher for the AFHQ swapping-autoencoder experiments."""

    def options(self):
        """Base options shared by training and testing."""
        base = Options()
        base.set(dataroot='/mnt/localssd/datasets/afhq/afhq/train', dataset_mode='imagefolder', checkpoints_dir='./checkpoints/', num_gpus=8, batch_size=32, preprocess='resize', load_size=256, crop_size=256)
        return [base.specify(name='afhq_pretrained')]

    def train_options(self):
        """Training variants of the base options (resume + no eval)."""
        return [base_opt.specify(display_freq=1600, print_freq=480, continue_train=True, evaluation_metrics='none') for base_opt in self.options()]

    def test_options(self):
        """Three single-GPU test configurations: grid, swap, interpolation."""
        base = self.options()[0]
        grid = base.tag('swapping_grid').specify(num_gpus=1, batch_size=1, dataroot='./testphotos/afhq/', dataset_mode='imagefolder', evaluation_metrics='structure_style_grid_generation')
        swap = base.tag('simple_swapping').specify(num_gpus=1, batch_size=1, dataroot='.', evaluation_metrics='simple_swapping', input_structure_image='./testphotos/afhq/structure/flickr_dog_000846.jpg', input_texture_image='./testphotos/afhq/style/flickr_wild_001319.jpg', texture_mix_alpha=1.0)
        interp = base.tag('simple_interpolation').specify(num_gpus=1, batch_size=1, dataroot='.', evaluation_metrics='simple_swapping', input_structure_image='./testphotos/afhq/structure/flickr_dog_000846.jpg', input_texture_image='./testphotos/afhq/style/flickr_wild_001319.jpg', texture_mix_alpha='0.0 0.25 0.5 0.75 1.0')
        return [grid, swap, interp]
def register_Ns3OlsrTopologyTuple_methods(root_module, cls):
    """Register operators, constructors and attributes for ns3::olsr::TopologyTuple."""
    cls.add_binary_comparison_operator('==')
    cls.add_output_stream_operator()
    # Default constructor and copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::olsr::TopologyTuple const &', 'arg0')])
    # All four fields are plain mutable instance attributes.
    attributes = (
        ('destAddr', 'ns3::Ipv4Address'),
        ('expirationTime', 'ns3::Time'),
        ('lastAddr', 'ns3::Ipv4Address'),
        ('sequenceNumber', 'uint16_t'),
    )
    for attr_name, attr_type in attributes:
        cls.add_instance_attribute(attr_name, attr_type, is_const=False)
    return
def format_instructions(instructions: str) -> str:
    """Return *instructions* terminated by a newline; empty input stays empty."""
    return (instructions + '\n') if instructions else instructions
def collect_log_folders(log_dir):
    """Group log folder names under *log_dir* by task.

    Each entry (except the 'latest' symlink) is expected to be named with
    five dash-separated fields; the last three identify the task as
    (task_name, number_of_nodes, object_size).

    Returns a dict mapping that 3-tuple to the list of matching folder names.
    Raises ValueError on a name that does not have exactly five fields.
    """
    tasks = {}
    for filename in os.listdir(log_dir):
        if filename == 'latest':
            continue
        parts = filename.split('-')
        if len(parts) != 5:
            # Include the offending name: the original f-string interpolated
            # nothing, which made failures undiagnosable.
            raise ValueError(f'Unexpected log name ({filename}).')
        task = (parts[2], parts[3], parts[4])
        tasks.setdefault(task, []).append(filename)
    return tasks
def main():
    """Entry point: parse the single file argument, or print usage."""
    argv = sys.argv[1:]
    if len(argv) != 1:
        usage()
    else:
        parse(argv[0])
def test_get_time_line_value_no_interpolation(sequence_factory):
    """With interpolation disabled, a timestamp maps to the preceding raw value."""
    config.configuration.statistics_output.timeline_interval = 1
    config.configuration.statistics_output.timeline_interpolation = False
    t0 = time.time_ns()
    sequence_factory.set_start_time(t0)
    # Three samples, one nanosecond apart, with values 0, 1, 2.
    sequence_factory._time_stamps = [t0, t0 + 1, t0 + 2]
    sequence_factory._values = [0, 1, 2]
    assert sequence_factory._get_time_line_value(t0 + 1) == 0
class RteProcessor(DataProcessor):
    """Processor for the RTE data set (GLUE-style TSV files)."""

    def get_train_examples(self, data_dir):
        """Read train.tsv and build training examples."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        """Read dev.tsv and build dev examples."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')

    def get_test_examples(self, data_dir):
        """Read test.tsv and build test examples."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')

    def get_labels(self):
        """Return the two RTE labels."""
        return ['entailment', 'not_entailment']

    def _create_examples(self, lines, set_type):
        """Turn TSV rows into InputExamples, skipping the header row."""
        rows = iter(lines)
        next(rows, None)  # drop header
        return [
            InputExample(guid=('%s-%s' % (set_type, row[0])), text_a=row[1], text_b=row[2], label=row[(- 1)])
            for row in rows
        ]
def get_results(out, err):
    """Extract final loss/accuracy triples from a subprocess's stdout.

    Expects the second- and third-to-last lines of *out* (bytes) to match
    'Final loss. Train: x Dev: y Test: z' and the corresponding 'Final acc.'
    line. Returns the six matched strings as
    [train_loss, dev_loss, test_loss, train_acc, dev_acc, test_acc].

    Raises ValueError (chained to the parse failure) when the lines are
    missing or malformed, echoing *err* for debugging.
    """
    loss_pattern = '^Final loss. Train: (\\d.\\d+) Dev: (\\d.\\d+) Test: (\\d.\\d+)$'
    acc_pattern = '^Final acc. Train: (\\d.\\d+) Dev: (\\d.\\d+) Test: (\\d.\\d+)$'
    output = out.decode().split('\n')
    try:
        # AttributeError: re.match returned None; IndexError: too few lines.
        # The original bare `except:` also swallowed KeyboardInterrupt/SystemExit.
        (train_loss, dev_loss, test_loss) = re.match(loss_pattern, output[(- 3)]).groups()
        (train_acc, dev_acc, test_acc) = re.match(acc_pattern, output[(- 2)]).groups()
    except (AttributeError, IndexError) as exc:
        print('Output:', output)
        raise ValueError(('Error in subprocess: %s' % err.decode())) from exc
    return [train_loss, dev_loss, test_loss, train_acc, dev_acc, test_acc]
def move_drone(drone_dir_x, drone_dir_y, speed):
    """Advance the drone one step of length *speed* toward (drone_dir_x, drone_dir_y).

    Mutates the module-level position ``current_x_drone`` / ``current_y_drone``
    and moves the module-level ``drone`` artist; returns that artist.
    """
    global current_x_drone
    global current_y_drone
    # Only move while at least one unit away from the target; otherwise hold position.
    if (distance_2d(current_x_drone, current_y_drone, drone_dir_x, drone_dir_y) >= 1):
        # Heading toward the target; the 1e-06 keeps the x-delta nonzero so
        # atan2 never sees an exact (dy, 0) pair from a vertically-aligned target.
        theta = math.atan2((drone_dir_y - current_y_drone), ((drone_dir_x - current_x_drone) + 1e-06))
        next_x = (current_x_drone + (speed * math.cos(theta)))
        next_y = (current_y_drone + (speed * math.sin(theta)))
    else:
        next_x = current_x_drone
        next_y = current_y_drone
    # NOTE(review): set_xy suggests `drone` is a matplotlib patch — confirm.
    drone.set_xy([next_x, next_y])
    current_x_drone = next_x
    current_y_drone = next_y
    return drone
def ref_nearest_interpolate_3d(x, output_size, align_corners, half_pixel, half_pixel_for_nn):
    """NumPy reference for nearest-neighbor interpolation over the last 3 axes of *x*.

    Resizes the trailing (D, H, W) axes to *output_size*; all leading axes are
    treated as batch. The scale/index helpers implement the align-corners /
    half-pixel conventions used by the framework's resize operator.
    """
    oshape = output_size
    ishape = x.shape[(- 3):]
    # Collapse every leading axis into a single batch axis.
    xx = x.reshape((- 1), *ishape)
    ib = np.arange(xx.shape[0])
    # Per-axis input/output scale factors.
    scale = (compute_scale_for_nn(ishape[0], oshape[0], align_corners, half_pixel_for_nn), compute_scale_for_nn(ishape[1], oshape[1], align_corners, half_pixel_for_nn), compute_scale_for_nn(ishape[2], oshape[2], align_corners, half_pixel_for_nn))
    # Fractional source coordinates for every output position on each axis.
    index = (get_source_index_for_nn(scale[0], np.arange(oshape[0]), half_pixel, half_pixel_for_nn), get_source_index_for_nn(scale[1], np.arange(oshape[1]), half_pixel, half_pixel_for_nn), get_source_index_for_nn(scale[2], np.arange(oshape[2]), half_pixel, half_pixel_for_nn))
    # Truncate to integer indices, then clamp to the valid input range.
    index_1 = (index[0].astype(np.int32), index[1].astype(np.int32), index[2].astype(np.int32))
    index_2 = (np.minimum(index_1[0], (ishape[0] - 1)), np.minimum(index_1[1], (ishape[1] - 1)), np.minimum(index_1[2], (ishape[2] - 1)))
    # Outer-product indexing gathers the full (batch, D', H', W') output.
    yy = xx[np.ix_(ib, index_2[0], index_2[1], index_2[2])]
    # Restore the original leading axes around the new spatial shape.
    return yy.reshape((x.shape[:(- len(oshape))] + oshape))
def RealSort(ctx=None):
    """Return the Z3 real-number sort for *ctx* (the default context when None)."""
    context = _get_ctx(ctx)
    return ArithSortRef(Z3_mk_real_sort(context.ref()), context)
# NOTE(review): the line below looks like a mangled decorator — fairseq
# registers criteria via `@register_criterion('...')`; confirm against the
# original source.
_criterion('label_smoothed_cross_entropy_with_reg')
class LabelSmoothedCrossEntropyCriterionWithReg(LabelSmoothedCrossEntropyCriterion):
    """Label-smoothed cross-entropy augmented with auxiliary regularizers.

    Adds up to six weighted regularization terms (hidden-norm, diversity,
    consistency, decov, pre-norm, fract) controlled by --reg-lambda-* flags;
    all weights are zeroed once `self.step` exceeds `args.warmup_updates`.
    """

    def __init__(self, args, task):
        super().__init__(args, task)
        # Per-term regularization weights (all default to 0.0 in add_args).
        self.reg_lambda_hidden = args.reg_lambda_hidden
        self.reg_lambda_div = args.reg_lambda_div
        self.reg_lambda_consis = args.reg_lambda_consis
        self.reg_lambda_decov = args.reg_lambda_decov
        self.reg_lambda_pre = args.reg_lambda_pre
        self.reg_lambda_fract = args.reg_lambda_fract
        # Update counter; regularizers are disabled after max_step updates.
        self.step = 1
        self.max_step = args.warmup_updates
        # Ensures forward hooks are registered only once.
        self.hook_flag = False

    # NOTE(review): takes only `parser` and calls super via the class — this
    # looks like it should carry @staticmethod (fairseq convention); confirm.
    def add_args(parser):
        """Add the --reg-lambda-* command-line options."""
        super(LabelSmoothedCrossEntropyCriterionWithReg, LabelSmoothedCrossEntropyCriterionWithReg).add_args(parser)
        parser.add_argument('--reg-lambda-hidden', default=0.0, type=float, metavar='D', help='weight for the regularization loss')
        parser.add_argument('--reg-lambda-div', default=0.0, type=float, metavar='D', help='weight for the regularization loss')
        parser.add_argument('--reg-lambda-consis', default=0.0, type=float, metavar='D', help='weight for the regularization loss')
        parser.add_argument('--reg-lambda-fract', default=0.0, type=float, metavar='D', help='weight for the regularization loss')
        parser.add_argument('--reg-lambda-decov', default=0.0, type=float, metavar='D', help='weight for the regularization loss')
        parser.add_argument('--reg-lambda-pre', default=0.0, type=float, metavar='D', help='weight for the regularization loss')

    def forward(self, model, sample, reduce=True):
        """Compute label-smoothed CE plus the enabled regularization terms.

        Returns (loss, sample_size, logging_output) like the parent criterion.
        """
        self.step += 1
        # After warmup, switch every regularizer off.
        if (self.step > self.max_step):
            self.reg_lambda_hidden = 0.0
            self.reg_lambda_pre = 0.0
            self.reg_lambda_div = 0.0
            self.reg_lambda_consis = 0.0
            self.reg_lambda_decov = 0.0
            self.reg_lambda_fract = 0.0
        # Buffers filled by the forward hooks below during the model pass.
        self.decov_hooks = []
        self.pre_hooks = []
        def hook_fn(module, input, output):
            # Decov term: penalize off-diagonal covariance of layer-norm inputs.
            if (self.reg_lambda_decov != 0.0):
                decov_tmp = input[0]
                decov_tmp = (decov_tmp - decov_tmp.mean(dim=0, keepdim=True).mean(dim=1, keepdim=True))
                decov_tmp = decov_tmp.mean(dim=0)
                decov_tmp = (torch.mm(decov_tmp, decov_tmp.transpose(1, 0)) / decov_tmp.shape[0])
                decov_tmp = (0.5 * (decov_tmp.norm() - (torch.diag(decov_tmp).unsqueeze(0) * decov_tmp).norm()))
                decov_tmp = torch.abs(decov_tmp).mean()
                self.decov_hooks.append(torch.abs(decov_tmp))
                del decov_tmp
            # Pre term: mean absolute activation magnitude entering the norm.
            if (self.reg_lambda_pre != 0.0):
                pre_tmp = torch.abs(input[0]).sum(dim=(- 1)).mean()
                self.pre_hooks.append(pre_tmp)
                del pre_tmp
        # Register hooks on every encoder layer-norm exactly once.
        # NOTE(review): hooks are only installed on encoder layers, not decoder
        # layers, although pre_loss is averaged over both — confirm intent.
        if (not self.hook_flag):
            for layer in model.encoder.layers:
                layer.self_attn_layer_norm.register_forward_hook(hook_fn)
                layer.final_layer_norm.register_forward_hook(hook_fn)
            self.hook_flag = True
        net_output = model(**sample['net_input'], return_all_hiddens=True)
        (loss, nll_loss) = self.compute_loss(model, net_output, sample, reduce=reduce)
        # Aggregate the hook buffers into scalar losses.
        if (self.reg_lambda_decov != 0.0):
            decov_loss = 0.0
            for (idx, decov_inp) in enumerate(self.decov_hooks):
                decov_loss += (decov_inp / len(self.decov_hooks))
            decov_loss = (decov_loss * sample['ntokens'])
        if (self.reg_lambda_pre != 0.0):
            pre_loss = 0.0
            for (idx, pre_inp) in enumerate(self.pre_hooks):
                pre_loss += ((pre_inp / len(self.pre_hooks)) / (len(model.encoder.layers) + len(model.decoder.layers)))
            pre_loss = (pre_loss * sample['target'].size(0))
        del self.decov_hooks, self.pre_hooks
        torch.cuda.empty_cache()
        def check_mask_expert(norm):
            # Fetch the optional per-norm 'mask_expert' tensor, if present.
            if (norm is None):
                return None
            return norm.__dict__.get('mask_expert', None)
        # Collect mask_expert tensors from all encoder/decoder layer norms.
        mask_experts_enc = [check_mask_expert(model.encoder.layer_norm)]
        for layer in model.encoder.layers:
            mask_experts_enc.append(check_mask_expert(layer.final_layer_norm))
            mask_experts_enc.append(check_mask_expert(layer.self_attn_layer_norm))
        mask_experts_dec = [check_mask_expert(model.decoder.layer_norm)]
        for layer in model.decoder.layers:
            mask_experts_dec.append(check_mask_expert(layer.final_layer_norm))
            mask_experts_dec.append(check_mask_expert(layer.self_attn_layer_norm))
            mask_experts_dec.append(check_mask_expert(layer.encoder_attn_layer_norm))
        mask_experts_enc = list(filter((lambda x: (x is not None)), mask_experts_enc))
        mask_experts_dec = list(filter((lambda x: (x is not None)), mask_experts_dec))
        (mask_experts_enc_flag, mask_experts_dec_flag) = (len(mask_experts_enc), len(mask_experts_dec))
        if mask_experts_enc:
            mask_experts_enc = torch.stack(mask_experts_enc)
        if mask_experts_dec:
            mask_experts_dec = torch.stack(mask_experts_dec)
        # Diversity: coefficient of variation across dim 1;
        # consistency: coefficient of variation across dim 0.
        (div_loss, consis_loss) = (None, None)
        if mask_experts_enc_flag:
            div_loss = (mask_experts_enc.std(dim=1) / mask_experts_enc.mean(dim=1)).norm(dim=(- 1)).mean(dim=(- 1))
            consis_loss = (mask_experts_enc.std(dim=0) / mask_experts_enc.mean(dim=0)).norm(dim=(- 1)).mean(dim=(- 1))
        if mask_experts_dec_flag:
            if (div_loss is None):
                div_loss = (mask_experts_dec.std(dim=1) / mask_experts_dec.mean(dim=1)).norm(dim=(- 1)).mean(dim=(- 1))
                consis_loss += (mask_experts_dec.std(dim=0) / mask_experts_dec.mean(dim=0)).norm(dim=(- 1)).mean(dim=(- 1))
            else:
                # NOTE(review): this branch omits the trailing .mean(dim=-1)
                # that every other div_loss expression applies — confirm
                # whether that asymmetry is intentional.
                div_loss += (mask_experts_dec.std(dim=1) / mask_experts_dec.mean(dim=1)).norm(dim=(- 1))
                consis_loss += (mask_experts_dec.std(dim=0) / mask_experts_dec.mean(dim=0)).norm(dim=(- 1)).mean(dim=(- 1))
        sample_size = (sample['target'].size(0) if self.args.sentence_avg else sample['ntokens'])
        # Hidden-norm regularizer: keep each layer's hidden norms close to the
        # first layer's, averaged over encoder and decoder states.
        reg_loss = 0.0
        logging_output = dict()
        (dec_len, enc_len) = (len(net_output[1]['inner_states']), len(net_output[1]['encoder_states']))
        for inner_enc in net_output[1]['encoder_states'][2:]:
            reg_loss += ((inner_enc.norm(dim=(- 1)) - net_output[1]['encoder_states'][1].norm(dim=(- 1))).abs().sum(dim=1).mean(dim=0) / (enc_len - 1))
        for inner_dec in net_output[1]['inner_states'][2:]:
            reg_loss += ((inner_dec.norm(dim=(- 1)) - net_output[1]['inner_states'][1].norm(dim=(- 1))).abs().sum(dim=1).mean(dim=0) / (dec_len - 1))
        # Weighted accumulation of every enabled term into the main loss.
        if (self.reg_lambda_decov != 0.0):
            logging_output['reg_loss_decov'] = utils.item(decov_loss.data)
            loss += (self.reg_lambda_decov * decov_loss)
        if (self.reg_lambda_pre != 0.0):
            logging_output['reg_loss_pre'] = utils.item(pre_loss.data)
            loss += (self.reg_lambda_pre * pre_loss)
        if ((reg_loss != 0.0) and (self.reg_lambda_hidden != 0.0)):
            logging_output['reg_loss_hidden'] = utils.item(reg_loss.data)
            loss += (self.reg_lambda_hidden * reg_loss)
        if ((div_loss is not None) and (self.reg_lambda_div != 0.0)):
            div_loss = (div_loss * sample_size)
            logging_output['reg_loss_div'] = utils.item(div_loss.data)
            loss += (self.reg_lambda_div * div_loss)
        if ((consis_loss is not None) and (self.reg_lambda_consis != 0.0)):
            consis_loss = (consis_loss * sample_size)
            logging_output['reg_loss_consis'] = utils.item(consis_loss.data)
            loss += (self.reg_lambda_consis * consis_loss)
        logging_output.update({'loss': (utils.item(loss.data) if reduce else loss.data), 'nll_loss': (utils.item(nll_loss.data) if reduce else nll_loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample['target'].size(0), 'sample_size': sample_size})
        return (loss, sample_size, logging_output)

    # NOTE(review): takes only `logging_outputs` — looks like it should carry
    # @staticmethod (fairseq convention); confirm.
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate per-batch logging outputs into per-epoch averages (base 2)."""
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        return {'loss': (((sum((log.get('loss', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'nll_loss': (((sum((log.get('nll_loss', 0) for log in logging_outputs)) / ntokens) / math.log(2)) if (ntokens > 0) else 0.0), 'reg_loss_hidden': (((sum((log.get('reg_loss_hidden', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'reg_loss_div': (((sum((log.get('reg_loss_div', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'reg_loss_consis': (((sum((log.get('reg_loss_consis', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'reg_loss_decov': (((sum((log.get('reg_loss_decov', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'reg_loss_pre': (((sum((log.get('reg_loss_pre', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'reg_loss_fract': (((sum((log.get('reg_loss_fract', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
class CandidatePreferences(object):
    """Value object bundling preferences used when selecting install candidates."""

    def __init__(self, prefer_binary=False, allow_all_prereleases=False):
        """Store the two flags; both default to False."""
        self.prefer_binary = prefer_binary
        self.allow_all_prereleases = allow_all_prereleases
def _init_logger(path=None, stdout='tqdm', level='INFO'):
    """Configure and return the package root logger.

    The logger itself is set to level 1 (pass everything); filtering happens
    in the stdout handler and, when *path* is given, the file handler.
    """
    resolved_level = _get_level(level)
    logger = logging.getLogger(ROOT_NAME)
    logger.propagate = False
    logger.setLevel(1)
    _set_stdout_handler(logger, stdout, resolved_level)
    if path is not None:
        _add_file_handler(logger, path, resolved_level)
    return logger
def run_eval(model_type: str, task: str, from_pretrained: str, split: str='test', batch_size: int=1024, model_config_file: typing.Optional[str]=None, data_dir: str='./data', no_cuda: bool=False, seed: int=42, tokenizer: str='iupac', num_workers: int=8, debug: bool=False, metrics: typing.Tuple[(str, ...)]=(), log_level: typing.Union[(str, int)]=logging.INFO) -> typing.Dict[(str, float)]:
    """Evaluate a pretrained task model on one dataset split.

    Loads the model from *from_pretrained*, runs a single forward pass over
    the split, computes the requested *metrics*, pickles
    (metrics, raw outputs) into `<from_pretrained>/results.pkl`, and returns
    the metric dict.
    """
    # Single-process evaluation: distributed rank is fixed to -1.
    local_rank = (- 1)
    (device, n_gpu, is_master) = utils.setup_distributed(local_rank, no_cuda)
    utils.setup_logging(local_rank, save_path=None, log_level=log_level)
    utils.set_random_seeds(seed, n_gpu)
    pretrained_dir = Path(from_pretrained)
    logger.info(f'device: {device} n_gpu: {n_gpu}')
    model = registry.get_task_model(model_type, task, model_config_file, from_pretrained)
    model = model.to(device)
    runner = ForwardRunner(model, device, n_gpu)
    runner.initialize_distributed_model()
    valid_dataset = utils.setup_dataset(task, data_dir, split, tokenizer)
    valid_loader = utils.setup_loader(valid_dataset, batch_size, local_rank, n_gpu, 1, num_workers)
    metric_functions = [registry.get_metric(name) for name in metrics]
    save_outputs = run_eval_epoch(valid_loader, runner, is_master)
    # Flatten per-batch outputs into parallel target/prediction lists.
    target = [el['target'] for el in save_outputs]
    prediction = [el['prediction'] for el in save_outputs]
    metrics_to_save = {name: metric(target, prediction) for (name, metric) in zip(metrics, metric_functions)}
    # NOTE(review): ''.join concatenates the metric strings with no separator —
    # possibly ', '.join was intended.
    logger.info(''.join((f'{name}: {val}' for (name, val) in metrics_to_save.items())))
    with (pretrained_dir / 'results.pkl').open('wb') as f:
        pkl.dump((metrics_to_save, save_outputs), f)
    return metrics_to_save
def nnb_template_command(args):
    """Generate an NNB template from the input files; last file is the output.

    Returns True on success, False when arguments are missing or the import
    format is unsupported.
    """
    # Guard clause: need at least one input file plus the output path.
    if len(args.files) < 2:
        print('Input and Output arg is mandatory.')
        return False
    output = args.files.pop()
    resolve_file_format(args, args.files)
    if args.import_format not in nnabla.utils.converter.formats.import_name:
        print('Import format ({}) is not supported.'.format(args.import_format))
        return False
    nnabla.utils.converter.nnb_template(args, args.files, output)
    return True
def test_model(model: nn.Module, test_set: data.DataLoader, number_of_classes: int) -> (score.FloatScore, score.DictScore):
    """Evaluate *model* on *test_set*, returning overall and per-class accuracy.

    Puts the model into eval mode and runs two passes over the loader: one for
    the overall accuracy (also appended to 'epoch_logs.txt') and one for the
    per-class breakdown. Requires CUDA (inputs are moved with .cuda()).
    """
    model.eval()
    def test_average() -> score.FloatScore:
        # Overall accuracy over all test images.
        correct = 0
        total = 0
        with torch.set_grad_enabled(False):
            for (inputs, yreal) in tqdm(test_set, unit='images', desc='Testing model (average)', leave=True, ascii=True):
                (inputs, yreal) = (inputs.cuda(), yreal.cuda())
                # Model returns (logits, extra); only logits are used here.
                (ypred, _) = model(inputs)
                (_, predicted) = torch.max(ypred.data, 1)
                total += yreal.size(0)
                correct += (predicted == yreal).sum().item()
        accuracy = ((100 * correct) / total)
        log.info('Accuracy of the network on the {} test images (average): {}'.format(total, accuracy))
        with open('epoch_logs.txt', 'a+') as file:
            file.write('Test Acc: {}\n'.format(accuracy))
        return score.FloatScore(accuracy)
    def test_per_class() -> score.DictScore:
        # Accuracy broken down per class label.
        class_correct = list((0.0 for _ in range(number_of_classes)))
        class_total = list((0.0 for _ in range(number_of_classes)))
        total = 0
        with torch.no_grad():
            for (inputs, yreal) in tqdm(test_set, unit='images', desc='Testing model (per class)', leave=True, ascii=True):
                (inputs, yreal) = (inputs.cuda(), yreal.cuda())
                total += yreal.size(0)
                (ypred, _) = model(inputs)
                (_, predicted) = torch.max(ypred, 1)
                c = (predicted == yreal).squeeze()
                for i in range(yreal.shape[0]):
                    label = yreal[i]
                    class_correct[label] += c[i].item()
                    class_total[label] += 1
        log.info('Accuracy of the network on the {} test images (per-class):'.format(total))
        per_class_accuracy = {}
        for i in range(number_of_classes):
            # The 0.0001 guards against division by zero for classes that
            # never appear in the test set.
            accuracy = ((100 * class_correct[i]) / (class_total[i] + 0.0001))
            per_class_accuracy[i] = accuracy
            print(('Accuracy of %5s : %2d %%' % (i, accuracy)))
        return score.DictScore(per_class_accuracy)
    return (test_average(), test_per_class())
def test_optimal():
    """A hand-crafted optimal policy should score exactly 98.8 across four mazes."""
    reward_sides = [1, 0, 1, 0]
    envs = [StrictTMazeEnv(init_reward_side=side, n_trials=100) for side in reward_sides]
    evaluator = MultiEnvEvaluator(make_net, activate_net, envs=envs, batch_size=4, max_env_steps=1600)
    assert evaluator.eval_genome(None, None) == 98.8
def per_class_iu(hist):
    """Per-class intersection-over-union from a square confusion matrix.

    The 1e-08 keeps the denominator nonzero for classes absent from both
    prediction and ground truth.
    """
    intersection = np.diag(hist)
    union = hist.sum(1) + 1e-08 + hist.sum(0) - intersection
    return intersection / union
class ChemProtProcessor(BlueBERTProcessor):
    """Processor for the ChemProt relation-extraction data set."""

    def get_labels(self):
        """Return the five CPR relation classes plus the 'false' (no relation) label."""
        return ['CPR:3', 'CPR:4', 'CPR:5', 'CPR:6', 'CPR:9', 'false']
class ProbablisticCAE(nn.Module):
    """Probabilistic convolutional autoencoder whose architecture is sampled
    from an adaptive stochastic natural gradient (ASNG) distribution.

    The network is a CGP-style grid of conv/deconv modules; categorical
    variables pick module type and wiring, integer variables pick kernel
    index and channel count.
    """
    # One input node and one output node in the CGP graph.
    in_num = 1
    out_num = 1

    def __init__(self, in_ch_size=3, out_ch_size=3, row_size=1, col_size=20, level_back=5, downsample=True, k_sizes=(1, 3, 5), ch_range=(64, 256), c=None, delta_init_factor=0.0):
        """Build the search space and either the full supernet (c=None) or the
        fixed network described by the distribution parameters *c*."""
        super(ProbablisticCAE, self).__init__()
        self.in_ch_size = in_ch_size
        self.out_ch_size = out_ch_size
        self.cols = col_size
        self.rows = row_size
        self.level_back = level_back
        self.downsample = downsample
        self.max_args = 1
        # skip=1 modules keep resolution and push onto the skip stack.
        self.skip = [0, 1]
        self.k_sizes = list(k_sizes)
        self.k_range = [1, len(k_sizes)]
        self.ch_range = list(ch_range)
        self.max_ch = np.max(self.ch_range)
        self.module_num = (len(self.k_sizes) * len(self.skip))
        self.is_active = np.empty(((self.rows * self.cols) + self.out_num))
        (categories, integers) = self.get_variable_space()
        n = (np.sum((categories - 1)) + (2 * len(integers)))
        self.asng = AdaptiveSNG(categories, integers, delta_init=(1 / (n ** delta_init_factor)), lam=2)
        # module_info[i] lists (flat index, name, arity, skip flag) per variant.
        self.module_info = []
        j = 0
        for i in range((self.cols * self.rows)):
            module = []
            for s in self.skip:
                for k in self.k_sizes:
                    module.append((j, (('Conv_' + str(i)) + '_{}_{}'.format(k, s)), 1, s))
                    j += 1
            self.module_info.append(module)
        self.module_info.append([(j, 'OutDeconv', 1, None)])
        if (c is None):
            self.init_network()
        else:
            self.init_network_by_c(c)
        print('Num. of nodes (conv + out): {}'.format(len(self.module_info)))
        print('Num. of dimension of the categorical distribution: {}'.format(self.asng.d_cat))
        print('Num. of valid dimension of the categorical distribution: {}'.format(self.asng.valid_d_cat))
        print('Num. of params in the categorical distribution: {}'.format(self.asng.n_cat))
        print('Num. of dimension of the normal distribution: {}'.format(self.asng.d_int))
        print('Num. of params in the normal distribution: {}'.format(self.asng.n_int))
        print('Num. of weight parameters: {}'.format(np.sum((np.prod(param.size()) for param in self.parameters()))))

    def forward_mle(self, x):
        """Forward pass with the most-likely architecture under the distribution."""
        (c_cat, c_int) = self.asng.mle()
        return self.forward(c_cat, c_int, x)

    def forward(self, c_cat, c_int, x):
        """Run *x* through the architecture encoded by (c_cat, c_int).

        Encoder: active conv modules in order; skip modules stash activations.
        Decoder: the same modules mirrored as deconvs, popping the skip stack.
        """
        net_info = self.gene_to_net_info(c_cat.argmax(axis=1), c_int)
        self.check_active(net_info)
        f = self.f
        h = x
        active_list = []
        h_stack = []
        for i in range((self.rows * self.cols)):
            if self.is_active[i]:
                (m_idx, _, _, is_skip) = self.module_info[i][((net_info[i][0] * len(self.k_sizes)) + net_info[i][1])]
                ch_num = net_info[i][2]
                # Stop downsampling once a spatial dim has collapsed to 1.
                if (self.downsample and (not is_skip) and ((h.shape[2] == 1) or (h.shape[3] == 1))):
                    continue
                active_list.append(i)
                h = f[m_idx](h, ch_num=ch_num)
                if is_skip:
                    h_stack.append(h)
        conv_num = ((self.rows * self.cols) * self.module_num)
        # Decoder: same modules in reverse, offset by conv_num into self.f.
        for i in active_list[::(- 1)]:
            (m_idx, _, _, is_skip) = self.module_info[i][((net_info[i][0] * len(self.k_sizes)) + net_info[i][1])]
            ch_num = net_info[i][2]
            if is_skip:
                xx = h_stack.pop()
                h = f[(m_idx + conv_num)](h, xx, ch_num=ch_num)
            else:
                h = f[(m_idx + conv_num)](h, ch_num=ch_num)
        h = f[(- 1)](h)
        return h

    def get_variable_space(self):
        """Return (categorical cardinalities, integer ranges) for the ASNG."""
        categories = []
        integers = []
        for i in range((self.cols * self.rows)):
            categories += [2]
            integers += [self.k_range]
            integers += [self.ch_range]
            c = (i // self.rows)
            # Inputs may come from up to level_back previous columns.
            k = ((self.level_back * self.rows) if ((c - self.level_back) >= 0) else ((c * self.rows) + self.in_num))
            categories += ([k] * self.max_args)
        k = ((self.level_back * self.rows) if ((self.cols - self.level_back) >= 0) else ((self.cols * self.rows) + self.in_num))
        categories += [k]
        return (np.array(categories), np.array(integers))

    def init_network(self):
        """Instantiate the full supernet: every conv/deconv variant at max channels."""
        self.f = nn.ModuleList([])
        for i in range((self.cols * self.rows)):
            for s in self.skip:
                for k in self.k_sizes:
                    stride = (2 if (self.downsample and (s == 0)) else 1)
                    self.f.append(op.ConvUnit(in_ch=self.max_ch, k_size=k, pad_size=(k // 2), stride=stride, out_ch=self.max_ch))
        for i in range((self.cols * self.rows)):
            for s in self.skip:
                for k in self.k_sizes:
                    stride = (2 if (self.downsample and (s == 0)) else 1)
                    self.f.append(op.DeconvUnit(in_ch=self.max_ch, k_size=k, pad_size=(k // 2), stride=stride, out_ch=self.max_ch))
        self.f.append(op.OutputDeconv(in_ch=self.max_ch, k_size=3, pad_size=1, stride=1, out_ch=self.out_ch_size))

    def init_network_by_c(self, c):
        """Instantiate only the modules selected by distribution parameters *c*
        (c[0]: categorical thetas, c[1]: integer thetas), sized to the actual
        channel counts; unselected slots are None placeholders."""
        self.asng.theta_cat = c[0]
        self.asng.theta_int[0] = c[1]
        net_info = self.gene_to_net_info(c[0].argmax(axis=1), c[1])
        self.check_active(net_info)
        self.f = nn.ModuleList([])
        in_list = [self.in_ch_size]
        for i in range((self.cols * self.rows)):
            for s in self.skip:
                for (r, k) in enumerate(self.k_sizes):
                    stride = (2 if (self.downsample and (s == 0)) else 1)
                    if (self.is_active[i] and (net_info[i][0] == s) and (net_info[i][1] == r)):
                        ch_num = net_info[i][2]
                        self.f.append(op.ConvUnit(in_ch=in_list[(- 1)], k_size=k, pad_size=(k // 2), stride=stride, out_ch=ch_num))
                        in_list.append(ch_num)
                    else:
                        self.f.append(None)
        j = 2
        for i in range((self.cols * self.rows)):
            for s in self.skip:
                for (r, k) in enumerate(self.k_sizes):
                    stride = (2 if (self.downsample and (not s)) else 1)
                    if (self.is_active[i] and (net_info[i][0] == s) and (net_info[i][1] == r)):
                        ch_num = net_info[i][2]
                        self.f.append(op.DeconvUnit(in_ch=in_list[j], k_size=k, pad_size=(k // 2), stride=stride, out_ch=ch_num))
                        j = np.minimum((j + 1), (len(in_list) - 1))
                    else:
                        self.f.append(None)
        self.f.append(op.OutputDeconv(in_ch=in_list[1], k_size=3, pad_size=1, stride=1, out_ch=self.out_ch_size))

    def __check_course_to_out(self, net_info, n):
        """Recursively mark node *n* and its input chain as active."""
        if (not self.is_active[n]):
            self.is_active[n] = True
            for in_node in net_info[n][3:]:
                if (in_node >= self.in_num):
                    self.__check_course_to_out(net_info, (in_node - self.in_num))

    def check_active(self, net_info):
        """Recompute self.is_active by tracing back from every output node."""
        self.is_active[:] = False
        for i in range(self.out_num):
            self.__check_course_to_out(net_info, ((len(self.module_info) - i) - 1))

    def gene_to_net_info(self, c_cat, c_int):
        """Decode flat categorical/integer genes into per-node
        [skip, kernel index, channels, input node...] records."""
        cc_int = np.round(np.clip(c_int, self.asng.int_min, self.asng.int_max))
        # BUG FIX: np.int was a deprecated alias for the builtin int and was
        # removed in NumPy 1.24; astype(int) is equivalent on all versions.
        cc_int = cc_int.astype(int)
        net_info = []
        p = q = 0
        for (i, m) in enumerate(self.module_info):
            c = (i // self.rows)
            min_index = ((((c - self.level_back) * self.rows) + self.in_num) if ((c - self.level_back) >= 0) else 0)
            if (i < (self.rows * self.cols)):
                (_, _, args, _) = m[(((c_cat[p] * len(self.k_sizes)) + cc_int[q]) - 1)]
                net_info.append(((([c_cat[p]] + [(cc_int[q] - 1)]) + [cc_int[(q + 1)]]) + [(min_index + c_cat[((p + j) + 1)]) for j in range(args)]))
            else:
                net_info.append(((([0] + [0]) + [0]) + [(min_index + c_cat[p])]))
            p += (self.max_args + 1)
            q += 2
        return net_info

    def get_params(self, net_info):
        """Count the weight parameters used by the architecture *net_info*."""
        self.check_active(net_info)
        conv_num = ((self.rows * self.cols) * self.module_num)
        param_num = 0
        f = self.f
        for i in range((self.rows * self.cols)):
            if self.is_active[i]:
                (j, _, _, _) = self.module_info[i][((net_info[i][0] * len(self.k_sizes)) + net_info[i][1])]
                param_num += np.sum((np.prod(param.size()) for param in f[j].parameters()))
                param_num += np.sum((np.prod(param.size()) for param in f[(j + conv_num)].parameters()))
        param_num += np.sum((np.prod(param.size()) for param in f[(- 1)].parameters()))
        return param_num

    def get_params_mle(self):
        """Parameter count for the most-likely architecture."""
        (c_cat, c_int) = self.asng.mle()
        net_info = self.gene_to_net_info(c_cat.argmax(axis=1), c_int)
        return self.get_params(net_info)

    def network_string(self, net_info, sep='\n'):
        """Render the active graph of *net_info* as 'src -> dst;' edges (DOT-like)."""
        self.check_active(net_info)
        net_str = ''
        for (i, m) in enumerate(self.module_info):
            if self.is_active[i]:
                (_, name, args, _) = m[((net_info[i][0] * len(self.k_sizes)) + net_info[i][1])]
                if (i == (len(self.module_info) - 1)):
                    ch_num = self.out_ch_size
                else:
                    ch_num = net_info[i][2]
                for j in range(args):
                    in_n = net_info[i][(j + 3)]
                    if (in_n >= self.in_num):
                        in_module_id = ((net_info[(in_n - self.in_num)][0] * len(self.k_sizes)) + net_info[(in_n - self.in_num)][1])
                        in_ch_num = net_info[(in_n - self.in_num)][2]
                        net_str += (((self.module_info[(in_n - self.in_num)][in_module_id][1] + '_') + str(in_ch_num)) + ' -> ')
                        net_str += ((((name + '_') + str(ch_num)) + ';') + sep)
                    else:
                        net_str += ((((((('Input_%d' % in_n) + ' -> ') + name) + '_') + str(ch_num)) + ';') + sep)
        return net_str

    def mle_network_string(self, sep='\n'):
        """network_string() for the most-likely architecture."""
        (c_cat, c_int) = self.asng.mle()
        net_info = self.gene_to_net_info(c_cat.argmax(axis=1), c_int)
        return self.network_string(net_info, sep=sep)

    def p_model_update(self, c, losses, range_restriction=True):
        """Natural-gradient update of the architecture distribution from sampled losses."""
        self.asng.update(c, losses, range_restriction)
def sample_code_chunk(code, size):
    """Sample a uniformly-random contiguous chunk of length *size* from *code*.

    Returns (chunk, start, end) with end exclusive. *size* must be in
    (0, len(code)].
    """
    assert 0 < size <= len(code)
    begin = np.random.randint(len(code) - size + 1)
    finish = begin + size
    return (code[begin:finish], begin, finish)
# NOTE(review): the three lines below look like mangled decorators — most
# likely `@pytest.mark.parametrize(...)`; confirm against the original source.
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('ishape, index', [([10], [[0]]), ([10], [[1, 5, 8]]), ([10], [[(- 1), (- 5), (- 8)]]), ([3, 4], [[0]]), ([3, 4], [[0], [0]]), ([3, 4], [[0, 1], [0, 2]]), ([3, 4], [[0, (- 1)], [0, (- 2)]]), ([2, 3, 4], [[0]]), ([2, 3, 4], [[0], [1]]), ([2, 3, 4], [[0], [1], [2]]), ([2, 3, 4], [[0, 1]]), ([2, 3, 4], [[0, 1], [1, 2]]), ([2, 3, 4], [[0, 1], [1, 2], [1, 0]]), ([4, 4, 4, 4], [[[0, 1], [2, 3]], [[0, 1], [2, 3]]])])
def test_forward_backward(seed, ishape, index, ctx, func_name):
    """Check F.gather_nd against the NumPy reference for many shapes/indices."""
    rng = np.random.RandomState(seed)
    # Second input is the integer index array; gradients flow only to the data
    # input (backward=[True, False]).
    inputs = [rng.randn(*ishape).astype(np.float32), np.array(index)]
    function_tester(rng, F.gather_nd, gather_nd, inputs, func_name=func_name, ctx=ctx, backward=[True, False])
def save_training_checkpoint(epoch, model, optimizer, best_f1, filename):
    """Serialize a resumable training checkpoint to *filename*.

    Stores the NEXT epoch to run (epoch + 1), the model and optimizer state
    dicts, and the best F1 seen so far.
    """
    checkpoint = {
        'epoch': (epoch + 1),
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'best_f1': best_f1,
    }
    torch.save(checkpoint, filename)
def extract_sp_S1_models(layout):
    """Download init/mid/final self-play actor checkpoints for *layout* into the
    FCP stage-1 policy pool.

    For every finished wandb run: densify the (step, ep_sparse_r) curve by
    linear interpolation, pick 'init' (first step), 'final' (last step), and
    'mid' (the step whose reward is closest to half the final reward), map
    those steps to the nearest saved `actor_periodic_*.pt` version, and move
    each download to `<POLICY_POOL_PATH>/<layout>/fcp/s1/sp<i>_<tag>_actor.pt`.
    """
    wandb_dir = RESULT_PATH.format(layout=layout)
    runs = glob.glob(f'{wandb_dir}/run*')
    run_ids = [x.split('-')[(- 1)] for x in runs]
    print(runs)
    print(run_ids)
    api = wandb.Api()
    i = 0
    for run_id in run_ids:
        run = api.run(f'{WANDB_NAME}/Overcooked/{run_id}')
        if (run.state == 'finished'):
            i += 1
            final_ep_sparse_r = run.summary['ep_sparse_r']
            history = run.history()
            history = history[['_step', 'ep_sparse_r']]
            steps = history['_step'].to_numpy()
            ep_sparse_r = history['ep_sparse_r'].to_numpy()
            files = run.files()
            actor_pts = [f for f in files if f.name.startswith('actor_periodic')]
            # NOTE(review): eval() on a filename fragment — int() would be safer.
            actor_versions = [eval(f.name.split('_')[(- 1)].split('.pt')[0]) for f in actor_pts]
            actor_pts = {v: p for (v, p) in zip(actor_versions, actor_pts)}
            actor_versions = sorted(actor_versions)
            max_actor_versions = (max(actor_versions) + 1)
            max_steps = max(steps)
            # Densify the reward curve: linearly interpolate every 100 steps.
            new_steps = [steps[0]]
            new_ep_sparse_r = [ep_sparse_r[0]]
            for (s, er) in zip(steps[1:], ep_sparse_r[1:]):
                l_s = new_steps[(- 1)]
                l_er = new_ep_sparse_r[(- 1)]
                for w in range((l_s + 1), s, 100):
                    new_steps.append(w)
                    new_ep_sparse_r.append((l_er + (((er - l_er) * (w - l_s)) / (s - l_s))))
            steps = new_steps
            ep_sparse_r = new_ep_sparse_r
            selected_pts = dict(init=0, mid=(- 1), final=max_steps)
            mid_ep_sparse_r = (final_ep_sparse_r / 2)
            # BUG FIX: the tracker previously started at 0.0, so
            # `min_delta > abs(...)` was never true and 'mid' stayed at -1
            # (producing a negative version index). Start at +inf so the
            # closest-to-half-reward step is actually selected.
            min_delta = float('inf')
            for (s, score) in zip(steps, ep_sparse_r):
                if (min_delta > abs((mid_ep_sparse_r - score))):
                    min_delta = abs((mid_ep_sparse_r - score))
                    selected_pts['mid'] = s
            # Convert selected steps to expected checkpoint version numbers.
            selected_pts = {k: int(((v / max_steps) * max_actor_versions)) for (k, v) in selected_pts.items()}
            for (tag, exp_version) in selected_pts.items():
                # Snap to the nearest actually-saved version.
                version = actor_versions[0]
                for actor_version in actor_versions:
                    if (abs((exp_version - version)) > abs((exp_version - actor_version))):
                        version = actor_version
                print(f'sp{i}', tag, 'Expected', exp_version, 'Found', version)
                ckpt = actor_pts[version]
                ckpt.download('tmp', replace=True)
                fcp_s1_dir = f'{POLICY_POOL_PATH}/{layout}/fcp/s1'
                os.system(f'mv tmp/actor_periodic_{version}.pt {fcp_s1_dir}/sp{i}_{tag}_actor.pt')
def load_combined_test_data_wov(output_path: str):
    """Load and concatenate the param/ret/var test vectors (identifiers + tokens).

    Returns (identifiers, tokens, boundaries) where boundaries holds the last
    index of the param, ret, and var sections within the concatenated tensors.
    """
    vec_dir = join(output_path, 'vectors', 'test')
    id_p = load_data_tensors_TW(join(vec_dir, 'identifiers_param_test_datapoints_x.npy'))
    id_r = load_data_tensors_TW(join(vec_dir, 'identifiers_ret_test_datapoints_x.npy'))
    id_v = load_data_tensors_TW(join(vec_dir, 'identifiers_var_test_datapoints_x.npy'))
    tok_p = load_data_tensors_TW(join(vec_dir, 'tokens_param_test_datapoints_x.npy'))
    tok_r = load_data_tensors_TW(join(vec_dir, 'tokens_ret_test_datapoints_x.npy'))
    tok_v = load_data_tensors_TW(join(vec_dir, 'tokens_var_test_datapoints_x.npy'))
    identifiers = torch.cat((id_p, id_r, id_v))
    tokens = torch.cat((tok_p, tok_r, tok_v))
    boundaries = ((len(id_p) - 1), ((len(id_p) + len(id_r)) - 1), (((len(id_p) + len(id_r)) + len(id_v)) - 1))
    return (identifiers, tokens, boundaries)
def add_fast_rcnn_losses(model):
    """Add RoI classification and bounding-box regression losses to the model.

    Returns the loss gradients produced by blob_utils for both losses.
    """
    loss_scale = 1.0 / cfg.NUM_GPUS
    # Optionally feed per-label loss weights into the softmax loss.
    cls_inputs = ['cls_score', 'labels_int32']
    if cfg.TRAIN.CLS_SIZE_WEIGHTED_LOSS:
        cls_inputs = cls_inputs + ['label_loss_weights']
    (cls_prob, loss_cls) = model.net.SoftmaxWithLoss(
        cls_inputs, ['cls_prob', 'loss_cls'], scale=loss_scale)
    loss_bbox = model.net.SmoothL1Loss(
        ['bbox_pred', 'bbox_targets', 'bbox_inside_weights', 'bbox_outside_weights'],
        'loss_bbox', scale=loss_scale)
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_cls, loss_bbox])
    # Track classification accuracy alongside the losses.
    model.Accuracy(['cls_prob', 'labels_int32'], 'accuracy_cls')
    model.AddLosses(['loss_cls', 'loss_bbox'])
    model.AddMetrics('accuracy_cls')
    return loss_gradients
def options(opt):
    """Register build-profile related command-line options on `opt`."""
    assert default_profile in profiles
    profile_names = list(profiles.keys())
    allowed = ', '.join(repr(name) for name in profile_names)
    opt.add_option('-d', '--build-profile', action='store', default=default_profile,
                   help=('Specify the build profile. Build profiles control the default compilation flags used for C/C++ programs, if CCFLAGS/CXXFLAGS are not set in the environment. [Allowed Values: %s]' % allowed),
                   choices=profile_names, dest='build_profile')
    opt.add_option('--check-profile', help='print out current build profile',
                   default=False, dest='check_profile', action='store_true')
    opt.add_option('--disable-werror', help='disable -Werror flag (warnings treated as errors',
                   default=False, dest='disable_werror', action='store_true')
# NOTE(review): `_python_op()` looks like a mangled decorator (e.g. a
# framework's @python_op registration) -- confirm against upstream.
_python_op()
def resize_fn(config, frame: sp.FrameType) -> sp.FrameType:
    # Resize one video frame to the (width, height) given in the op's config.
    # cv2.resize takes its size argument as (width, height), matching the
    # order read from config here.
    return cv2.resize(frame, (config.args['width'], config.args['height']))
def ShearY(img, v):
    """Apply a vertical shear of magnitude v (must lie in [-0.3, 0.3])."""
    assert -0.3 <= v <= 0.3
    # Randomly flip the shear direction when mirroring is enabled.
    if random_mirror and random.random() > 0.5:
        v = -v
    # Affine coefficients (a, b, c, d, e, f): x' = x, y' = v*x + y.
    coeffs = (1, 0, 0, v, 1, 0)
    return img.transform(img.size, PIL.Image.AFFINE, coeffs)
class STFTLoss(torch.nn.Module):
    """Single-resolution STFT loss: spectral convergence + log STFT magnitude."""

    def __init__(self, fft_size=1024, shift_size=120, win_length=600, window='hann_window'):
        """Store STFT analysis parameters and build the analysis window."""
        super(STFTLoss, self).__init__()
        self.fft_size = fft_size
        self.shift_size = shift_size
        self.win_length = win_length
        # Registered as a buffer (not a parameter) so it follows .to(device).
        self.register_buffer('window', getattr(torch, window)(win_length))
        self.spectral_convergenge_loss = SpectralConvergengeLoss()
        self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss()

    def forward(self, x, y):
        """Return (spectral_convergence_loss, log_magnitude_loss) for x vs y."""
        stft_args = (self.fft_size, self.shift_size, self.win_length, self.window)
        x_mag = stft(x, *stft_args)
        y_mag = stft(y, *stft_args)
        return (self.spectral_convergenge_loss(x_mag, y_mag),
                self.log_stft_magnitude_loss(x_mag, y_mag))
def extract_masks(segmentations, target_vectors):
    """Split per-class segmentation logits into target / non-target masks.

    For each sample, takes the element-wise max over the class channels whose
    target-vector entry is 1.0 (target) or 0.0 (non-target), then applies a
    sigmoid. Returns (target_masks, non_target_masks), each (batch, h, w).
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    batch_size, _, h, w = segmentations.size()
    target_masks = torch.empty(batch_size, h, w, device=device)
    non_target_masks = torch.empty(batch_size, h, w, device=device)
    for idx in range(batch_size):
        is_target = target_vectors[idx].eq(1.0)
        is_other = target_vectors[idx].eq(0.0)
        # Reduce the selected class channels with an element-wise maximum.
        target_masks[idx] = segmentations[idx][is_target].amax(dim=0)
        non_target_masks[idx] = segmentations[idx][is_other].amax(dim=0)
    return (target_masks.sigmoid(), non_target_masks.sigmoid())
class ScipyNelderMeadTuner(Tuner):
    """Tuner that runs scipy's Nelder-Mead simplex search over the search space."""

    def tune_impl(self, **kwargs):
        """Run Nelder-Mead from `start_config` or a generated initial config.

        kwargs:
            init_method: 'average' (midpoint of each range / first list entry)
                         or 'random'; used only when no start_config is set.
        """
        init_method = kwargs.get('init_method', 'average')
        if self.start_config is not None:
            config = self.start_config
        # BUG FIX: comparisons below used `is` (identity) on strings, which is
        # not guaranteed to hold for equal string values.
        elif init_method == 'average':
            config = {}
            for coordinate in self.search_space:
                param = self.search_space[coordinate]
                if isinstance(param, dict):
                    if 'range' in param:
                        minval, maxval = param['range']
                        config[coordinate] = (maxval + minval) / 2
                elif isinstance(param, list):
                    # BUG FIX: previously wrote `config[k]` with undefined `k`.
                    config[coordinate] = param[0]
        elif init_method == 'random':
            config = RandomTuner.generate_configs(self.search_space, 1)[0]
        else:
            print('{} is invalid init_method!'.format(init_method))
            return
        # Fix a variable ordering so scipy sees a stable vector layout.
        sorted_vars = sorted(config.keys())

        def config_to_array(config):
            return [config[var] for var in sorted_vars]

        def array_to_config(arr):
            return {var: value for (var, value) in zip(sorted_vars, arr)}

        def optimization_function(arr):
            config = array_to_config(arr)
            score = self.evaluate_configs([config])
            # scipy minimizes, so negate when the objective is maximized.
            if self.maximize:
                score = -score
            return score

        optimize.minimize(optimization_function, config_to_array(config),
                          method='Nelder-Mead', options={'maxfev': self.budget})
class Mask(nn.Module):
    """Baseline referring-segmentation model: encoder + cross-attention + decoder."""

    def __init__(self, dict_size=12099):
        super(Mask, self).__init__()
        self.name = 'Baseline'
        self.encoder = Encoder()
        # Word-embedding table and a single visual/language cross-attention head.
        self.embs = nn.ModuleList([nn.Embedding(dict_size, 1000)])
        self.cas = nn.ModuleList([CrossAtt(vis_dim=2048, lang_dim=1000)])
        self.decoder = Decoder()

    def forward(self, prev_frames, prev_masks, in_frames, words, eval=False):
        # NOTE(review): prev_frames / prev_masks / eval are accepted but unused
        # here -- presumably kept for interface parity with sibling models.
        embed = self.embs[0](words)
        encoder_feats = self.encoder(in_frames)
        vis_r5s, vis_r4s, vis_r3s, vis_r2s, vis_c1s = encoder_feats
        multi_r5s = self.cas[0](vis_r5s, embed)
        logit = self.decoder(multi_r5s, vis_r5s, vis_r4s, vis_r3s, vis_r2s, vis_c1s)
        mask = F.softmax(logit, dim=1)
        return (mask, logit)
def clean_up() -> None:
    """Delete run.log and all *.npy files from the current directory."""
    base = Path('.')
    # Plain loops instead of list comprehensions: comprehensions used purely
    # for side effects are an anti-pattern (they build a throwaway list).
    for path in base.glob('run.log'):
        path.unlink()
    for path in base.glob('*.npy'):
        path.unlink()
def test(tmp_path):
    """Round-trip a record through Arrow and Parquet, checking equality."""
    filename = os.path.join(tmp_path, 'whatever.parquet')
    original = ak.Record({'x': 1, 'y': [1, 2, 3], 'z': 'THREE'})
    expected = original.to_list()
    # Arrow array and Arrow table round trips must preserve the data.
    assert ak.from_arrow(ak.to_arrow(original)).to_list() == expected
    assert ak.from_arrow(ak.to_arrow_table(original)).to_list() == expected
    # Parquet round trip through a real file on disk.
    ak.to_parquet(original, filename)
    assert ak.from_parquet(filename).to_list() == expected
class CommandContextMixIn(object):
    """Mixin owning a single ExitStack, guarded against nested entry."""

    def __init__(self):
        super(CommandContextMixIn, self).__init__()
        self._in_main_context = False
        self._main_context = ExitStack()

    def main_context(self):
        # Generator-based context manager body: enter the shared ExitStack,
        # refusing re-entrant use, and always clear the flag on exit.
        assert not self._in_main_context
        self._in_main_context = True
        try:
            with self._main_context:
                yield
        finally:
            self._in_main_context = False

    def enter_context(self, context_provider):
        # Only legal while main_context() is active; resources registered here
        # are released when the main context unwinds.
        assert self._in_main_context
        return self._main_context.enter_context(context_provider)
def from_major_code(mc, final_descent=False):
    """Return the permutation whose major code is `mc`.

    Rebuilds the word right-to-left: at each step the candidate insertion
    positions are ordered descents-first (largest descent first), then the
    remaining positions, and index mc[i-1] picks where to insert the value i.
    NOTE(review): relies on sage's Permutation.descents(); confirm semantics
    of `final_descent` against the sage documentation.
    """
    if (not mc):
        w = []
    else:
        # The word starts with the largest value; smaller values are inserted below.
        w = [len(mc)]
    for i in reversed(range(1, len(mc))):
        d = Permutation(w, check=False).descents(final_descent=final_descent)
        d.reverse()
        # Positions of w that are not descents, in increasing order.
        a = [x for x in range(1, (len(w) + 1)) if (x not in d)]
        # Append 0 so the combined index list covers insertion at the front.
        d.append(0)
        l = mc[(i - 1)]
        indices = (d + a)
        w.insert(indices[l], i)
    return Permutation(w, check=False)
class AutoModelForTableQuestionAnswering(metaclass=DummyObject):
    """Placeholder emitted when torch is unavailable; any use raises via requires_backends."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError-style message if torch is missing.
        requires_backends(self, ['torch'])
class TensorPipeAgentRpcTest(RpcAgentTestFixture):
    """RPC tests specific to the TensorPipe backend (options, threads, timeouts).

    NOTE(review): the bare `_init(setup_rpc=False)` lines between methods look
    like mangled decorators (presumably @dist_init(setup_rpc=False)) -- confirm
    against upstream before relying on this file.
    """
    def test_mismatched_type_for_options(self):
        # Passing a foreign options class must be rejected with a TypeError.
        rpc_backend_options = FooBackendOptions(self.init_method)
        with self.assertRaisesRegex(TypeError, '`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`'):
            rpc.init_rpc(name=worker_name(self.rank), rank=self.rank, world_size=self.world_size, backend=rpc.BackendType.TENSORPIPE, rpc_backend_options=rpc_backend_options)
    def test_infer_backend_from_options(self):
        # Omitting `backend` should infer the TensorPipe agent from the options type.
        rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method=self.init_method)
        rpc.init_rpc(name=worker_name(self.rank), rank=self.rank, world_size=self.world_size, rpc_backend_options=rpc_backend_options)
        self.assertIsInstance(rpc.api._get_current_rpc_agent(), rpc.TensorPipeAgent)
    _init(setup_rpc=False)
    def test_set_and_get_num_worker_threads(self):
        # The configured thread-pool size must be reflected in the agent's debug info.
        NUM_THREADS = 27
        rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method=self.rpc_backend_options.init_method, num_worker_threads=NUM_THREADS)
        rpc.init_rpc(name=worker_name(self.rank), backend=self.rpc_backend, rank=self.rank, world_size=self.world_size, rpc_backend_options=rpc_backend_options)
        info = rpc.api._get_current_rpc_agent().get_debug_info()
        self.assertEqual(int(info['agent.thread_pool_size']), NUM_THREADS)
        rpc.shutdown()
    _init(setup_rpc=False)
    def test_tensorpipe_set_default_timeout(self):
        # A timeout given in the backend options becomes the global RPC timeout.
        timeout = 0.5
        rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method=self.rpc_backend_options.init_method, num_worker_threads=self.rpc_backend_options.num_worker_threads, rpc_timeout=timeout)
        rpc.init_rpc(name=worker_name(self.rank), backend=self.rpc_backend, rank=self.rank, world_size=self.world_size, rpc_backend_options=rpc_backend_options)
        default_timeout = rpc.get_rpc_timeout()
        self.assertEqual(default_timeout, timeout)
        rpc.shutdown()
    _init(setup_rpc=False)
    def test_tensorpipe_options_throw_on_timedelta_timeout(self):
        # rpc_timeout must be numeric; a timedelta is rejected by the pybind ctor.
        from datetime import timedelta
        timeout = timedelta()
        with self.assertRaisesRegex(TypeError, 'incompatible constructor arguments'):
            rpc_backend_options = rpc.TensorPipeRpcBackendOptions(init_method=self.rpc_backend_options.init_method, num_worker_threads=self.rpc_backend_options.num_worker_threads, rpc_timeout=timeout)
class ResidualCNN(nn.Module):
    """Residual block of two Conv2d layers with pre-activation normalization.

    Each half applies: layer norm -> softplus -> dropout -> conv, with an
    additive skip connection around the whole block.
    """

    def __init__(self, in_channels, out_channels, kernel, stride, dropout, n_feats):
        super(ResidualCNN, self).__init__()
        same_pad = kernel // 2  # keeps spatial size when stride == 1
        self.cnn1 = nn.Conv2d(in_channels, out_channels, kernel, stride, padding=same_pad)
        self.cnn2 = nn.Conv2d(out_channels, out_channels, kernel, stride, padding=same_pad)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.layer_norm1 = CNNLayerNorm(n_feats)
        self.layer_norm2 = CNNLayerNorm(n_feats)

    def forward(self, x):
        residual = x
        out = self.layer_norm1(x)
        out = self.cnn1(self.dropout1(F.softplus(out)))
        out = self.layer_norm2(out)
        out = self.cnn2(self.dropout2(F.softplus(out)))
        return out + residual
class DotprodAttention(nn.Module):
    """Dot-product attention pooling of a feature sequence against an aspect vector."""

    def __init__(self):
        super().__init__()

    def forward(self, feature, aspect_v, dmask):
        # assumes feature is (batch, seq, dim) and aspect_v is (batch, dim)
        # based on the bmm shapes below -- TODO confirm at the call site.
        query = aspect_v.unsqueeze(2)                   # (batch, dim, 1)
        scores = torch.bmm(feature, query)              # (batch, seq, 1)
        # Mask out padded positions before normalizing.
        masked_scores = mask_logits(scores, dmask.unsqueeze(2))
        weights = F.softmax(masked_scores, dim=1)
        pooled = torch.bmm(feature.transpose(1, 2), weights)
        return pooled.squeeze(2)                        # (batch, dim)
def mvRotate(speed, angle, clockwise, verbose=0):
    """Rotate the robot at `speed` deg/s through `angle` degrees.

    If angle == -1, start rotating indefinitely and return immediately.
    A falsy global `stopper` aborts the command before any motion.
    """
    if not stopper:
        return
    vel_msg = Twist()
    # BUG FIX: the log message's {0}/{1} placeholders had speed and angle
    # swapped ("Rotate <speed> degree with <angle> degree/sec").
    rospy.loginfo('Rotate {0} degree with {1} degree/sec Clockwise = {2}'.format(angle, speed, clockwise))
    # Degrees -> radians.
    angular_speed = ((speed * 2) * PI) / 360
    relative_angle = ((angle * 2) * PI) / 360
    # Clockwise rotation is a negative angular z velocity.
    direction = -1 if clockwise else 1
    vel_msg.angular.z = direction * abs(angular_speed)
    if angle == -1:
        # Continuous rotation: publish once and return without blocking.
        velocity_publisher.publish(vel_msg)
        return
    # Keep publishing until the integrated angle reaches the request.
    t0 = rospy.Time.now().to_sec()
    current_angle = 0
    while current_angle < relative_angle:
        velocity_publisher.publish(vel_msg)
        t1 = rospy.Time.now().to_sec()
        current_angle = angular_speed * (t1 - t0)
    # Stop the rotation.
    vel_msg.angular.z = 0
    velocity_publisher.publish(vel_msg)
    printv('STOP', verbose)
class GaussianMLPBaseModule(nn.Module):
    """Base module producing a Gaussian action distribution from an MLP.

    Subclasses implement `_get_mean_and_log_std`; `forward` clamps the log
    standard deviation to the configured bounds and returns an
    Independent(Normal) distribution over the output dimensions.
    """

    def __init__(self, input_dim, output_dim, hidden_sizes=(32, 32), hidden_nonlinearity=torch.tanh, hidden_w_init=nn.init.xavier_uniform_, hidden_b_init=nn.init.zeros_, output_nonlinearity=None, output_w_init=nn.init.xavier_uniform_, output_b_init=nn.init.zeros_, learn_std=True, init_std=1.0, min_std=1e-06, max_std=None, std_hidden_sizes=(32, 32), std_hidden_nonlinearity=torch.tanh, std_hidden_w_init=nn.init.xavier_uniform_, std_hidden_b_init=nn.init.zeros_, std_output_nonlinearity=None, std_output_w_init=nn.init.xavier_uniform_, std_parameterization='exp', layer_normalization=False):
        """Store all network/std hyperparameters; see forward() for semantics."""
        super().__init__()
        self._input_dim = input_dim
        self._hidden_sizes = hidden_sizes
        self._action_dim = output_dim
        self._learn_std = learn_std
        self._std_hidden_sizes = std_hidden_sizes
        self._min_std = min_std
        self._max_std = max_std
        self._std_hidden_nonlinearity = std_hidden_nonlinearity
        self._std_hidden_w_init = std_hidden_w_init
        self._std_hidden_b_init = std_hidden_b_init
        self._std_output_nonlinearity = std_output_nonlinearity
        self._std_output_w_init = std_output_w_init
        self._std_parameterization = std_parameterization
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._output_nonlinearity = output_nonlinearity
        self._output_w_init = output_w_init
        self._output_b_init = output_b_init
        self._layer_normalization = layer_normalization
        if self._std_parameterization not in ('exp', 'softplus'):
            raise NotImplementedError
        # Std is stored in log space; learnable when learn_std is True.
        init_std_param = torch.Tensor([init_std]).log()
        if self._learn_std:
            self._init_std = torch.nn.Parameter(init_std_param)
        else:
            self._init_std = init_std_param
        self._min_std_param = self._max_std_param = None
        if min_std is not None:
            self._min_std_param = torch.Tensor([min_std]).log()
        if max_std is not None:
            self._max_std_param = torch.Tensor([max_std]).log()

    def _get_mean_and_log_std(self, *inputs):
        """Subclass hook: return (mean, uncentered log std) for `inputs`."""
        pass

    def forward(self, *inputs):
        """Return an Independent(Normal) distribution over the outputs."""
        (mean, log_std_uncentered) = self._get_mean_and_log_std(*inputs)
        # BUG FIX: previously `if (self._min_std_param or self._max_std_param)`,
        # which tests *tensor truthiness* -- a bound whose log equals 0 (i.e.
        # std bound of exactly 1.0) was treated as absent and never clamped.
        if (self._min_std_param is not None) or (self._max_std_param is not None):
            # torch.clamp accepts None for an unset bound.
            log_std_uncentered = log_std_uncentered.clamp(min=self._to_scalar_if_not_none(self._min_std_param), max=self._to_scalar_if_not_none(self._max_std_param))
        if self._std_parameterization == 'exp':
            std = log_std_uncentered.exp()
        else:
            # 'softplus': stored value x encodes std = log(1 + exp(x)).
            std = log_std_uncentered.exp().exp().add(1.0).log()
        dist = Independent(Normal(mean, std), 1)
        return dist

    def _to_scalar_if_not_none(self, tensor):
        """Return tensor.item(), or None unchanged (clamp accepts None bounds)."""
        return None if tensor is None else tensor.item()
def create_X_y():
    """Return a fixed 2-D toy dataset: 15 points with binary labels."""
    points = [
        (-1, 1), (-0.75, 0.5), (-1.5, 1.5),
        (1, 1), (0.75, 0.5), (1.5, 1.5),
        (1, -1), (-0.5, 0.5), (0.5, 0.5),
        (0, -1), (0.75, -0.5), (0.0, 0.0),
        (-1, -1), (0, -0.5), (1, -1),
    ]
    # Labels alternate in runs of three: 0,0,0, 1,1,1, ...
    labels = [0] * 3 + [1] * 3 + [0] * 3 + [1] * 3 + [0] * 3
    return (np.array(points), np.array(labels))
class AlexnetCifar10Model(model.Model):
    """AlexNet variant sized for 32x32 CIFAR-10 images."""

    def __init__(self):
        # name, image size, batch size, base learning rate
        super(AlexnetCifar10Model, self).__init__('alexnet', 32, 128, 0.1)

    def add_inference(self, cnn):
        """Build the conv/pool/LRN feature extractor plus two dense layers."""
        lrn_kwargs = dict(depth_radius=4, bias=1.0, alpha=(0.001 / 9.0), beta=0.75)
        cnn.conv(64, 5, 5, 1, 1, 'SAME', stddev=0.05)
        cnn.mpool(3, 3, 2, 2, mode='SAME')
        cnn.lrn(**lrn_kwargs)
        cnn.conv(64, 5, 5, 1, 1, 'SAME', bias=0.1, stddev=0.05)
        cnn.lrn(**lrn_kwargs)
        cnn.mpool(3, 3, 2, 2, mode='SAME')
        # Flatten the (h, w, c) feature map for the fully connected layers.
        shape = cnn.top_layer.get_shape().as_list()
        cnn.reshape([-1, shape[1] * shape[2] * shape[3]])
        cnn.affine(384, stddev=0.04, bias=0.1)
        cnn.affine(192, stddev=0.04, bias=0.1)

    def get_learning_rate(self, global_step, batch_size):
        """Step-decay schedule: multiply the rate by 0.1 every 100 epochs."""
        num_examples_per_epoch = 50000
        num_epochs_per_decay = 100
        decay_steps = int((num_epochs_per_decay * num_examples_per_epoch) / batch_size)
        decay_factor = 0.1
        return tf.train.exponential_decay(self.learning_rate, global_step,
                                          decay_steps, decay_factor, staircase=True)
class EllipticCurveHom_sum(EllipticCurveHom):
    """Formal sum of elliptic-curve morphisms with common domain and codomain.

    Represents phi = phi_1 + ... + phi_n under pointwise addition; the degree
    is expensive to determine and therefore computed lazily.
    NOTE(review): the bare `_method` lines below look like mangled decorators
    (presumably @cached_method) -- confirm against upstream.
    """
    _degree = None
    _phis = None
    def __init__(self, phis, domain=None, codomain=None):
        """Validate the summands and record the (common) domain and codomain.

        Raises ValueError when no summands are given without explicit curves,
        or when any summand's domain/codomain disagrees.
        """
        phis = tuple(phis)
        if ((not phis) and ((domain is None) or (codomain is None))):
            raise ValueError('need either phis or both domain and codomain')
        for phi in phis:
            if (not isinstance(phi, EllipticCurveHom)):
                raise ValueError(f'not an elliptic-curve morphism: {phi}')
        if (domain is None):
            domain = phis[0].domain()
        if (codomain is None):
            codomain = phis[0].codomain()
        for phi in phis:
            if (phi.domain() != domain):
                raise ValueError(f'summand {phi} has incorrect domain (need {domain})')
            if (phi.codomain() != codomain):
                raise ValueError(f'summand {phi} has incorrect codomain (need {codomain})')
        self._phis = phis
        self._domain = domain
        self._codomain = codomain
        # Temporarily set a degree so the base-class constructor accepts the
        # object, then reset it; the real degree is computed on demand.
        self._degree = 0
        EllipticCurveHom.__init__(self, self._domain, self._codomain)
        self._degree = None
    def _call_(self, P):
        # Evaluate the sum at P by adding the images under every summand.
        return sum((phi(P) for phi in self._phis), self._codomain(0))
    def _eval(self, P):
        # Like _call_, but accepts points over extensions of the base field.
        if self._domain.defining_polynomial()(*P):
            raise ValueError(f'{P} not on {self._domain}')
        k = Sequence(P).universe()
        return sum((phi._eval(P) for phi in self._phis), self._codomain.base_extend(k)(0))
    def _repr_(self):
        return f'''Sum morphism:
  From: {self._domain}
  To: {self._codomain}
  Via: {self._phis}'''
    def summands(self):
        """Return the tuple of summand morphisms."""
        return self._phis
    _method
    def to_isogeny_chain(self):
        """Express this sum as a composition of prime-degree isogenies
        (possibly followed by Frobenius and an isomorphism).

        Raises ValueError for the zero morphism.
        """
        deg = self.degree()
        if deg.is_zero():
            raise ValueError('zero morphism cannot be written as a composition of isogenies')
        p = self.base_ring().characteristic()
        # Valuation of the inseparable (Frobenius) part in characteristic p.
        insep = (self.inseparable_degree().valuation(p) if p else 0)
        scalar = 1
        ker = []
        for (l, m) in deg.factor():
            if (l == p):
                if (insep < m):
                    # Separable p-part: one kernel point of p-power order.
                    P = point_of_order(self.domain(), (p ** (m - insep)))
                    ker.append(P)
                continue
            # Work over a field containing the full l^m-torsion.
            F = self.domain().division_polynomial((l ** m)).splitting_field('X').extension(2, 'Y')
            (P, Q) = self.domain().change_ring(F).torsion_basis((l ** m))
            if self.is_endomorphism():
                (R, S) = (P, Q)
            else:
                (R, S) = self.codomain().change_ring(F).torsion_basis((l ** m))
            M = self.matrix_on_subgroup((P, Q), (R, S))
            # Factor out any scalar multiplication hiding in the l-part.
            g = ZZ(gcd(M.list())).p_primary_part(l)
            if (g > 1):
                scalar *= g
                M = (M.change_ring(ZZ) / g).change_ring(M.base_ring())
            K = M.left_kernel_matrix()
            for row in K:
                (u, v) = map(ZZ, row)
                pt = ((u * P) + (v * Q))
                pt.set_order(row.additive_order())
                ker.append(pt)
        from sage.schemes.elliptic_curves.hom_composite import EllipticCurveHom_composite
        phi = EllipticCurveHom_composite(self.domain(), [])
        if (scalar != 1):
            phi *= phi.codomain().scalar_multiplication(scalar)
        # Quotient by each kernel point, one prime-degree isogeny at a time.
        while ker:
            K = ker.pop(0)
            ((l, e),) = K.order().factor()
            for i in reversed(range(e)):
                Kl = ((l ** i) * K)
                Kl.set_order(l)
                from sage.groups.generic import multiples
                from sage.misc.misc_c import prod
                x = polygen(Kl.base_ring())
                # Kernel polynomial from the x-coordinates of the multiples of Kl.
                poly = prod(((x - T.xy()[0]) for T in multiples(Kl, (l // 2), Kl)))
                poly = poly.change_ring(self.base_ring())
                psi = phi.codomain().isogeny(poly)
                phi = (psi * phi)
                # Push the remaining kernel points through the new isogeny.
                K = psi._eval(K)
                ker = [psi._eval(P) for P in ker]
        if insep:
            frob = phi.codomain().frobenius_isogeny(insep)
            phi = (frob * phi)
        # A final isomorphism matches phi exactly to this morphism.
        from sage.schemes.elliptic_curves.hom import find_post_isomorphism
        iso = find_post_isomorphism(phi, self)
        return (iso * phi)
    def _degree_bounds(self):
        # Running bounds using deg(f+g) within 2*sqrt(deg f * deg g) of
        # deg f + deg g (Cauchy-Schwarz for the degree quadratic form).
        (lo, hi) = (ZZ.zero(), ZZ.zero())
        for phi in self._phis:
            m = (hi * phi.degree()).isqrt()
            hi += (phi.degree() + (2 * m))
            lo += (phi.degree() - (2 * m))
            lo = max(lo, 0)
        return (lo, hi)
    def _compute_degree(self):
        """Determine and cache the degree (also on the dual) via CRT on small primes."""
        if (self._degree is not None):
            return
        if (len(self._phis) == 0):
            self._degree = 0
        elif (len(self._phis) == 1):
            self._degree = self._phis[0].degree()
        else:
            from sage.rings.finite_rings.integer_mod import Mod
            (lo, hi) = self._degree_bounds()
            M = ((hi - lo) + 1)
            rem = Mod(0, 1)
            for l in Primes():
                if (rem.modulus() >= M):
                    break
                try:
                    P = point_of_order(self._domain, l)
                except ValueError:
                    continue
                # dual∘self acts as multiplication by the degree on l-torsion.
                Q = self.dual()._eval(self._eval(P))
                d = discrete_log(Q, P, ord=l, operation='+')
                rem = rem.crt(Mod((d - lo), l))
            self._degree = (lo + rem.lift())
            self.dual()._degree = self._degree
    # NOTE(review): defined without `self` -- sage's comparison-hook convention.
    def _comparison_impl(left, right, op):
        # Only equality is supported; decided by evaluating both maps on points.
        from sage.structure.richcmp import op_EQ
        if (op != op_EQ):
            return NotImplemented
        try:
            return compare_via_evaluation(left, right)
        except NotImplementedError:
            return NotImplemented
    def degree(self):
        """Return the degree of this morphism (computed lazily)."""
        if (self._degree is None):
            self._compute_degree()
        return self._degree
    def rational_maps(self):
        # Delegates to the isogeny-chain form (expensive).
        return self.to_isogeny_chain().rational_maps()
    def x_rational_map(self):
        return self.to_isogeny_chain().x_rational_map()
    def kernel_polynomial(self):
        return self.to_isogeny_chain().kernel_polynomial()
    def scaling_factor(self):
        # Scaling factors are additive over a sum of morphisms.
        return sum((phi.scaling_factor() for phi in self._phis))
    _method
    def dual(self):
        """Return the dual (the sum of the duals), with degree/trace caches linked."""
        psi = EllipticCurveHom_sum((phi.dual() for phi in self._phis), domain=self._codomain, codomain=self._domain)
        psi._degree = self._degree
        if self.trace.is_in_cache():
            psi.trace.set_cache((- self.trace.cache))
        psi.dual.set_cache(self)
        return psi
    _method
    def inseparable_degree(self):
        """Return the inseparable degree; raises ValueError for the zero map."""
        if self.is_zero():
            raise ValueError('zero morphism is not an isogeny')
        p = self.base_ring().characteristic()
        if (not p):
            return ZZ.one()
        m = self.degree().valuation(p)
        if (not m):
            return ZZ.one()
        try:
            P = point_of_order(self.domain(), (p ** m))
        except ValueError:
            # No suitable point exists: the whole p-part is inseparable.
            return (p ** m)
        # The order of the image of a p^m-torsion point isolates the separable part.
        Q = self._eval(P)
        return order_from_multiple(Q, (p ** m))
class PredictionToGroundTruthSampler():
    """Converts model predictions into pseudo-ground-truth fields via samplers."""

    def __init__(self, dataset_name: str=''):
        self.dataset_name = dataset_name
        self._samplers = {}
        # Defaults: copy box/class predictions into GT fields; drop raw scores.
        self.register_sampler('pred_boxes', 'gt_boxes', None)
        self.register_sampler('pred_classes', 'gt_classes', None)
        self.register_sampler('scores')

    def __call__(self, model_output: ModelOutput) -> SampledData:
        for output_item in model_output:
            instances: Instances = output_item['instances']
            # First pass: materialize each destination field from its source.
            for sampler in self._samplers.values():
                if (not instances.has(sampler.src)) or (sampler.dst is None):
                    continue
                value = (instances.get(sampler.src) if sampler.func is None
                         else sampler.func(instances))
                instances.set(sampler.dst, value)
            # Second pass: remove source fields that were renamed or dropped.
            for sampler in self._samplers.values():
                if (sampler.src != sampler.dst) and instances.has(sampler.src):
                    instances.remove(sampler.src)
            output_item['dataset'] = self.dataset_name
        return model_output

    def register_sampler(self, prediction_attr: str, gt_attr: Optional[str]=None, func: Optional[Callable[([Any], Any)]]=None):
        # Map a prediction field to a GT field, optionally through `func`;
        # a None gt_attr means the source field is simply removed.
        self._samplers[prediction_attr] = _Sampler(src=prediction_attr, dst=gt_attr, func=func)
class DiverseBeamDecoder(BeamDecoder):
    """Beam search with diversity groups (Vijayakumar et al., 2016)."""
    name = 'diverse_beam'

    def __init__(self, decoder_args):
        super(DiverseBeamDecoder, self).__init__(decoder_args)
        assert not self.gumbel
        self.beam_size = decoder_args.beam
        self.num_groups = decoder_args.diversity_groups
        self.lmbda = decoder_args.diversity_reward
        # Distribute the beam across groups as evenly as possible; the first
        # groups absorb any remainder.
        self.group_sizes = [self.beam_size // self.num_groups] * self.num_groups
        for i in range(self.beam_size - (self.group_sizes[0] * self.num_groups)):
            self.group_sizes[i] += 1
        assert sum(self.group_sizes) == self.beam_size

    def _get_initial_hypos(self):
        """One fresh initial hypothesis (with its own predictor state) per group."""
        return [[PartialHypothesis(copy.deepcopy(self.get_predictor_states()), self.calculate_stats)] for i in range(self.num_groups)]

    def _get_next_hypos(self, all_hypos, size, other_groups=None):
        """Pick the `size` best hypotheses, penalizing overlap with other groups."""
        all_scores = np.array([self.get_adjusted_score(hypo) for hypo in all_hypos])
        if other_groups:
            all_scores = all_scores + (self.lmbda * self.hamming_distance_penalty(all_hypos, utils.flattened(other_groups)))
        inds = utils.argmax_n(all_scores, size)
        return [all_hypos[ind] for ind in inds]

    def decode(self, src_sentence):
        """Decode `src_sentence`, maintaining one beam per diversity group."""
        self.count = 0
        self.time = 0
        self.initialize_predictor(src_sentence)
        hypos = self._get_initial_hypos()
        it = 1
        while (not self.stop_criterion(utils.flattened(hypos))) and (it < self.max_len):
            it = it + 1
            next_hypos = []
            for (i, group) in enumerate(hypos):
                next_group = []
                for hypo in group:
                    # Finished hypotheses are carried over unchanged.
                    if hypo.get_last_word() == utils.EOS_ID:
                        next_group.append(hypo)
                        continue
                    for next_hypo in self._expand_hypo(hypo):
                        next_group.append(next_hypo)
                # Earlier groups serve as the diversity reference for later ones.
                next_hypos.append(self._get_next_hypos(next_group, self.group_sizes[i], next_hypos))
            hypos = next_hypos
        return self.get_full_hypos_sorted(utils.flattened(hypos))

    # BUG FIX: this was a plain method without `self`, so the call
    # `self.hamming_distance_penalty(a, b)` above raised a TypeError
    # (3 arguments passed to a 2-parameter function). Made static.
    @staticmethod
    def hamming_distance_penalty(set1, set2):
        """Per-hypothesis Hamming distance of set1 against all of set2."""
        longest_hypo = len(max(set1 + set2, key=len))
        hypos = utils.as_ndarray(set1, min_length=longest_hypo)
        other_hypos = utils.as_ndarray(set2, min_length=longest_hypo)
        return np.apply_along_axis(lambda x: utils.hamming_distance(x, other_hypos), 1, hypos)

    # BUG FIX: likewise defined without `self`; made static so it can be
    # invoked on the class when registering CLI arguments.
    @staticmethod
    def add_args(parser):
        parser.add_argument('--diversity_groups', default=1, type=int, help="If this is greater than one, promote diversity between groups of hypotheses as in Vijayakumar et. al. (2016). Only compatible with 'diverse_beam' decoder. They found diversity_groups = beam size to be most effective.")
        parser.add_argument('--diversity_reward', default=0.5, type=float, help="If this is greater than zero, add reward for diversity between groups as in Vijayakumar et. al. (2016). Only compatible with 'diverse_beam' decoder. Setting value equal to 0 recovers standard beam search.")
def count_entity_freq(data_path):
    """Count per-token entity-label frequencies in a CoNLL-style file.

    Each data line is whitespace-separated with the token first and the label
    last; BIO prefixes ('B-'/'I-') are stripped. Returns a pair of nested
    dicts: (entity_freq[word][label] -> count, label_map[label][word] -> count).
    """
    entity_freq = collections.defaultdict(dict)
    label_map = collections.defaultdict(dict)
    with open(data_path, 'r') as f:
        for raw_line in f.readlines():
            # Skip blank lines and document separators.
            if len(raw_line) < 2 or '-DOCSTART-' in raw_line:
                continue
            fields = raw_line.strip().split()
            word, label = fields[0], fields[-1]
            if label != 'O':
                label = label[2:]  # drop the 'B-'/'I-' BIO prefix
            entity_freq[word][label] = entity_freq[word].get(label, 0) + 1
            label_map[label][word] = label_map[label].get(word, 0) + 1
    return (entity_freq, label_map)
def convert_ts_unix(fname: str, outname: str):
    """Convert date-stamped CSV rows to unix timestamps shifted one day ahead.

    Reads rows of ('YYYY-MM-DD', user, genre, weight) from `fname` (first row
    is a header) and writes ('ts', 'user_id', 'genre', 'weight') rows with a
    unix-epoch timestamp to `outname`.
    """
    date_format = '%Y-%m-%d'
    with open(outname, 'w') as outf:
        writer = csv.writer(outf)
        writer.writerow(['ts', 'user_id', 'genre', 'weight'])
        with open(fname, 'r') as csv_file:
            reader = csv.reader(csv_file, delimiter=',')
            for index, row in enumerate(reader):
                if index == 0:
                    continue  # skip the header row
                # Shift the (naive, local-time) date one day forward, then
                # convert it to a unix epoch.
                stamp = datetime.datetime.strptime(row[0], date_format) + timedelta(days=1)
                writer.writerow([int(stamp.timestamp()), row[1], row[2], float(row[3])])
def _compute_variables(df: EDAFrame, cfg: Config) -> Dict[(str, Any)]:
    """Compute per-column visualization components, keyed by column name.

    Dispatches on the column's EDA dtype: nominal-like columns get nominal
    components, continuous get continuous stats, datetimes get stats plus a
    delayed line chart; all-missing columns are rendered as nominal strings.
    Raises ValueError for unhandled dtypes.
    """
    data: Dict[(str, Any)] = {}
    if cfg.variables.enable:
        for col in df.columns:
            try:
                dtype = df.get_eda_dtype(col)
                if df.get_missing_cnt(col) == df.shape[0]:
                    # Entirely-missing column: render as nominal with NA strings.
                    srs = df.get_col_as_str(col, na_as_str=True)
                    data[col] = nom_comps(srs, cfg)
                elif isinstance(dtype, (Nominal, GeoGraphy, GeoPoint)):
                    data[col] = nom_comps(df.frame[col], cfg)
                elif isinstance(dtype, SmallCardNum):
                    # Low-cardinality numeric columns are shown as categories.
                    srs = df.get_col_as_str(col, na_as_str=False)
                    data[col] = nom_comps(srs, cfg)
                elif isinstance(dtype, Continuous):
                    data[col] = cont_comps(df.frame[col], cfg)
                elif isinstance(dtype, DateTime):
                    data[col] = {}
                    data[col]['stats'] = calc_stats_dt(df.frame[col])
                    data[col]['line'] = dask.delayed(_calc_line_dt)(df.frame[[col]], 'auto')
                else:
                    raise ValueError(f'unprocessed type in column{col}:{dtype}')
            # FIX: narrowed from a bare `except:` (which also caught
            # KeyboardInterrupt/SystemExit); annotate the failing column on
            # stderr and re-raise the original error.
            except Exception:
                print(f'error happended in column:{col}', file=sys.stderr)
                raise
    return data
# NOTE(review): `_optimizer('radam')` looks like a mangled registration
# decorator (e.g. @register_optimizer('radam')) -- confirm against upstream.
_optimizer('radam')
class FairseqRAdam(FairseqOptimizer):
    """Fairseq wrapper around the RAdam optimizer."""
    def __init__(self, args, params):
        super().__init__(args, params)
        # NOTE(review): `**self.optimizer_config` only works if
        # optimizer_config is a @property upstream; here it is a plain
        # method, suggesting a stripped decorator -- confirm.
        self._optimizer = RAdam(params, **self.optimizer_config)
        # Prefix the optimizer's name with the tensorboard tag for logging.
        self._optimizer.name = ((args.tb_tag + '_') + self._optimizer.name)
    # NOTE(review): defined without `self`; presumably @staticmethod upstream.
    def add_args(parser):
        parser.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B', help='betas for Adam optimizer')
        parser.add_argument('--adam-eps', type=float, default=1e-08, metavar='D', help='epsilon for Adam optimizer')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
        parser.add_argument('--tb-tag', default='', type=str, help='tb tag')
    def optimizer_config(self):
        # Kwargs handed to the underlying RAdam constructor.
        # NOTE(review): eval() of --adam-betas assumes a trusted command line.
        return {'lr': self.args.lr[0], 'betas': eval(self.args.adam_betas), 'eps': self.args.adam_eps, 'weight_decay': self.args.weight_decay}
# NOTE(review): the leading `.parametrize(...)` looks like a mangled
# @pytest.mark.parametrize decorator -- confirm against upstream.
.parametrize('statement_type,value', [(stmt.IntPrimitiveStatement, 42), (stmt.FloatPrimitiveStatement, 42.23), (stmt.StringPrimitiveStatement, 'foo'), (stmt.BytesPrimitiveStatement, b'test'), (stmt.BooleanPrimitiveStatement, True), (stmt.ComplexPrimitiveStatement, (4 + 3j))])
def test_primitive_statement_value(statement_type, default_test_case, value):
    # Each primitive statement type must expose the value it was built with.
    statement = statement_type(default_test_case, value)
    assert (statement.value == value)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    """BigBird QA model specialized for Natural Questions via a custom module class."""
    module_class = FlaxBigBirdForNaturalQuestionsModule
def apply_fixes(args, tmpdir):
    """Invoke clang-apply-replacements on the fixes collected in `tmpdir`.

    Optionally asks the tool to reformat the changed files (-format) using
    the requested style (-style=...).
    """
    command = [args.clang_apply_replacements_binary]
    if args.format:
        command += ['-format']
    if args.style:
        command += ['-style=' + args.style]
    command += [tmpdir]
    subprocess.call(command)
class IndexedRawTextDataset(FairseqDataset):
    """Dataset that loads a raw text file into memory, tokenizing each line.

    Tokens come from `dictionary.encode_line`; everything is held in memory
    (no on-disk index).
    """
    def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
        self.tokens_list = []  # one LongTensor of token ids per line
        self.lines = []  # raw text lines, trailing newline stripped
        self.sizes = []  # per-line token counts; becomes an np.ndarray in read_data
        self.append_eos = append_eos
        self.reverse_order = reverse_order
        self.read_data(path, dictionary)
        # NOTE(review): this attribute shadows the size() *method* below on
        # instances -- confirm this is intentional (fairseq legacy quirk).
        self.size = len(self.tokens_list)
    def read_data(self, path, dictionary):
        """Read `path` line by line, storing raw text, token ids, and lengths."""
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                self.lines.append(line.strip('\n'))
                tokens = dictionary.encode_line(line, add_if_not_exist=False, append_eos=self.append_eos, reverse_order=self.reverse_order).long()
                self.tokens_list.append(tokens)
                self.sizes.append(len(tokens))
        self.sizes = np.array(self.sizes)
    def check_index(self, i):
        # Raise for out-of-range indices (negative indices included).
        if ((i < 0) or (i >= self.size)):
            raise IndexError('index out of range')
    # NOTE(review): `_cache(maxsize=8)` looks like a mangled decorator
    # (presumably @lru_cache(maxsize=8)); caching a method keys on `self`
    # and keeps instances alive -- confirm against upstream.
    _cache(maxsize=8)
    def __getitem__(self, i):
        self.check_index(i)
        return self.tokens_list[i]
    def get_original_text(self, i):
        """Return the raw (untokenized) text of line `i`."""
        self.check_index(i)
        return self.lines[i]
    def __del__(self):
        pass
    def __len__(self):
        return self.size
    def num_tokens(self, index):
        return self.sizes[index]
    def size(self, index):
        # Shadowed by the instance attribute set in __init__; see note there.
        return self.sizes[index]
    def exists(path):
        # NOTE(review): defined without `self`; presumably @staticmethod upstream.
        return PathManager.exists(path)
def nes_op_ray_tracing(x_0, num_points, nes_op, target_front=0.01, solver='specified steps', direction='backward', velocity='interpolation', **kwargs):
    """Trace a ray through the traveltime field represented by `nes_op`.

    Integrates the ray ODE from the traveltime at `x_0` to `target_front`,
    sampling `num_points` points. `solver` selects a fixed-step integrator or
    scipy's solve_ivp; `direction` selects forward/backward tracing; `velocity`
    selects how the medium velocity is evaluated. Extra kwargs are forwarded
    to the integrator. Returns the ray points ordered along `direction`.
    """
    assert (solver in solvers_list), ("Two solvers are supported: 'specified steps' and 'scipy'. " + 'Instead {} is passed.'.format(solver))
    assert (direction in directions_dict), ("Two directions are supported: 'forward' and 'backward'. " + 'Instead {} is passed.'.format(direction))
    assert (velocity in velocities_list), ("Two options are supported for velocity evaluation: 'interpolation' and " + "'learned velocity'.")
    time_0 = np.squeeze(nes_op.Traveltime(np.atleast_2d(x_0)))
    if direction == 'forward':
        # FIX: corrected 'froward' typo in the original error message.
        assert (target_front > time_0), 'Target time must be greater than the initial one for forward tracing.'
    else:
        assert (target_front < time_0), 'Target time must be less than the initial one for backward tracing.'
    # Uniform traveltime samples between the start time and the target front.
    ray_travel_times = np.linspace(np.min([time_0, target_front]), np.max([time_0, target_front]), num_points)
    steps = np.diff(ray_travel_times)
    # Integrator defaults derived from the sampling, unless caller overrides.
    if 'first_step' not in kwargs:
        kwargs['first_step'] = np.min(steps) / 2
    if 'max_step' not in kwargs:
        kwargs['max_step'] = np.max(steps)
    if solver == 'scipy':
        solution = solve_ivp(nes_op_rts_right_part, t_span=[ray_travel_times[0], ray_travel_times[-1]], y0=x_0, args=[nes_op, direction, velocity], t_eval=ray_travel_times, **kwargs)
        ray = np.transpose(solution.y)
    else:
        ray = nintegrate_ode_system(nes_op_rts_right_part, x_0, ray_travel_times, args=[nes_op, direction, velocity])
    # Reverse the point order for backward tracing (directions_dict maps to a +/-1 step).
    return ray[::directions_dict[direction]]
class TFDistilBertForTokenClassification():
    """Placeholder that raises a helpful error when TensorFlow is unavailable."""
    def __init__(self, *args, **kwargs):
        requires_tf(self)
    # NOTE(review): upstream dummy objects define this as a @classmethod;
    # the decorator appears stripped here -- confirm.
    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
def showLight():
    """Play each crash video, displaying a grayscale mask of the red hues.

    Press 'q' to skip to the next video. Note: chdir is a process-wide side
    effect inherited from the original script.
    """
    os.chdir('./medirl-master/Code/')
    VideoDir = './medirl-master/videos/crash-video'
    videos = glob.glob(VideoDir + '/*.mp4')
    for v in videos:
        cap = cv2.VideoCapture(v)
        while cap.isOpened():
            ret, frame = cap.read()
            # BUG FIX: the read flag was never checked -- when the stream
            # ends, frame is None and cv2.medianBlur would raise.
            if not ret:
                break
            blur_frame = cv2.medianBlur(frame, 3)
            hsv_image = cv2.cvtColor(blur_frame, cv2.COLOR_BGR2HSV)
            # Red wraps around the hue circle, so mask both ends and combine.
            lower_red_hue = create_hue_mask(hsv_image, [0, 100, 100], [10, 255, 255])
            higher_red_hue = create_hue_mask(hsv_image, [160, 100, 100], [179, 255, 255])
            full_image = cv2.addWeighted(lower_red_hue, 1.0, higher_red_hue, 1.0, 0.0)
            image_gray = cv2.cvtColor(full_image, cv2.COLOR_BGR2GRAY)
            cv2.imshow('frame', image_gray)
            if (cv2.waitKey(1) & 255) == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()
class LSTM(nn.Module):
    """Recurrent head over 1-D feature maps: (N, C, T) -> {'1D': (N, C_out, T)}."""

    def __init__(self, c_mid, base_ch=256, num_layers=2, bidirectional=True):
        super(LSTM, self).__init__()
        self.rnn = nn.LSTM(c_mid, hidden_size=base_ch, num_layers=num_layers, bidirectional=bidirectional)
        # A bidirectional LSTM doubles the output channel count.
        num_directions = 2 if bidirectional else 1
        self.out_channels = base_ch * num_directions

    def forward(self, feat):
        # (N, C, T) -> (T, N, C) for nn.LSTM, then back to (N, C_out, T).
        seq_first = feat.permute(2, 0, 1)
        outputs, _ = self.rnn(seq_first)
        return {'1D': outputs.permute(1, 2, 0).contiguous()}
def prepare_annos(dir_to_video):
    """Build a per-video metadata dict: {video_id: {duration, num_frames, fps}}.

    Video ids are file names with the 4-character extension (e.g. '.mp4')
    stripped.
    """
    vids = os.listdir(dir_to_video)
    vid2annos = {vid[:-4]: {} for vid in vids}
    for video_file in glob.glob(os.path.join(dir_to_video, '*')):
        vid = video_file.split('/')[-1][:-4]
        duration, num_frames, fps = get_meta_info(video_file)
        vid2annos[vid]['duration'] = duration
        vid2annos[vid]['num_frames'] = num_frames
        vid2annos[vid]['fps'] = fps
    return vid2annos
def open(domain_filename=None, task_filename=None):
    """Parse the PDDL domain/task pair and return the parsed task.

    Falls back to the filenames stored in the global `options` when arguments
    are omitted. NOTE: this function intentionally shadows the builtin `open`
    within this module; renaming would break existing callers.
    """
    task_filename = task_filename if task_filename else options.task
    domain_filename = domain_filename if domain_filename else options.domain
    domain_pddl = parse_pddl_file('domain', domain_filename)
    task_pddl = parse_pddl_file('task', task_filename)
    return parsing_functions.parse_task(domain_pddl, task_pddl)
def main():
    """CLI entry point: build a LiDAR-NeRF model for the selected dataset and
    either train it or run test/evaluation, depending on the parsed options.
    """
    parser = get_arg_parser()
    opt = parser.parse_args()
    # LiDAR supervision is always forced on for this script.
    opt.enable_lidar = True
    # Valid --sequence_id values for each supported dataloader.
    kitti360_sequence_ids = ['1538', '1728', '1908', '3353']
    nerf_mvl_sequence_ids = ['bollard', 'car', 'pedestrian', 'pier', 'plant', 'tire', 'traffic_cone', 'warning_sign', 'water_safety_barrier']
    if (opt.dataloader == 'kitti360'):
        from lidarnerf.dataset.kitti360_dataset import KITTI360Dataset as NeRFDataset
        if (opt.sequence_id not in kitti360_sequence_ids):
            raise ValueError(f'Unknown sequence id {opt.sequence_id} for {opt.dataloader}')
    elif (opt.dataloader == 'nerf_mvl'):
        from lidarnerf.dataset.nerfmvl_dataset import NeRFMVLDataset as NeRFDataset
        if (opt.sequence_id not in nerf_mvl_sequence_ids):
            raise ValueError(f'Unknown sequence id {opt.sequence_id} for {opt.dataloader}')
    else:
        # argparse choices are expected to prevent any other value.
        raise RuntimeError('Should not reach here.')
    # Persist the full option set alongside the run for reproducibility.
    os.makedirs(opt.workspace, exist_ok=True)
    f = os.path.join(opt.workspace, 'args.txt')
    with open(f, 'w') as file:
        for arg in vars(opt):
            attr = getattr(opt, arg)
            file.write('{} = {}\n'.format(arg, attr))
    # -L is a convenience flag bundling fp16 + tiny-cuda-nn + dataset preload.
    if opt.L:
        opt.fp16 = True
        opt.tcnn = True
        opt.preload = True
    if (opt.patch_size > 1):
        # NOTE(review): the message has the operands reversed; the check is
        # that num_rays is divisible by patch_size ** 2.
        assert ((opt.num_rays % (opt.patch_size ** 2)) == 0), 'patch_size ** 2 should be dividable by num_rays.'
    # Near-plane distances are tied to the scene scale.
    opt.min_near = opt.scale
    opt.min_near_lidar = opt.scale
    if opt.tcnn:
        # tiny-cuda-nn backend implies fp16 and has no background model.
        opt.fp16 = True
        assert (opt.bg_radius <= 0), 'background model is not implemented for --tcnn'
        from lidarnerf.nerf.network_tcnn import NeRFNetwork
        model = NeRFNetwork(encoding='hashgrid', desired_resolution=opt.desired_resolution, log2_hashmap_size=opt.log2_hashmap_size, n_features_per_level=opt.n_features_per_level, num_layers=opt.num_layers, hidden_dim=opt.hidden_dim, geo_feat_dim=opt.geo_feat_dim, bound=opt.bound, density_scale=1, min_near=opt.min_near, min_near_lidar=opt.min_near_lidar, density_thresh=opt.density_thresh, bg_radius=opt.bg_radius)
    else:
        from lidarnerf.nerf.network import NeRFNetwork
        model = NeRFNetwork(encoding='hashgrid', desired_resolution=opt.desired_resolution, log2_hashmap_size=opt.log2_hashmap_size, num_layers=opt.num_layers, hidden_dim=opt.hidden_dim, geo_feat_dim=opt.geo_feat_dim, bound=opt.bound, density_scale=1, min_near=opt.min_near, density_thresh=opt.density_thresh, bg_radius=opt.bg_radius)
    print(opt)
    seed_everything(opt.seed)
    print(model)
    # Per-quantity loss functions; reduction='none' so the trainer can weight
    # individual rays/pixels itself.
    loss_dict = {'mse': torch.nn.MSELoss(reduction='none'), 'l1': torch.nn.L1Loss(reduction='none'), 'bce': torch.nn.BCEWithLogitsLoss(reduction='none'), 'huber': torch.nn.HuberLoss(reduction='none', delta=(0.2 * opt.scale)), 'cos': torch.nn.CosineSimilarity()}
    criterion = {'depth': loss_dict[opt.depth_loss], 'raydrop': loss_dict[opt.raydrop_loss], 'intensity': loss_dict[opt.intensity_loss], 'grad': loss_dict[opt.depth_grad_loss]}
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    if (opt.test or opt.test_eval):
        # --- test / evaluation path ---
        test_loader = NeRFDataset(device=device, split='test', root_path=opt.path, sequence_id=opt.sequence_id, preload=opt.preload, scale=opt.scale, offset=opt.offset, fp16=opt.fp16, patch_size_lidar=opt.patch_size_lidar, enable_lidar=opt.enable_lidar, num_rays_lidar=opt.num_rays_lidar).dataloader()
        if opt.enable_lidar:
            depth_metrics = [MAEMeter(intensity_inv_scale=opt.intensity_inv_scale), RMSEMeter(), DepthMeter(scale=opt.scale), PointsMeter(scale=opt.scale, intrinsics=test_loader._data.intrinsics_lidar)]
        else:
            depth_metrics = []
        trainer = Trainer('lidar_nerf', opt, model, device=device, workspace=opt.workspace, criterion=criterion, fp16=opt.fp16, depth_metrics=depth_metrics, use_checkpoint=opt.ckpt)
        if (test_loader.has_gt and opt.test_eval):
            trainer.evaluate(test_loader)
        trainer.test(test_loader, write_video=False)
        trainer.save_mesh(resolution=128, threshold=10)
    else:
        # --- training path ---
        optimizer = (lambda model: torch.optim.Adam(model.get_params(opt.lr), betas=(0.9, 0.99), eps=1e-15))
        train_loader = NeRFDataset(device=device, split='train', root_path=opt.path, sequence_id=opt.sequence_id, preload=opt.preload, scale=opt.scale, offset=opt.offset, fp16=opt.fp16, patch_size_lidar=opt.patch_size_lidar, enable_lidar=opt.enable_lidar, num_rays_lidar=opt.num_rays_lidar).dataloader()
        # Exponential decay from 1.0 down to 0.1 over opt.iters steps.
        scheduler = (lambda optimizer: torch.optim.lr_scheduler.LambdaLR(optimizer, (lambda iter: (0.1 ** min((iter / opt.iters), 1)))))
        if opt.enable_lidar:
            depth_metrics = [MAEMeter(intensity_inv_scale=opt.intensity_inv_scale), RMSEMeter(), DepthMeter(scale=opt.scale), PointsMeter(scale=opt.scale, intrinsics=train_loader._data.intrinsics_lidar)]
        else:
            depth_metrics = []
        trainer = Trainer('lidar_nerf', opt, model, device=device, workspace=opt.workspace, optimizer=optimizer, criterion=criterion, ema_decay=0.95, fp16=opt.fp16, lr_scheduler=scheduler, scheduler_update_every_step=True, depth_metrics=depth_metrics, use_checkpoint=opt.ckpt, eval_interval=opt.eval_interval)
        valid_loader = NeRFDataset(device=device, split='val', root_path=opt.path, sequence_id=opt.sequence_id, preload=opt.preload, scale=opt.scale, offset=opt.offset, fp16=opt.fp16, patch_size_lidar=opt.patch_size_lidar, enable_lidar=opt.enable_lidar, num_rays_lidar=opt.num_rays_lidar).dataloader()
        # Translate an iteration budget into whole epochs.
        max_epoch = np.ceil((opt.iters / len(train_loader))).astype(np.int32)
        print(f'max_epoch: {max_epoch}')
        trainer.train(train_loader, valid_loader, max_epoch)
        # Final test pass (with video) after training completes.
        test_loader = NeRFDataset(device=device, split='test', root_path=opt.path, sequence_id=opt.sequence_id, preload=opt.preload, scale=opt.scale, offset=opt.offset, fp16=opt.fp16, patch_size_lidar=opt.patch_size_lidar, enable_lidar=opt.enable_lidar, num_rays_lidar=opt.num_rays_lidar).dataloader()
        if test_loader.has_gt:
            trainer.evaluate(test_loader)
        trainer.test(test_loader, write_video=True)
        trainer.save_mesh(resolution=128, threshold=10)
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original HiFi-GAN vocoder checkpoint to a
    ``SpeechT5HifiGan`` model and save (optionally push) it.

    NOTE(review): the extracted source had a bare ``_grad()`` line here —
    clearly the tail of a mangled ``@torch.no_grad()`` decorator, restored
    above (the function only loads/copies weights, so no gradients needed).

    Args:
        checkpoint_path: path to the original generator checkpoint (torch).
        stats_path: .npy file whose rows 0/1 hold the mel mean and scale.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        config_path: optional pretrained config to load instead of defaults.
        repo_id: if set, push the converted model to the Hub.
    """
    if (config_path is not None):
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)
    # Normalization statistics: row 0 = mean, row 1 = scale, flattened.
    stats = np.load(stats_path)
    mean = stats[0].reshape((- 1))
    scale = stats[1].reshape((- 1))
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
def postprocess_image(image, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    """Undo ImageNet-style normalization and convert a 1xCxHxW tensor to an
    HxWxC uint8 numpy image.

    Defaults are the standard ImageNet mean/std. The defaults were changed
    from lists to tuples: mutable default arguments are a Python footgun
    (shared across calls); callers passing lists are unaffected.
    """
    image = UnNormalize(mean, std)(image)
    # Scale to [0, 255], drop the batch dim, move channels last.
    return (image * 255).squeeze(0).transpose(1, 2, 0).numpy().astype(np.uint8)
def _test_reshape_output_and_gradient(old_shape, new_shape, expected_shape=None, arg_shape=True, in_place=False, expected_gradient=None):
    """Run caffe2's Reshape op on random data and check output (and
    optionally gradient) on every available device.

    Args:
        old_shape: shape of the random input tensor.
        new_shape: target shape for Reshape.
        expected_shape: shape to compare against; defaults to new_shape.
        arg_shape: if True pass the shape as an op argument, otherwise feed
            it as a second input blob.
        in_place: reuse the input blob name as the output.
        expected_gradient: when given, gradient ops are added and the input
            gradient is compared against it.
    """
    devices = [core.DeviceOption(caffe2_pb2.CPU, 0)]
    if (workspace.NumGpuDevices() > 0):
        devices.append(core.DeviceOption(workspace.GpuDeviceType, 0))
    for device_opt in devices:
        with core.DeviceScope(device_opt):
            if (expected_shape is None):
                expected_shape = new_shape
            net = core.Net('net')
            if (len(old_shape) == 0):
                # Scalar case: promote to a 1-element array.
                # NOTE(review): this branch is not cast to float32 while the
                # other one is — looks inconsistent; confirm it is deliberate.
                X = np.atleast_1d(np.random.rand(*old_shape))
            else:
                X = np.random.rand(*old_shape).astype(np.float32)
            blob_in = 'X'
            blob_out = (blob_in if in_place else (blob_in + '_out'))
            if arg_shape:
                (out, _) = net.Reshape([blob_in], [blob_out, 'old_shape'], shape=new_shape)
            else:
                # Shape comes in as a runtime input blob instead of an arg.
                (out, _) = net.Reshape([blob_in, 'new_shape'], [blob_out, 'old_shape'])
                workspace.FeedBlob('new_shape', np.asarray(new_shape))
            workspace.FeedBlob(blob_in, X)
            if (expected_gradient is not None):
                net.AddGradientOperators([out])
            workspace.CreateNet(net)
            workspace.RunNetOnce(net)
            Y = workspace.FetchBlob(blob_out)
            np.testing.assert_allclose(Y, X.reshape(expected_shape))
            if (expected_gradient is not None):
                data_grad = workspace.FetchBlob((blob_in + '_grad'))
                np.testing.assert_array_equal(data_grad, expected_gradient)
def _grail_one_hop_lfs(entity, relations, dataset, incoming):
    """Build none/count logical forms for each legal relation adjacent to
    *entity*.

    For every legal relation, the (possibly CVT-expanded) class on the far
    side becomes the question node of a tiny two-node graph; both a plain
    (`none_function`) and a counting (`count_function`) logical form are
    emitted, in that order.

    Args:
        incoming: True for relations pointing INTO the entity (class is the
            relation's domain, edges run 1->0), False for outgoing relations
            (class is the range, edges run 0->1).
    """
    lfs = []
    for r in relations:
        if (not legal_relation(r, dataset)):
            continue
        # relations_info[r] = (domain, range); pick the far-side class.
        far_class = relations_info[r][0] if incoming else relations_info[r][1]
        sub_classes = resolve_cvt_sub_classes(far_class, dataset)
        for sub_class in sub_classes:
            G = nx.MultiDiGraph()
            G.add_node(0, id=entity, type='entity', question=0, function='none')
            G.add_node(1, id=sub_class, type='class', question=1, function='none')
            if incoming:
                G.add_edge(1, 0, relation=r, reverse=False, visited=False)
                G.add_edge(0, 1, relation=r, reverse=True, visited=False)
            else:
                G.add_edge(0, 1, relation=r, reverse=False, visited=False)
                G.add_edge(1, 0, relation=r, reverse=True, visited=False)
            # count_function mutates its graph, so give it a private copy.
            G1 = deepcopy(G)
            lfs.append(none_function(G, 1))
            lfs.append(count_function(G1, 1))
    return lfs

def grail_enum_one_hop_one_entity_candidates(entity: str, use_master=True):
    """Enumerate all one-hop GrailQA logical-form candidates anchored at a
    single entity.

    Adjacent relations come from the query cache when available, otherwise
    from a live KB lookup; redundant inverse pairs are removed before
    enumeration. Incoming relations are enumerated first, then outgoing,
    preserving the original candidate order.
    """
    if (CacheBackend.cache is not None):
        (in_relations_e, out_relations_e) = CacheBackend.cache.query_relations(entity)
    else:
        (in_relations_e, out_relations_e) = get_adjacent_relations(entity)
    (in_relations_e, out_relations_e) = grail_rm_redundancy_adjancent_relations(in_relations_e, out_relations_e, use_master=use_master)
    dataset = 'grail'
    lfs = _grail_one_hop_lfs(entity, in_relations_e, dataset, incoming=True)
    lfs += _grail_one_hop_lfs(entity, out_relations_e, dataset, incoming=False)
    return lfs
class AADGenerator(nn.Module):
    """Face-generation decoder built from a stack of AAD residual blocks.

    Starting from an identity embedding, each of the first seven blocks is
    followed by a 2x bilinear upsample; the final block maps down to a 3
    channel image squashed through tanh. Attribute names (AADBlk1..8) are
    kept so existing checkpoints load unchanged.
    """

    def __init__(self, c_id=256):
        super(AADGenerator, self).__init__()
        # Project the flat identity vector into a 1024-channel 2x2 map.
        self.up1 = nn.ConvTranspose2d(c_id, 1024, kernel_size=2, stride=1, padding=0)
        self.AADBlk1 = AAD_ResBlk(1024, 1024, 1024, c_id)
        self.AADBlk2 = AAD_ResBlk(1024, 1024, 2048, c_id)
        self.AADBlk3 = AAD_ResBlk(1024, 1024, 1024, c_id)
        self.AADBlk4 = AAD_ResBlk(1024, 512, 512, c_id)
        self.AADBlk5 = AAD_ResBlk(512, 256, 256, c_id)
        self.AADBlk6 = AAD_ResBlk(256, 128, 128, c_id)
        self.AADBlk7 = AAD_ResBlk(128, 64, 64, c_id)
        self.AADBlk8 = AAD_ResBlk(64, 3, 64, c_id)
        self.apply(weight_init)

    def forward(self, z_attr, z_id):
        feat = self.up1(z_id.reshape(z_id.shape[0], -1, 1, 1))
        upsampled_blocks = (self.AADBlk1, self.AADBlk2, self.AADBlk3, self.AADBlk4, self.AADBlk5, self.AADBlk6, self.AADBlk7)
        # Each block consumes its matching attribute map, then 2x upsample.
        for level, blk in enumerate(upsampled_blocks):
            feat = F.interpolate(blk(feat, z_attr[level], z_id), scale_factor=2, mode='bilinear', align_corners=True)
        out = self.AADBlk8(feat, z_attr[7], z_id)
        return torch.tanh(out)
class SortingHelpFormatter(HelpFormatter):
def add_arguments(self, actions):
actions = sorted(actions, key=attrgetter('option_strings'))
super(SortingHelpFormatter, self).add_arguments(actions) |
def register_Ns3QosTxop_methods(root_module, cls):
    """Register the ns3::QosTxop binding surface on *cls*.

    Generated pybindgen registration code: each call declares one attribute,
    constructor or method (name, return type, parameters, const/virtual/
    visibility flags) for the Python wrapper. Do not hand-edit ordering or
    signatures; they mirror the ns-3 C++ header.
    """
    cls.add_instance_attribute('m_aMpduEnabled', 'std::map< ns3::Mac48Address, bool >', is_const=False)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('IsQosTxop', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('SetWifiRemoteStationManager', 'void', [param('ns3::Ptr< ns3::WifiRemoteStationManager > const', 'remoteManager')], is_virtual=True)
    cls.add_method('SetTypeOfStation', 'void', [param('ns3::TypeOfStation', 'type')])
    cls.add_method('GetTypeOfStation', 'ns3::TypeOfStation', [], is_const=True)
    # Block-ack agreement management.
    cls.add_method('GetBaAgreementEstablished', 'bool', [param('ns3::Mac48Address', 'address'), param('uint8_t', 'tid')], is_const=True)
    cls.add_method('CompleteAmpduTransfer', 'void', [param('ns3::Mac48Address', 'recipient'), param('uint8_t', 'tid')])
    cls.add_method('GetBaBufferSize', 'uint16_t', [param('ns3::Mac48Address', 'address'), param('uint8_t', 'tid')], is_const=True)
    # Channel-access and tx-outcome callbacks.
    cls.add_method('NotifyAccessGranted', 'void', [], is_virtual=True)
    cls.add_method('NotifyInternalCollision', 'void', [], is_virtual=True)
    cls.add_method('NotifyCollision', 'void', [], is_virtual=True)
    cls.add_method('MissedCts', 'void', [], is_virtual=True)
    cls.add_method('GotAck', 'void', [], is_virtual=True)
    cls.add_method('GotBlockAck', 'void', [param('ns3::CtrlBAckResponseHeader const *', 'blockAck'), param('ns3::Mac48Address', 'recipient'), param('double', 'rxSnr'), param('ns3::WifiMode', 'txMode'), param('double', 'dataSnr')], is_virtual=True)
    cls.add_method('MissedBlockAck', 'void', [param('uint8_t', 'nMpdus')], is_virtual=True)
    cls.add_method('GotAddBaResponse', 'void', [param('ns3::MgtAddBaResponseHeader const *', 'respHdr'), param('ns3::Mac48Address', 'recipient')])
    cls.add_method('GotDelBaFrame', 'void', [param('ns3::MgtDelBaHeader const *', 'delBaHdr'), param('ns3::Mac48Address', 'recipient')])
    cls.add_method('MissedAck', 'void', [], is_virtual=True)
    cls.add_method('StartNextPacket', 'void', [], is_virtual=True)
    cls.add_method('EndTxNoAck', 'void', [], is_virtual=True)
    cls.add_method('RestartAccessIfNeeded', 'void', [], is_virtual=True)
    cls.add_method('StartAccessIfNeeded', 'void', [], is_virtual=True)
    cls.add_method('NeedBarRetransmission', 'bool', [])
    # Fragmentation support.
    cls.add_method('NeedFragmentation', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('GetFragmentPacket', 'ns3::Ptr< ns3::Packet >', [param('ns3::WifiMacHeader *', 'hdr')], is_virtual=True)
    cls.add_method('SetAccessCategory', 'void', [param('ns3::AcIndex', 'ac')])
    cls.add_method('PushFront', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet'), param('ns3::WifiMacHeader const &', 'hdr')])
    cls.add_method('CompleteConfig', 'void', [])
    cls.add_method('SetBlockAckThreshold', 'void', [param('uint8_t', 'threshold')])
    cls.add_method('GetBlockAckThreshold', 'uint8_t', [], is_const=True)
    cls.add_method('SetBlockAckInactivityTimeout', 'void', [param('uint16_t', 'timeout')])
    cls.add_method('SendDelbaFrame', 'void', [param('ns3::Mac48Address', 'addr'), param('uint8_t', 'tid'), param('bool', 'byOriginator')])
    cls.add_method('CompleteMpduTx', 'void', [param('ns3::Ptr< ns3::WifiMacQueueItem const >', 'mpdu')])
    cls.add_method('GetAmpduExist', 'bool', [param('ns3::Mac48Address', 'dest')], is_const=True)
    cls.add_method('SetAmpduExist', 'void', [param('ns3::Mac48Address', 'dest'), param('bool', 'enableAmpdu')])
    cls.add_method('SetAddBaResponseTimeout', 'void', [param('ns3::Time', 'addBaResponseTimeout')])
    cls.add_method('GetAddBaResponseTimeout', 'ns3::Time', [], is_const=True)
    cls.add_method('SetFailedAddBaTimeout', 'void', [param('ns3::Time', 'failedAddBaTimeout')])
    cls.add_method('GetFailedAddBaTimeout', 'ns3::Time', [], is_const=True)
    cls.add_method('GetNextSequenceNumberFor', 'uint16_t', [param('ns3::WifiMacHeader const *', 'hdr')])
    cls.add_method('PeekNextSequenceNumberFor', 'uint16_t', [param('ns3::WifiMacHeader const *', 'hdr')])
    cls.add_method('RemoveRetransmitPacket', 'void', [param('uint8_t', 'tid'), param('ns3::Mac48Address', 'recipient'), param('uint16_t', 'seqnumber')])
    cls.add_method('PeekNextRetransmitPacket', 'ns3::Ptr< ns3::WifiMacQueueItem const >', [param('uint8_t', 'tid'), param('ns3::Mac48Address', 'recipient')])
    cls.add_method('BaTxOk', 'void', [param('ns3::WifiMacHeader const &', 'hdr')])
    cls.add_method('BaTxFailed', 'void', [param('ns3::WifiMacHeader const &', 'hdr')])
    cls.add_method('MapSrcAddressForAggregation', 'ns3::Mac48Address', [param('ns3::WifiMacHeader const &', 'hdr')])
    cls.add_method('MapDestAddressForAggregation', 'ns3::Mac48Address', [param('ns3::WifiMacHeader const &', 'hdr')])
    cls.add_constructor([param('ns3::QosTxop const &', 'arg0')])
    # Private virtuals overridden from the Txop base class.
    cls.add_method('HasTxop', 'bool', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('GetNextFragmentSize', 'uint32_t', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('GetFragmentSize', 'uint32_t', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('GetFragmentOffset', 'uint32_t', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('IsLastFragment', 'bool', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('DoInitialize', 'void', [], visibility='private', is_virtual=True)
    return
class ResnetMemongerTest(hu.HypothesisTestCase):
    """Checks that caffe2's memonger graph rewrites (shared gradient blobs,
    forward-only optimization) reduce blob count while preserving ResNet-50's
    numerical results.

    NOTE(review): the extracted source had the hypothesis decorators mangled
    into bare tuples — ``(with_shapes=st.booleans(), **hu.gcs_cpu_only)`` and
    ``(max_examples=2, deadline=None)`` — which is a SyntaxError. Restored as
    ``@given``/``@settings``; the use of ``st``/``hu`` implies the matching
    imports exist at the top of the file.
    """

    @given(with_shapes=st.booleans(), **hu.gcs_cpu_only)
    @settings(max_examples=2, deadline=None)
    def test_resnet_shared_grads(self, with_shapes, gc, dc):
        results = utils.test_shared_grads(with_shapes, resnet.create_resnet50, 'gpu_0/conv1_w', 'gpu_0/last_out_L1000')
        # Optimized blob count must be strictly smaller than baseline...
        self.assertTrue((results[0][0] < results[0][1]))
        # ...while loss and gradient values stay numerically identical.
        np.testing.assert_almost_equal(results[1][0], results[1][1])
        np.testing.assert_almost_equal(results[2][0], results[2][1])

    def test_resnet_forward_only(self):
        results = utils.test_forward_only(resnet.create_resnet50, 'gpu_0/last_out_L1000')
        self.assertTrue((results[0][0] < results[0][1]))
        # The optimized net is expected to keep only a handful of blobs alive.
        self.assertTrue(((results[1] < 7) and (results[1] > 0)))
        np.testing.assert_almost_equal(results[2][0], results[2][1])

    def test_resnet_forward_only_fast_simplenet(self):
        results = utils.test_forward_only_fast_simplenet(resnet.create_resnet50, 'gpu_0/last_out_L1000')
        self.assertTrue((results[0][0] < results[0][1]))
        self.assertTrue(((results[1] < 4) and (results[1] > 0)))
        np.testing.assert_almost_equal(results[2][0], results[2][1])
def tf_efficientnet_b4(pretrained=False, **kwargs):
    """EfficientNet-B4 configured to match the TensorFlow reference port
    (TF batch-norm epsilon and SAME padding)."""
    kwargs.update(bn_eps=BN_EPS_TF_DEFAULT, pad_type='same')
    return _gen_efficientnet('tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs)
class Window():
    """Minimal display window backed by PIL's native Image.core window
    support. Subclasses override the ``ui_handle_*`` methods to react to UI
    events; the C layer invokes them through ``__dispatcher``."""

    def __init__(self, title='PIL', width=None, height=None):
        # Image.core wants concrete integers; 0 means "use the default size".
        self.hwnd = Image.core.createwindow(title, self.__dispatcher, (width or 0), (height or 0))

    def __dispatcher(self, action, *args):
        # Route a named UI event from the C layer to its handler method.
        handler = getattr(self, ('ui_handle_' + action))
        return handler(*args)

    # Event handlers below are no-ops by default; subclasses override them.
    def ui_handle_clear(self, dc, x0, y0, x1, y1):
        pass

    def ui_handle_damage(self, x0, y0, x1, y1):
        pass

    def ui_handle_destroy(self):
        pass

    def ui_handle_repair(self, dc, x0, y0, x1, y1):
        pass

    def ui_handle_resize(self, width, height):
        pass

    def mainloop(self):
        """Enter the native event loop (blocks until the loop exits)."""
        Image.core.eventloop()
def add_optimization_args(parser):
    """Attach the optimization-related CLI options (derived from
    OptimizationConfig) to *parser* and return the argument group."""
    opt_group = parser.add_argument_group('optimization')
    gen_parser_from_dataclass(opt_group, OptimizationConfig())
    return opt_group
def barrier(group=group.WORLD):
    """Block until all processes in *group* reach this call.

    Legacy THD-era torch.distributed API: only works when the backend was
    initialized in process-group mode, enforced by the assert below.
    NOTE: the parameter deliberately shadows the module-level ``group``
    namespace used for its own default value.
    """
    assert (torch.distributed._initialized == _INITIALIZED_PG), 'collective only supported in process-group mode'
    return torch._C._dist_barrier(group)
def srwl_opt_setup_mask(_delta, _atten_len, _thick, _hx, _hy, _pitch_x, _pitch_y, _mask_Nx, _mask_Ny, _grid_nx, _grid_ny, _grid_sh, _grid_dx, _grid_dy=0, _grid_angle=0, _mask_x0=0, _mask_y0=0):
    """Set up a transmission object (SRWLOptT) representing a periodic mask.

    Supported grid shapes (_grid_sh): 0 = ellipse/circle holes, 1 = rotated
    rectangles, 2 = 2-D phase grating (checkerboard phase shift).

    Fixes over the previous version:
    - metadata key 'verticalSamplingInterval' recorded _mask_Ny instead of _hy;
    - mask_Ry was computed as _hy * _mask_Nx instead of _hy * _mask_Ny
      (wrong vertical extent whenever Nx != Ny — compare grid_Ry below);
    - a leftover debug print(y) in the inner loop was removed.
    """
    input_parms = {'type': 'mask', 'refractiveIndex': _delta, 'attenuationLength': _atten_len, 'maskThickness': _thick, 'gridShape': _grid_sh, 'horizontalGridDimension': _grid_dx, 'verticalGridDimension': _grid_dy, 'horizontalGridPitch': _pitch_x, 'verticalGridPitch': _pitch_y, 'horizontalGridsNumber': _grid_nx, 'verticalGridsNumber': _grid_ny, 'horizontalPixelsNumber': _mask_Nx, 'verticalPixelsNumber': _mask_Ny, 'gridTiltAngle': _grid_angle, 'horizontalSamplingInterval': _hx, 'verticalSamplingInterval': _hy, 'horizontalMaskCoordinate': _mask_x0, 'verticalMaskCoordinate': _mask_y0}
    # Default: square grid elements.
    if (_grid_dy == 0):
        _grid_dy = _grid_dx
    # Phase grating: the element size equals the pitch (no gaps).
    if (_grid_sh == 2):
        _grid_dx = _pitch_x
        _grid_dy = _pitch_y
    # Physical extents of the sampled mask window and of the grid pattern.
    mask_Rx = (_hx * _mask_Nx)
    mask_Ry = (_hy * _mask_Ny)
    grid_Rx = (_pitch_x * _grid_nx)
    grid_Ry = (_pitch_y * _grid_ny)
    trans_opt = SRWLOptT(_nx=_mask_Nx, _ny=_mask_Ny, _rx=mask_Rx, _ry=mask_Ry, _arTr=None, _extTr=0, _x=0, _y=0)
    pointer = 0
    y = ((- mask_Ry) / 2)
    # Geometry helpers for the rotated-rectangle shape (computed once).
    xCross1 = None
    yCross1 = None
    xCross2 = None
    yCross2 = None
    k1 = None
    k2 = None
    k3 = None
    k4 = None
    if (_grid_sh == 1):
        grid_dx_d_sqrt2 = (_grid_dx / sqrt(2))
        cosGridAng = cos(_grid_angle)
        sinGridAng = sin(_grid_angle)
        xCross2 = (grid_dx_d_sqrt2 * cosGridAng)
        xCross1 = (- xCross2)
        yCross2 = (grid_dx_d_sqrt2 * sinGridAng)
        yCross1 = (- yCross2)
        # Slopes of the four rectangle edges after rotation by _grid_angle.
        k1 = tan(((pi / 4) + _grid_angle))
        k2 = (- tan(((pi / 4) - _grid_angle)))
        k4 = tan(((pi / 4) + _grid_angle))
        k3 = (- tan(((pi / 4) - _grid_angle)))
    for iy in range(_mask_Ny):
        # Coordinate relative to the current grid cell, centered on the mask offset.
        pitch_num_y = floor(round((y / _pitch_y), 9))
        y_rel = ((y - (pitch_num_y * _pitch_y)) - _mask_y0)
        if (y_rel >= (_pitch_y / 2)):
            y_rel -= _pitch_y
        x = ((- mask_Rx) / 2)
        for ix in range(_mask_Nx):
            pitch_num_x = floor(round((x / _pitch_x), 9))
            x_rel = ((x - (pitch_num_x * _pitch_x)) - _mask_x0)
            if (x_rel >= (_pitch_x / 2)):
                x_rel -= _pitch_x
            inside_hole = False
            phase_shift = False
            if (_grid_sh == 0):
                # Elliptical hole, excluding the central marker cell and
                # anything outside the overall grid extent.
                if (((((x_rel / _grid_dx) ** 2) + ((y_rel / _grid_dy) ** 2)) < 1) and (not ((round((x_rel - (x - _mask_x0)), 9) == 0) and (round((y_rel - (y - _mask_y0)), 9) == 0))) and (abs(x) < (grid_Rx / 2)) and (abs(y) < (grid_Ry / 2))):
                    inside_hole = True
            elif (_grid_sh == 1):
                # Rotated rectangle: point must lie between both edge pairs.
                if ((((k2 * x_rel) + (yCross2 - (k2 * xCross2))) > y_rel > ((k3 * x_rel) + (yCross1 - (k3 * xCross1)))) and (((k1 * x_rel) + (yCross1 - (k1 * xCross1))) > y_rel > ((k4 * x_rel) + (yCross2 - (k4 * xCross2)))) and (not ((abs((x - _mask_x0)) < (_pitch_x / 2)) and (abs((y - _mask_y0)) < (_pitch_y / 2)))) and (abs(x) < (grid_Rx / 2)) and (abs(y) < (grid_Ry / 2))):
                    inside_hole = True
            elif (_grid_sh == 2):
                # 2-D phase grating: checkerboard pattern of phase shift.
                phase_shift = False
                if (((x_rel >= 0) and (y_rel < 0)) or ((x_rel < 0) and (y_rel >= 0))):
                    phase_shift = True
            else:
                raise ValueError('Unknown shape code.')
            # Amplitude transmission (arTr[pointer]) and optical path
            # difference (arTr[pointer + 1]) for this sample point.
            if (inside_hole and (not (_grid_sh == 2))):
                trans_opt.arTr[pointer] = 1
                trans_opt.arTr[(pointer + 1)] = 0
            else:
                trans_opt.arTr[pointer] = 0
                trans_opt.arTr[(pointer + 1)] = 0
            if (_grid_sh == 2):
                if phase_shift:
                    trans_opt.arTr[pointer] = exp((((- 0.5) * _thick) / _atten_len))
                    trans_opt.arTr[(pointer + 1)] = ((- _delta) * _thick)
                else:
                    trans_opt.arTr[pointer] = 1
                    trans_opt.arTr[(pointer + 1)] = 0
                # Outside the grating aperture everything is opaque.
                if (not ((abs(x) < (grid_Rx / 2)) and (abs(y) < (grid_Ry / 2)))):
                    trans_opt.arTr[pointer] = 0
                    trans_opt.arTr[(pointer + 1)] = 0
            pointer += 2
            x += _hx
        y += _hy
    trans_opt.input_parms = input_parms
    return trans_opt
def map_to_limited_gpus(func, configs, NUM_AVAIALBLE_GPUS, CUDA_VISIBLE_DEVICES=None):
    """Run *func* over *configs* in parallel, sharing a limited pool of GPUs.

    A managed queue is pre-filled with GPU ids (either the explicit
    CUDA_VISIBLE_DEVICES list or 0..NUM_AVAIALBLE_GPUS-1); each worker checks
    one out via ``run_function``. The parameter name keeps its historical
    typo ("AVAIALBLE") because callers pass it by keyword.
    """
    with Manager() as manager:
        gpu_queue = manager.Queue()
        device_ids = CUDA_VISIBLE_DEVICES if CUDA_VISIBLE_DEVICES else range(NUM_AVAIALBLE_GPUS)
        for device_id in device_ids:
            gpu_queue.put(device_id)
        jobs = (delayed(run_function)(func, cfg, gpu_queue) for cfg in configs)
        Parallel(n_jobs=NUM_AVAIALBLE_GPUS, verbose=10)(jobs)
def pre_build_hook(build_ext, ext):
    """Configure C++ compile/link flags for this extension before building.

    Probes the active C++ compiler and appends, when supported: the C++
    standard flag, MSVC exception handling, hidden symbol visibility,
    pthread-backed pocketfft threading, and a macOS deployment target.
    """
    from scipy._build_utils.compiler_helper import get_cxx_std_flag, try_add_flag, try_compile, has_flag
    cc = build_ext._cxx_compiler
    args = ext.extra_compile_args
    std_flag = get_cxx_std_flag(build_ext._cxx_compiler)
    if (std_flag is not None):
        args.append(std_flag)
    if (cc.compiler_type == 'msvc'):
        # MSVC needs explicit C++ exception semantics.
        args.append('/EHsc')
    else:
        # Hide non-exported symbols; probe pthread availability for pocketfft.
        try_add_flag(args, cc, '-fvisibility=hidden')
        has_pthreads = try_compile(cc, code='#include <pthread.h>\nint main(int argc, char **argv) {}')
        if has_pthreads:
            ext.define_macros.append(('POCKETFFT_PTHREADS', None))
        min_macos_flag = '-mmacosx-version-min=10.9'
        import sys
        if ((sys.platform == 'darwin') and has_flag(cc, min_macos_flag)):
            # Must appear on both compile and link lines to take effect.
            args.append(min_macos_flag)
            ext.extra_link_args.append(min_macos_flag)
def register_Ns3HtCapabilities_methods(root_module, cls):
    """Register the ns3::HtCapabilities binding surface on *cls*.

    Generated pybindgen registration code: each call declares one
    constructor, operator or method (name, return type, parameters, flags)
    for the Python wrapper. Do not hand-edit signatures; they mirror the
    ns-3 C++ header.
    """
    cls.add_output_stream_operator()
    cls.add_constructor([param('ns3::HtCapabilities const &', 'arg0')])
    cls.add_constructor([])
    # Serialization interface inherited from WifiInformationElement.
    cls.add_method('DeserializeInformationField', 'uint8_t', [param('ns3::Buffer::Iterator', 'start'), param('uint8_t', 'length')], is_virtual=True)
    cls.add_method('ElementId', 'ns3::WifiInformationElementId', [], is_const=True, is_virtual=True)
    # Getters for the individual HT capability fields.
    cls.add_method('GetAmpduParameters', 'uint8_t', [], is_const=True)
    cls.add_method('GetAntennaSelectionCapabilities', 'uint8_t', [], is_const=True)
    cls.add_method('GetExtendedHtCapabilities', 'uint16_t', [], is_const=True)
    cls.add_method('GetGreenfield', 'uint8_t', [], is_const=True)
    cls.add_method('GetHtCapabilitiesInfo', 'uint16_t', [], is_const=True)
    cls.add_method('GetInformationFieldSize', 'uint8_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetLdpc', 'uint8_t', [], is_const=True)
    cls.add_method('GetMaxAmpduLength', 'uint32_t', [], is_const=True)
    cls.add_method('GetMaxAmsduLength', 'uint16_t', [], is_const=True)
    cls.add_method('GetRxHighestSupportedAntennas', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint16_t', [], is_const=True)
    cls.add_method('GetShortGuardInterval20', 'uint8_t', [], is_const=True)
    cls.add_method('GetSupportedChannelWidth', 'uint8_t', [], is_const=True)
    cls.add_method('GetSupportedMcsSet1', 'uint64_t', [], is_const=True)
    cls.add_method('GetSupportedMcsSet2', 'uint64_t', [], is_const=True)
    cls.add_method('GetTxBfCapabilities', 'uint32_t', [], is_const=True)
    cls.add_method('IsSupportedMcs', 'bool', [param('uint8_t', 'mcs')], is_const=True)
    cls.add_method('Serialize', 'ns3::Buffer::Iterator', [param('ns3::Buffer::Iterator', 'start')], is_const=True)
    cls.add_method('SerializeInformationField', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    # Setters for the individual HT capability fields.
    cls.add_method('SetAmpduParameters', 'void', [param('uint8_t', 'ctrl')])
    cls.add_method('SetAntennaSelectionCapabilities', 'void', [param('uint8_t', 'ctrl')])
    cls.add_method('SetExtendedHtCapabilities', 'void', [param('uint16_t', 'ctrl')])
    cls.add_method('SetGreenfield', 'void', [param('uint8_t', 'greenfield')])
    cls.add_method('SetHtCapabilitiesInfo', 'void', [param('uint16_t', 'ctrl')])
    cls.add_method('SetHtSupported', 'void', [param('uint8_t', 'htsupported')])
    cls.add_method('SetLSigProtectionSupport', 'void', [param('uint8_t', 'lsigprotection')])
    cls.add_method('SetLdpc', 'void', [param('uint8_t', 'ldpc')])
    cls.add_method('SetMaxAmpduLength', 'void', [param('uint32_t', 'maxampdulength')])
    cls.add_method('SetMaxAmsduLength', 'void', [param('uint16_t', 'maxamsdulength')])
    cls.add_method('SetRxHighestSupportedDataRate', 'void', [param('uint16_t', 'maxsupportedrate')])
    cls.add_method('SetRxMcsBitmask', 'void', [param('uint8_t', 'index')])
    cls.add_method('SetShortGuardInterval20', 'void', [param('uint8_t', 'shortguardinterval')])
    cls.add_method('SetShortGuardInterval40', 'void', [param('uint8_t', 'shortguardinterval')])
    cls.add_method('SetSupportedChannelWidth', 'void', [param('uint8_t', 'supportedchannelwidth')])
    cls.add_method('SetSupportedMcsSet', 'void', [param('uint64_t', 'ctrl1'), param('uint64_t', 'ctrl2')])
    cls.add_method('SetTxBfCapabilities', 'void', [param('uint32_t', 'ctrl')])
    cls.add_method('SetTxMaxNSpatialStreams', 'void', [param('uint8_t', 'maxtxspatialstreams')])
    cls.add_method('SetTxMcsSetDefined', 'void', [param('uint8_t', 'txmcssetdefined')])
    cls.add_method('SetTxRxMcsSetUnequal', 'void', [param('uint8_t', 'txrxmcssetunequal')])
    cls.add_method('SetTxUnequalModulation', 'void', [param('uint8_t', 'txunequalmodulation')])
    return
class CNN_Encoder(nn.Module):
    """Convolutional VAE encoder for pitch-channel inputs.

    Two strided conv blocks (each followed by a residual block) downsample
    the input; two linear heads produce the posterior mean and a
    softplus-positive std, from which a latent sample is drawn via the
    reparameterization trick.
    """

    def __init__(self, ch, num_pitch, latent_dim):
        super(CNN_Encoder, self).__init__()
        self.first_layer = CNNBlock(num_pitch, ch, kernel_size=3, padding=1)
        self.cnn_layer = nn.ModuleList([CNNBlock(ch, ch, kernel_size=3, stride=2, padding=1) for _ in range(2)])
        self.res_layer = nn.ModuleList([ResBlock(ch, kernel_size=3, padding=1) for _ in range(2)])
        # NOTE(review): 32 * ch presumes the flattened feature size after
        # two stride-2 stages — confirm against the expected input size.
        self.mu = nn.Linear(32 * ch, latent_dim)
        self.std = nn.Linear(32 * ch, latent_dim)

    def encode(self, x):
        h = self.first_layer(x)
        for down, res in zip(self.cnn_layer, self.res_layer):
            h = res(down(h))
        h = h.view(h.shape[0], -1)
        mu = self.mu(h)
        std = nn.functional.softplus(self.std(h))
        z = self.reparameterize(mu, std)
        return z, mu, std

    def reparameterize(self, mu, std):
        # z = mu + eps * std, with eps ~ N(0, I).
        noise = torch.randn_like(std)
        return mu + noise * std

    def forward(self, x):
        return self.encode(x)
class VirtualSplitWeightsNode(VirtualSplitNode):
    """Virtual node holding only the WEIGHTS half of a split original node.

    The node reuses the origin node's unique weights quantization candidates
    while disabling activation quantization entirely (the activation half
    lives on a sibling virtual node).
    """

    def __init__(self, origin_node: BaseNode) -> None:
        super().__init__(origin_node)
        self.name = (origin_node.name + VIRTUAL_WEIGHTS_SUFFIX)
        self.candidates_quantization_cfg = origin_node.get_unique_weights_candidates()
        for c in self.candidates_quantization_cfg:
            # Activations are not quantized on this node: disable the flag
            # and mark the bit-width as float.
            c.activation_quantization_cfg.enable_activation_quantization = False
            c.activation_quantization_cfg.activation_n_bits = FLOAT_BITWIDTH
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.