code stringlengths 101 5.91M |
|---|
def seed_test_case0():
    """Seed test: Simple.do_something reports 'not empty!' for a non-empty list."""
    capacity = 10
    simple = module0.Simple(capacity)
    payload = [1, 2, 3]
    result = simple.do_something(payload)
    assert result == 'not empty!'
def max_tree_local_maxima(image, connectivity=1, parent=None, tree_traverser=None):
    """Label local maxima of ``image`` using its max-tree representation.

    Parameters
    ----------
    image : ndarray
        Input image.
    connectivity : int, optional
        Neighborhood connectivity used when the max-tree must be built.
    parent, tree_traverser : ndarray, optional
        Precomputed max-tree; if either is missing the tree is rebuilt
        from ``image``.

    Returns
    -------
    ndarray of uint64
        Array of ``image.shape``, filled in place by the Cython helper
        (presumably label ids of the maxima — confirm against the
        ``_max_tree`` module's docs).
    """
    # Start from all ones; the Cython routine writes the result in place
    # through the ravel() views, which share memory with `output`.
    output = np.ones(image.shape, dtype=np.uint64)
    # Rebuild the max-tree unless the caller supplied a complete one.
    if ((parent is None) or (tree_traverser is None)):
        (parent, tree_traverser) = max_tree(image, connectivity)
    _max_tree._max_tree_local_maxima(image.ravel(), output.ravel(), parent.ravel(), tree_traverser)
    return output
class GVContext(object):
    """Render state for emitting a Graphviz digraph.

    Tracks a stable node id per block, the child elements to render, and
    a cache of source lines keyed by source descriptor.
    """

    def __init__(self):
        self.blockids = {}   # block -> assigned "blockN" identifier
        self.nextid = 0      # next free numeric suffix
        self.children = []   # renderable child elements
        self.sources = {}    # source descriptor -> cached list of lines

    def add(self, child):
        """Register a child element for rendering."""
        self.children.append(child)

    def nodeid(self, block):
        """Return the stable Graphviz id for ``block``, assigning one if new."""
        if block not in self.blockids:
            assigned = 'block%d' % self.nextid
            self.nextid += 1
            self.blockids[block] = assigned
        return self.blockids[block]

    def extract_sources(self, block):
        """Return the stripped source lines covered by ``block.positions``."""
        if not block.positions:
            return ''
        start = min(block.positions)
        stop = max(block.positions)
        srcdescr = start[0]
        # Cache each descriptor's lines; get_lines() may be expensive.
        if srcdescr not in self.sources:
            self.sources[srcdescr] = list(srcdescr.get_lines())
        lines = self.sources[srcdescr]
        # Positions are (descriptor, 1-based line); join with a literal
        # "\n" so Graphviz renders line breaks inside a label.
        selected = lines[start[1] - 1:stop[1]]
        return '\\n'.join(line.strip() for line in selected)

    def render(self, fp, name, annotate_defs=False):
        """Write the complete digraph, delegating each child's rendering."""
        fp.write('digraph %s {\n' % name)
        fp.write(' node [shape=box];\n')
        for child in self.children:
            child.render(fp, self, annotate_defs)
        fp.write('}\n')

    def escape(self, text):
        """Escape quotes and newlines for use inside a Graphviz label."""
        return text.replace('"', '\\"').replace('\n', '\\n')
class ResNet3X3(nn.Module):
    """ResNet with a "deep stem": the usual 7x7 stride-2 entry conv is
    replaced by three stacked 3x3 convs (3 -> 64 -> 64 -> 128 channels),
    after which the standard four residual stages follow.

    Args:
        block: residual block class; must expose an ``expansion`` factor.
        layers: per-stage block counts, e.g. [3, 4, 6, 3].
        num_classes: output size of the final fully connected layer.
    """

    def __init__(self, block, layers, num_classes=1000):
        # Running channel count consumed and updated by _make_layer;
        # starts at the stem's 128 output channels.
        self.inplanes = 128
        super(ResNet3X3, self).__init__()
        # --- deep stem: 3x3/s2 + 3x3/s1 + 3x3/s1 ---
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = mynn.Norm2d(64)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = mynn.Norm2d(64)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn3 = mynn.Norm2d(self.inplanes)
        self.relu3 = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # --- four residual stages; stages 2-4 downsample by 2 ---
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # Classification head; the fixed 7x7 pool assumes the usual
        # 224x224 input resolution -- TODO confirm.
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # Kaiming init for convs; unit scale / zero shift for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif (isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.SyncBatchNorm)):
                if (m.weight is not None):
                    nn.init.constant_(m.weight, 1)
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage of ``blocks`` blocks.

        A 1x1 projection shortcut is added when spatial or channel
        dimensions change; ``self.inplanes`` is advanced as a side effect.
        """
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), mynn.Norm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        # Remaining blocks keep stride 1 and need no projection; `index`
        # is intentionally unused.
        for index in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Stem -> four residual stages -> global average pool -> logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu2(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu3(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # Flatten to (batch, features) for the classifier.
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        return x
def test_regular():
    """check_regular distinguishes Regular vs List vs Numpy layouts in almost_equal."""
    base = ak.Array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    # An array always matches itself, whatever the flag.
    assert ak.almost_equal(base, base, check_regular=True)
    assert ak.almost_equal(base, base, check_regular=False)
    # The regularized copy differs only when the flag is on.
    assert not ak.almost_equal(base, ak.to_regular(base), check_regular=True)
    assert ak.almost_equal(base, ak.to_regular(base), check_regular=False)
    # A trivial slice produces a ListArray layout, treated like the base.
    sliced = base[::1]
    assert isinstance(sliced.layout, ak.contents.ListArray)
    assert ak.almost_equal(base, sliced, check_regular=False)
    assert ak.almost_equal(base, sliced, check_regular=True)
    assert not ak.almost_equal(sliced, ak.to_regular(base), check_regular=True)
    assert ak.almost_equal(sliced, ak.to_regular(base), check_regular=False)
    # Round-tripping through numpy yields a layout that counts as regular.
    round_tripped = ak.from_numpy(ak.to_numpy(base), regulararray=False)
    assert ak.almost_equal(round_tripped, ak.to_regular(base), check_regular=True)
    assert ak.almost_equal(round_tripped, ak.to_regular(base), check_regular=False)
    assert not ak.almost_equal(round_tripped, base, check_regular=True)
    assert ak.almost_equal(round_tripped, base, check_regular=False)
class PublishFindingsTask():
    """Publishes detector findings for one project version to the review site.

    Findings are posted in slices bounded by ``max_files_per_post`` and
    ``max_post_size_in_bytes`` so each HTTP request stays within server
    limits.

    Fix: the helpers that take no ``self`` parameter are now declared
    ``@staticmethod``. Previously they were plain functions in the class
    body, so calling them via ``self.`` injected the instance as the first
    positional argument and raised TypeError (or misbound the arguments)
    at runtime.
    """

    def __init__(self, experiment_id: str, compiles_base_path: str, review_site_url: str, review_site_user: str='', review_site_password: str=''):
        super().__init__()
        # Per-request upload limits.
        self.max_files_per_post = 20
        self.max_post_size_in_bytes = 7000
        self.experiment_id = experiment_id
        self.compiles_base_path = compiles_base_path
        self.review_site_url = review_site_url
        self.review_site_user = review_site_user
        self.review_site_password = review_site_password
        # Prompt interactively when a user is configured without a password.
        if (self.review_site_user and (not self.review_site_password)):
            self.review_site_password = getpass.getpass("Enter review-site password for '{}': ".format(self.review_site_user))

    def run(self, project: Project, version: ProjectVersion, detector_run: DetectorRun, potential_hits: PotentialHits, version_compile: VersionCompile, detector: Detector):
        """Convert the run's findings to postable form and upload them in slices.

        Raises:
            PublishFailedException: wraps any RequestException from posting.
        """
        logger = logging.getLogger('tasks.publish_findings.version')
        logger.info('Publishing findings of %s in %s on %s for upload to %s...', detector, self.experiment_id, version, self.review_site_url)
        # Map the detector-run outcome to the review site's result string.
        if detector_run.is_success():
            logger.info('Uploading %s potential hits.', len(potential_hits.findings))
            result = 'success'
        elif detector_run.is_error():
            logger.info('Detector produced an error.')
            result = 'error'
        elif detector_run.is_timeout():
            logger.info('Detector timed out.')
            result = 'timeout'
        else:
            logger.info('Detector was not run.')
            result = 'not run'
        run_info = detector_run.get_run_info()
        postable_potential_hits = [self.__to_postable_potential_hit(potential_hit, version_compile, detector_run.findings_path, logger) for potential_hit in potential_hits.findings]
        try:
            for postable_potential_hits_slice in self.__slice_by_number_of_files_and_post_size(postable_potential_hits):
                file_paths = self.__get_file_paths(postable_potential_hits_slice)
                postable_data = self.__to_postable_data(run_info, result, postable_potential_hits_slice)
                self.__post(project, version, detector, postable_data, file_paths)
        except RequestException as e:
            raise PublishFailedException(e)

    def __slice_by_number_of_files_and_post_size(self, potential_hits: List['SpecializedFinding']) -> List[List[Finding]]:
        """Yield slices of findings bounded by the per-post file and size limits.

        Note: this is a generator despite the ``List`` annotation; the
        final (possibly empty) slice is always yielded.
        """
        potential_hits_slice = []
        number_of_files_in_slice = 0
        size_of_slice = 0
        for potential_hit in potential_hits:
            number_of_files_in_hit = len(potential_hit.files)
            size_of_hit = total_size(potential_hit)
            exceeds_max_files_per_post = ((number_of_files_in_slice + number_of_files_in_hit) > self.max_files_per_post)
            exceeds_max_post_size = ((size_of_slice + size_of_hit) > self.max_post_size_in_bytes)
            # Start a new slice when adding this hit would break a limit
            # (a single over-limit hit still gets its own slice).
            if (potential_hits_slice and (exceeds_max_files_per_post or exceeds_max_post_size)):
                (yield potential_hits_slice)
                potential_hits_slice = [potential_hit]
                number_of_files_in_slice = number_of_files_in_hit
                size_of_slice = size_of_hit
            else:
                potential_hits_slice.append(potential_hit)
                number_of_files_in_slice += number_of_files_in_hit
                size_of_slice += size_of_hit
        (yield potential_hits_slice)

    def __to_postable_data(self, run_info, result, postable_potential_hits: List[Dict]):
        """Assemble the JSON-able payload for one post."""
        data = self._to_markdown_dict(run_info)
        data['result'] = result
        data['potential_hits'] = postable_potential_hits
        return data

    def __to_postable_potential_hit(self, potential_hit: Finding, version_compile: VersionCompile, findings_path, logger) -> 'SpecializedFinding':
        """Render one finding to markdown plus its attached files/snippets."""
        files = self._convert_graphs_to_files(potential_hit, findings_path)
        postable_potential_hit = self._to_markdown_dict(potential_hit)
        postable_potential_hit[_SNIPPETS_KEY] = self.__get_postable_snippets(potential_hit, version_compile, logger)
        return SpecializedFinding(postable_potential_hit, files)

    @staticmethod
    def __get_postable_snippets(finding: Finding, version_compile: VersionCompile, logger) -> List[Dict]:
        """Return the finding's code snippets as plain dicts."""
        return [snippet.__dict__ for snippet in PublishFindingsTask.__get_snippets(finding, version_compile, logger)]

    @staticmethod
    def __get_snippets(finding, version_compile, logger):
        """Best-effort snippet lookup; missing sources yield an empty list."""
        try:
            return finding.get_snippets(version_compile.original_sources_paths)
        except SnippetUnavailableException as e:
            logger.warning(e)
            return []

    @staticmethod
    def _to_markdown_dict(finding: Finding) -> Dict[(str, str)]:
        """Render every finding entry (except snippets) to markdown."""
        markdown_dict = dict()
        for (key, value) in finding.items():
            if (key != _SNIPPETS_KEY):
                markdown_dict[key] = as_markdown(value)
        return markdown_dict

    @staticmethod
    def __get_file_paths(findings: List['SpecializedFinding']) -> List[str]:
        """Collect the attachment paths of all findings in a slice."""
        files = []
        for finding in findings:
            files.extend(finding.files)
        return files

    def __post(self, project, version, detector, postable_data, file_paths):
        """Issue the authenticated upload request for one slice."""
        url = self.__get_publish_findings_url(detector, project, version)
        post(url, postable_data, file_paths=file_paths, username=self.review_site_user, password=self.review_site_password)

    def __get_publish_findings_url(self, detector, project, version):
        """Build the review-site endpoint for this run's findings."""
        # The experiment id is prefixed (e.g. 'ex1'); the site wants the
        # bare suffix.
        experiment_id = self.experiment_id[2:]
        return urljoin(self.review_site_url, 'experiments/{}/detectors/{}/projects/{}/versions/{}/runs'.format(experiment_id, detector.id, project.id, version.version_id))

    @staticmethod
    def _convert_graphs_to_files(potential_hit: Dict, findings_path: str) -> List[str]:
        """Replace inline DOT graphs in the finding with rendered image files."""
        files = []
        graph_pattern = re.compile('(graph|digraph) .* {.*?}', re.DOTALL)
        for (key, value) in potential_hit.items():
            if ((type(value) is str) and graph_pattern.match(value)):
                files.append(replace_dot_graph_with_image(potential_hit, key, findings_path))
        return files
def flatten_grads(var_list, grads):
    """Concatenate all gradients into a single flat rank-1 tensor.

    Each gradient is reshaped to the element count of its matching
    variable, then the pieces are joined along axis 0.
    """
    flat_pieces = [tf.reshape(grad, [U.numel(var)]) for (var, grad) in zip(var_list, grads)]
    return tf.concat(flat_pieces, 0)
class SymforceUtilTest(TestCase):
    """Tests for symforce's util.symbolic_eval / lambdify / numbify."""

    def test_symbolic_eval(self) -> None:
        """symbolic_eval builds symbolic instances from the annotations."""
        def f(x: T.Scalar, y: sf.V1, z: sf.V2, w: sf.M22, r: sf.Rot3) -> T.Scalar:
            return (x, y, z, w, r)
        (x, y, z, w, r) = util.symbolic_eval(f)
        self.assertIsInstance(x, sf.Symbol)
        self.assertIsInstance(y, sf.V1)
        self.assertIsInstance(z, sf.V2)
        self.assertIsInstance(w, sf.M22)
        self.assertIsInstance(r, sf.Rot3)

    def test_lambdify(self) -> None:
        """lambdify yields a numeric function over floats/ndarrays/sym types."""
        def f(x: T.Scalar, y: sf.V1, z: sf.V2, w: sf.M22, r: sf.Rot3) -> T.Scalar:
            return (x, y, z, w, r)
        numeric_f = util.lambdify(f)
        (x, y, z, w, r) = numeric_f(0.0, np.zeros((1,)), np.zeros((2,)), np.zeros((2, 2)), sym.Rot3())
        self.assertIsInstance(x, float)
        self.assertIsInstance(y, np.ndarray)
        self.assertIsInstance(z, np.ndarray)
        self.assertIsInstance(w, np.ndarray)
        self.assertIsInstance(r, sym.Rot3)

    # NOTE(review): the bare tuple below looks like the argument list of a
    # stripped `@unittest.skipIf(...)` decorator ("Requires numba"); as
    # written it is a no-op expression and test_numbify always runs --
    # confirm against upstream before relying on the skip.
    ((importlib.util.find_spec('numba') is None), 'Requires numba')
    def test_numbify(self) -> None:
        """numbify JIT-compiles supported signatures and rejects others."""
        import numba
        def f(x: T.Scalar, y: sf.V1, z: sf.V2, w: sf.M22) -> T.Scalar:
            return (x, y, z, w)
        numeric_f = util.numbify(f)
        (x, y, z, w) = numeric_f(0.0, np.zeros((1,)), np.zeros((2,)), np.zeros((2, 2)))
        self.assertIsInstance(x, float)
        self.assertIsInstance(y, np.ndarray)
        self.assertIsInstance(z, np.ndarray)
        self.assertIsInstance(w, np.ndarray)
        # Adding a Rot3 parameter must make numba's type inference fail.
        def f_bad(x: T.Scalar, y: sf.V1, z: sf.V2, w: sf.M22, r: sf.Rot3) -> T.Scalar:
            return (x, y, z, w, r)
        numeric_f = util.numbify(f_bad)
        with self.assertRaises(numba.core.errors.TypingError):
            numeric_f(0.0, np.zeros((1,)), np.zeros((2,)), np.zeros((2, 2)), sym.Rot3())
def extract_id_from_mp3_path(path) -> str:
    """Return the track id encoded in an mp3 file name.

    Only a trailing '.mp3' extension is stripped. Previously every
    occurrence of the substring '.mp3' was removed, which mangled ids
    that themselves contain '.mp3'.
    """
    fname = os.path.basename(path)
    if fname.endswith('.mp3'):
        return fname[:-len('.mp3')]
    return fname
def generate_match_method(byte_array, template):
    """Emit a Rust ``matchp`` function matching ``buffer`` against ``template``.

    Literal chunks are compared against slices of ``byte_array``; ``Field``
    chunks are decoded little-endian into ``&mut`` out-parameters. Note:
    the original zipped names/types into variables called (t, n) — they
    are renamed here, the generated text is unchanged.
    """
    out = StringIO()
    fields = template.fields()
    names = [f.name for f in fields]
    types = [f.c_type() for f in fields]
    if fields:
        # One "name: &mut Type" parameter per field, after the buffer arg.
        params = ', '.join('%s: &mut %s' % (name, typ) for (name, typ) in zip(names, types))
        args = ', ' + params
    else:
        args = ''
    out.write(' pub fn matchp(buffer: &[u8] %s) -> bool {\n' % (args,))
    offset = 0
    for chunk in template.chunks:
        if isinstance(chunk, Field):
            # Decode the field in place from its little-endian bytes.
            out.write(' *%s = %s::from_le_bytes(buffer[%d..%d + std::mem::size_of_val(&%s)].try_into().unwrap());\n' % (chunk.name, chunk.c_type(), offset, offset, chunk.name))
        else:
            # Literal chunk: compare against the constant byte array.
            end = offset + len(chunk)
            out.write(' if buffer[%d..%d] != %s[%d..%d] { return false; }\n' % (offset, end, byte_array, offset, end))
        offset += len(chunk)
    out.write(' true\n')
    out.write(' }')
    return out.getvalue()
def simple_renderer(rn, verts, faces, yrot=np.radians(120)):
    """Render ``verts``/``faces`` with three Lambertian point lights.

    Sets a pink albedo and white background on the renderer ``rn``, then
    accumulates lighting from three point lights whose positions are
    rotated about the Y axis by ``yrot``, and returns the rendered image.
    """
    color = colors['pink']
    # Uniform albedo on a white background.
    rn.set(v=verts, f=faces, vc=color, bgcolor=np.ones(3))
    albedo = rn.vc
    # First (white) light replaces the vertex colors...
    rn.vc = LambertianPointLight(f=rn.f, v=rn.v, num_verts=len(rn.v), light_pos=_rotateY(np.array([(- 200), (- 100), (- 100)]), yrot), vc=albedo, light_color=np.array([1, 1, 1]))
    # ...the remaining two accumulate on top of it.
    rn.vc += LambertianPointLight(f=rn.f, v=rn.v, num_verts=len(rn.v), light_pos=_rotateY(np.array([800, 10, 300]), yrot), vc=albedo, light_color=np.array([1, 1, 1]))
    # Third light is slightly dimmed (0.7 grey).
    rn.vc += LambertianPointLight(f=rn.f, v=rn.v, num_verts=len(rn.v), light_pos=_rotateY(np.array([(- 500), 500, 1000]), yrot), vc=albedo, light_color=np.array([0.7, 0.7, 0.7]))
    # Accessing .r triggers the actual render.
    return rn.r
def count_flops_given_config(net_config, image_size=224):
    """Estimate the FLOP count (in MFLOPs) of a network description.

    Walks a once-for-all-style ``net_config`` dict: first conv, the
    mobile-inverted-bottleneck blocks, final expand layer, feature mix
    layer and classifier.

    NOTE: mutates the ``mb_conv`` sub-dicts in place when ``*_list`` keys
    are present (each candidate list is collapsed to its first entry).
    """
    flops = 0
    # Stem conv runs at half resolution (stride 2).
    flops += count_conv_flop(((image_size + 1) // 2), 3, net_config['first_conv']['out_channels'], 3, 1)
    fsize = ((image_size + 1) // 2)
    for block in net_config['blocks']:
        mb_conv = (block['mobile_inverted_conv'] if ('mobile_inverted_conv' in block) else block['conv'])
        if (mb_conv is None):
            continue
        # Output feature size after this block's stride.
        out_fz = int((((fsize - 1) / mb_conv['stride']) + 1))
        # Elastic configs store candidate lists; take the first choice.
        if ('in_channel_list' in mb_conv.keys()):
            mb_conv['in_channels'] = mb_conv['in_channel_list'][0]
        if ('out_channel_list' in mb_conv.keys()):
            mb_conv['out_channels'] = mb_conv['out_channel_list'][0]
        if ('kernel_size_list' in mb_conv.keys()):
            mb_conv['kernel_size'] = mb_conv['kernel_size_list'][0]
        if ('expand_ratio_list' in mb_conv.keys()):
            mb_conv['expand_ratio'] = mb_conv['expand_ratio_list'][0]
            mb_conv['mid_channels'] = round((mb_conv['in_channels'] * mb_conv['expand_ratio']))
        # 1x1 expansion conv (skipped when there is no expansion).
        if (mb_conv['expand_ratio'] != 1):
            flops += count_conv_flop(fsize, mb_conv['in_channels'], mb_conv['mid_channels'], 1, 1)
        # Depthwise conv: groups == mid_channels.
        flops += count_conv_flop(out_fz, mb_conv['mid_channels'], mb_conv['mid_channels'], mb_conv['kernel_size'], mb_conv['mid_channels'])
        # Squeeze-and-excite: two 1x1 convs on globally pooled features.
        if mb_conv['use_se']:
            se_mid = make_divisible((mb_conv['mid_channels'] // 4), divisor=8)
            flops += count_conv_flop(1, mb_conv['mid_channels'], se_mid, 1, 1)
            flops += count_conv_flop(1, se_mid, mb_conv['mid_channels'], 1, 1)
        # 1x1 projection conv.
        flops += count_conv_flop(out_fz, mb_conv['mid_channels'], mb_conv['out_channels'], 1, 1)
        fsize = out_fz
    flops += count_conv_flop(fsize, net_config['final_expand_layer']['in_channels'], net_config['final_expand_layer']['out_channels'], 1, 1)
    # Feature mix and classifier act on 1x1 (pooled) features.
    flops += count_conv_flop(1, net_config['feature_mix_layer']['in_channels'], net_config['feature_mix_layer']['out_channels'], 1, 1)
    flops += count_conv_flop(1, net_config['classifier']['in_features'], net_config['classifier']['out_features'], 1, 1)
    return (flops / 1000000.0)
class HistnormEvaluator(BasicEvaluator):
    """BasicEvaluator variant that length-normalizes the distance score."""

    def evaluate(self, predict, ground_truth):
        """Return (correct, distance / len(ground_truth)) from the base metric."""
        (correct, dist) = super().evaluate(predict, ground_truth)
        normalized_dist = dist / len(ground_truth)
        return (correct, normalized_dist)
class ModularForms(FormsSpace_abstract, Module, UniqueRepresentation):
    """Space of holomorphic modular forms for a Hecke triangle group."""

    def __classcall__(cls, group=HeckeTriangleGroup(3), base_ring=ZZ, k=QQ(0), ep=None, n=None):
        # Normalize arguments so equal parameter sets give the identical
        # space (UniqueRepresentation caching).
        (group, base_ring, k, ep, n) = canonical_parameters(group, base_ring, k, ep, n)
        return super().__classcall__(cls, group=group, base_ring=base_ring, k=k, ep=ep, n=n)

    def __init__(self, group, base_ring, k, ep, n):
        FormsSpace_abstract.__init__(self, group=group, base_ring=base_ring, k=k, ep=ep, n=n)
        Module.__init__(self, base=base_ring)
        # Restrict the analytic type to holomorphic forms.
        self._analytic_type = self.AT(['holo'])
        self._module = FreeModule(self.coeff_ring(), self.dimension())
    # NOTE(review): the bare `_method` lines below look like remnants of a
    # stripped `@cached_method` decorator; as written they raise NameError
    # at class-creation time unless `_method` exists at module level --
    # confirm against the original Sage source.
    _method
    def gens(self):
        """Generators (basis) of the space, built via F_basis."""
        return [self.F_basis(m) for m in range(self.dimension())]
    _method
    def dimension(self):
        """Dimension of the space (clamped to be non-negative)."""
        return max((self._l1 + 1), ZZ.zero())
    _method
    def coordinate_vector(self, v):
        """Coordinates of ``v`` w.r.t. the basis, via its q-expansion."""
        vec = v.q_expansion_vector(min_exp=0, max_exp=(self.degree() - 1))
        return self._module(vec)
def interpolate(sparse_points, dense_points, nn_num=1, GPU_id=None):
    """Find the ``nn_num`` nearest sparse points for each dense point.

    Uses the FAISS backend when a GPU id is given and FAISS is installed,
    otherwise falls back to the Open3D nearest-neighbor implementation.
    """
    use_faiss = (GPU_id is not None) and cal_knn.FAISS_INSTALLED
    knn_cls = cal_knn.FaissNN if use_faiss else cal_knn.Open3dNN
    searcher = knn_cls(GPU_id=GPU_id)
    searcher.train(sparse_points)
    return searcher.search(dense_points, nn_num)
class GeneralizedRCNNWithTTA(nn.Module):
    """Test-time-augmentation wrapper around a GeneralizedRCNN model.

    Runs the wrapped model on augmented copies of each input (produced by
    ``tta_mapper``), merges box predictions with NMS, and — when masks are
    enabled — re-runs mask prediction on the merged boxes.
    """

    def __init__(self, cfg, model, tta_mapper=None, batch_size=3):
        """
        Args:
            cfg: config node; cloned and kept on the instance.
            model: a GeneralizedRCNN (DistributedDataParallel wrappers are
                unwrapped automatically).
            tta_mapper: callable producing augmented input dicts; defaults
                to DatasetMapperTTA(cfg).
            batch_size: number of augmented inputs per forward pass.
        """
        super().__init__()
        if isinstance(model, DistributedDataParallel):
            model = model.module
        assert isinstance(model, GeneralizedRCNN), 'TTA is only supported on GeneralizedRCNN. Got a model of type {}'.format(type(model))
        self.cfg = cfg.clone()
        assert (not self.cfg.MODEL.KEYPOINT_ON), 'TTA for keypoint is not supported yet'
        assert (not self.cfg.MODEL.LOAD_PROPOSALS), 'TTA for pre-computed proposals is not supported yet'
        self.model = model
        if (tta_mapper is None):
            tta_mapper = DatasetMapperTTA(cfg)
        self.tta_mapper = tta_mapper
        self.batch_size = batch_size

    def _turn_off_roi_heads(self, attrs):
        """Temporarily set the given roi_heads attributes to False.

        NOTE(review): this is a generator consumed via ``with`` in
        _inference_one_image, so it is presumably meant to carry an
        ``@contextlib.contextmanager`` decorator that was lost in this
        copy — confirm against upstream before use.
        """
        roi_heads = self.model.roi_heads
        old = {}
        # Remember only the attributes that actually exist on roi_heads.
        for attr in attrs:
            try:
                old[attr] = getattr(roi_heads, attr)
            except AttributeError:
                pass
        if (len(old.keys()) == 0):
            (yield)
        else:
            for attr in old.keys():
                setattr(roi_heads, attr, False)
            (yield)
            # Restore the original attribute values on exit.
            for attr in old.keys():
                setattr(roi_heads, attr, old[attr])

    def _batch_inference(self, batched_inputs, detected_instances=None):
        """Run the wrapped model's inference in chunks of ``self.batch_size``."""
        if (detected_instances is None):
            detected_instances = ([None] * len(batched_inputs))
        outputs = []
        (inputs, instances) = ([], [])
        for (idx, input, instance) in zip(count(), batched_inputs, detected_instances):
            inputs.append(input)
            instances.append(instance)
            # Flush on a full chunk or at the last input.
            if ((len(inputs) == self.batch_size) or (idx == (len(batched_inputs) - 1))):
                outputs.extend(self.model.inference(inputs, (instances if (instances[0] is not None) else None), do_postprocess=False))
                (inputs, instances) = ([], [])
        return outputs

    def __call__(self, batched_inputs):
        """Run TTA inference on a list of dataset dicts."""
        def _maybe_read_image(dataset_dict):
            # Load the image lazily when the dict only carries a file name.
            ret = copy.copy(dataset_dict)
            if ('image' not in ret):
                # NOTE(review): self.image_format is never assigned in this
                # class — presumably cfg.INPUT.FORMAT; confirm.
                image = read_image(ret.pop('file_name'), self.image_format)
                image = torch.from_numpy(image).permute(2, 0, 1)
                ret['image'] = image
            if (('height' not in ret) and ('width' not in ret)):
                ret['height'] = image.shape[1]
                ret['width'] = image.shape[2]
            return ret
        return [self._inference_one_image(_maybe_read_image(x)) for x in batched_inputs]

    def _inference_one_image(self, input):
        """Full TTA pipeline for one input dict; returns {'instances': ...}."""
        orig_shape = (input['height'], input['width'])
        (augmented_inputs, tfms) = self._get_augmented_inputs(input)
        # Detect boxes with mask/keypoint heads disabled; if masks are on,
        # the mask head is run once afterwards on the merged boxes.
        with self._turn_off_roi_heads(['mask_on', 'keypoint_on']):
            (all_boxes, all_scores, all_classes) = self._get_augmented_boxes(augmented_inputs, tfms)
        merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, orig_shape)
        if self.cfg.MODEL.MASK_ON:
            augmented_instances = self._rescale_detected_boxes(augmented_inputs, merged_instances, tfms)
            outputs = self._batch_inference(augmented_inputs, augmented_instances)
            del augmented_inputs, augmented_instances
            merged_instances.pred_masks = self._reduce_pred_masks(outputs, tfms)
            merged_instances = detector_postprocess(merged_instances, *orig_shape)
            return {'instances': merged_instances}
        else:
            return {'instances': merged_instances}

    def _get_augmented_inputs(self, input):
        """Apply the TTA mapper; return (augmented dicts, their transforms)."""
        augmented_inputs = self.tta_mapper(input)
        tfms = [x.pop('transforms') for x in augmented_inputs]
        return (augmented_inputs, tfms)

    def _get_augmented_boxes(self, augmented_inputs, tfms):
        """Detect on each augmented input; map boxes back to the original frame."""
        outputs = self._batch_inference(augmented_inputs)
        all_boxes = []
        all_scores = []
        all_classes = []
        for (output, tfm) in zip(outputs, tfms):
            pred_boxes = output.pred_boxes.tensor
            # Undo the augmentation transform to get original-image boxes.
            original_pred_boxes = tfm.inverse().apply_box(pred_boxes.cpu().numpy())
            all_boxes.append(torch.from_numpy(original_pred_boxes).to(pred_boxes.device))
            all_scores.extend(output.scores)
            all_classes.extend(output.pred_classes)
        all_boxes = torch.cat(all_boxes, dim=0)
        return (all_boxes, all_scores, all_classes)

    def _merge_detections(self, all_boxes, all_scores, all_classes, shape_hw):
        """NMS-merge the pooled detections from all augmentations."""
        num_boxes = len(all_boxes)
        num_classes = self.cfg.MODEL.ROI_HEADS.NUM_CLASSES
        # Scatter scores into the (num_boxes, num_classes + 1) matrix
        # expected by fast_rcnn_inference_single_image.
        all_scores_2d = torch.zeros(num_boxes, (num_classes + 1), device=all_boxes.device)
        for (idx, cls, score) in zip(count(), all_classes, all_scores):
            all_scores_2d[(idx, cls)] = score
        (merged_instances, _) = fast_rcnn_inference_single_image(all_boxes, all_scores_2d, shape_hw, 1e-08, self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST, self.cfg.TEST.DETECTIONS_PER_IMAGE)
        return merged_instances

    def _rescale_detected_boxes(self, augmented_inputs, merged_instances, tfms):
        """Project the merged boxes into each augmented input's frame."""
        augmented_instances = []
        for (input, tfm) in zip(augmented_inputs, tfms):
            pred_boxes = merged_instances.pred_boxes.tensor.cpu().numpy()
            pred_boxes = torch.from_numpy(tfm.apply_box(pred_boxes))
            aug_instances = Instances(image_size=input['image'].shape[1:3], pred_boxes=Boxes(pred_boxes), pred_classes=merged_instances.pred_classes, scores=merged_instances.scores)
            augmented_instances.append(aug_instances)
        return augmented_instances

    def _reduce_pred_masks(self, outputs, tfms):
        """Average mask logits over augmentations (un-flipping HFlip ones)."""
        for (output, tfm) in zip(outputs, tfms):
            if any((isinstance(t, HFlipTransform) for t in tfm.transforms)):
                output.pred_masks = output.pred_masks.flip(dims=[3])
        all_pred_masks = torch.stack([o.pred_masks for o in outputs], dim=0)
        avg_pred_masks = torch.mean(all_pred_masks, dim=0)
        return avg_pred_masks
def _preprocess_zero_mean_unit_range(inputs):
    """Rescale pixel values from [0, 255] to the symmetric range [-1, 1]."""
    scaled = tf.to_float(inputs) * (2.0 / 255.0)
    return scaled - 1.0
class Set_object_intersection(Set_object_binary):
    """Formal intersection ``X ∩ Y`` of two Sage sets."""

    def __init__(self, X, Y, category=None):
        """Build the binary set object, inheriting category facets.

        The intersection is finite/enumerated as soon as either operand is.
        """
        if (category is None):
            category = Sets()
        if any(((S in Sets().Finite()) for S in (X, Y))):
            category = category.Finite()
        if any(((S in Sets().Enumerated()) for S in (X, Y))):
            category = category.Enumerated()
        Set_object_binary.__init__(self, X, Y, 'intersection', '\\cap', category=category)

    def is_finite(self):
        """True when either operand is known finite; otherwise undecided."""
        if self._X.is_finite():
            return True
        elif self._Y.is_finite():
            return True
        raise NotImplementedError

    def __richcmp__(self, right, op):
        """Structural comparison: equal only to an intersection of the same
        two operands (in either order); anything else compares as -1."""
        if (not isinstance(right, Set_generic)):
            return rich_to_bool(op, (- 1))
        if (not isinstance(right, Set_object_intersection)):
            return rich_to_bool(op, (- 1))
        if (((self._X == right._X) and (self._Y == right._Y)) or ((self._X == right._Y) and (self._Y == right._X))):
            return rich_to_bool(op, 0)
        return rich_to_bool(op, (- 1))

    def __iter__(self):
        """Iterate one operand, filtering by membership in the other."""
        X = self._X
        Y = self._Y
        # Prefer iterating the finite side so the loop can terminate when
        # only Y is finite.
        if ((not self._X.is_finite()) and self._Y.is_finite()):
            (X, Y) = (Y, X)
        for x in X:
            if (x in Y):
                (yield x)

    def __contains__(self, x):
        # Membership means membership in both operands.
        return ((x in self._X) and (x in self._Y))

    # NOTE(review): the bare `_method` line below looks like the remnant
    # of a stripped `@cached_method` decorator; as written it raises
    # NameError at class-creation time unless `_method` is defined at
    # module level -- confirm against the original Sage source.
    _method
    def _sympy_(self):
        """Convert to a SymPy Intersection of the operands' conversions."""
        from sympy import Intersection
        from sage.interfaces.sympy import sympy_init
        sympy_init()
        return Intersection(self._X._sympy_(), self._Y._sympy_())
def StopImmediate():
    """Tear down immediate mode: reset the immediate workspace, delete its
    scratch folder, and clear the module-level flags.

    No-op when immediate mode is not active.
    """
    global _immediate_mode
    global _immediate_root_folder
    if (not IsImmediate()):
        return
    # Reset the dedicated immediate workspace before removing its files.
    with WorkspaceGuard(_immediate_workspace_name):
        ResetWorkspace()
    shutil.rmtree(_immediate_root_folder)
    _immediate_root_folder = ''
    _immediate_mode = False
def check(opt, type_model, encoding=config.encoding, assume_inhabited=False):
    """Verify that ``opt.tgt`` refines ``opt.src`` under ``type_model``.

    Encodes assumptions, precondition, source and target to SMT, then
    discharges the refinement obligations stage by stage (precondition
    safety, target safety, definedness/UB, poison, value equality),
    raising CounterExampleError with a model on the first failure.

    Returns:
        bool: whether the premise is satisfiable ("inhabited"), or the
        caller-supplied ``assume_inhabited`` short-circuit.

    Raises:
        Exception: when assumptions/precondition/source declare conditions
            they are not allowed to.
        CounterExampleError: when any obligation has a countermodel.
    """
    logger.info('Checking refinement of %r', opt.name)
    encoding = smtinterp.lookup(encoding)
    smt = encoding(type_model)
    asm = smt.conjunction(opt.asm)
    # `premise` accumulates facts that later obligations may assume;
    # the order of the += updates below is significant.
    premise = ((asm.aux + asm.safe) + asm.value)
    if (asm.defined or asm.nonpoison):
        raise Exception('Defined/Non-poison condition declared by assumption')
    pre = smt.conjunction(opt.pre)
    premise += pre.aux
    if (pre.defined or pre.nonpoison):
        raise Exception('Defined/Non-poison condition declared by precondition')
    src = smt(opt.src)
    if src.aux:
        raise Exception('Auxiliary condition declared by source')
    tgt = smt(opt.tgt)
    premise += tgt.aux
    def check_expr(stage, expr):
        # A satisfiable negated obligation is a counterexample for `stage`.
        m = satisfiable(expr, opt.name, _stage_name[stage])
        if (m is not None):
            raise CounterExampleError(stage, m, type_model, opt.src, src.value, tgt.value, encoding)
    # Stage: the precondition itself must be safe to evaluate.
    if pre.safe:
        check_expr(PRESAFE, mk_and((premise + [mk_not(pre.safe)])))
    premise += pre.value
    # Record whether any input satisfies the premise at all; an
    # uninhabited premise makes the remaining checks vacuous.
    inhabited = (assume_inhabited or (satisfiable(mk_and(premise), opt.name, 'inhabited') is not None))
    # Stage: target must be safe whenever the premise holds.
    if tgt.safe:
        check_expr(TGTSAFE, mk_and((premise + [mk_not(tgt.safe)])))
    premise += src.defined
    # Whether poison is assumed alongside definedness depends on config.
    if config.poison_undef:
        premise += src.nonpoison
    # Stage: target defined wherever the source is (no new UB).
    if tgt.defined:
        expr = (premise + [mk_not(tgt.defined)])
        check_expr(UB, mk_forall(src.qvars, expr))
    if (not config.poison_undef):
        premise += src.nonpoison
    # Stage: target non-poison wherever the source is.
    if tgt.nonpoison:
        check_expr(POISON, mk_forall(src.qvars, (premise + [mk_not(tgt.nonpoison)])))
    # Stage: values must agree for all quantified inputs.
    check_expr(UNEQUAL, mk_forall(src.qvars, (premise + [z3.Not((src.value == tgt.value))])))
    return inhabited
def build_conv_model(model_name, batch_size):
    """Build a named Caffe2 conv model wired for a full training step.

    Fills random input data and 1000-class labels, adds gradient operators
    for 'loss', and attaches a manual weighted-sum SGD update so every
    parameter participates in the backward pass.

    Args:
        model_name: key into conv_model_generators(); 'MLP' gets a 2-D
            input shape instead of NCHW.
        batch_size: leading dimension of the generated input blob.

    Returns:
        The assembled model helper object.
    """
    model_gen_map = conv_model_generators()
    assert (model_name in model_gen_map), (('Model ' + model_name) + ' not found')
    (model, input_size) = model_gen_map[model_name]('NCHW', None)
    input_shape = [batch_size, 3, input_size, input_size]
    if (model_name == 'MLP'):
        input_shape = [batch_size, input_size]
    # Random inputs/labels are created once at init time.
    model.param_init_net.GaussianFill([], 'data', shape=input_shape, mean=0.0, std=1.0)
    model.param_init_net.UniformIntFill([], 'label', shape=[batch_size], min=0, max=999)
    model.AddGradientOperators(['loss'])
    ITER = brew.iter(model, 'iter')
    # NOTE(review): base_lr is negative and near-zero -- presumably to
    # keep the update numerically inert (benchmark-style); confirm intent.
    LR = model.net.LearningRate(ITER, 'LR', base_lr=(- 1e-08), policy='step', stepsize=10000, gamma=0.999)
    ONE = model.param_init_net.ConstantFill([], 'ONE', shape=[1], value=1.0)
    # Manual SGD step: param <- param * 1 + grad * LR.
    for param in model.params:
        param_grad = model.param_to_grad[param]
        model.net.WeightedSum([param, ONE, param_grad, LR], param)
    return model
class BlockRounding(torch.autograd.Function):
    """Block floating-point quantization in forward and backward passes.

    Old-style (instance-based) autograd Function: ``forward`` stashes its
    configuration on ``self`` for ``backward`` to read. A bit-width of -1
    means "pass through unquantized" in either direction.

    Fixes vs. the previous version:
      * ``small_block``/``block_dim`` are stashed before the early return,
        so backward can always read them;
      * ``grad_input`` is initialized (it was unbound — NameError — when
        ``needs_input_grad[0]`` was False);
      * backward returns one gradient per forward input (6, not 7).
    """

    def forward(self, x, forward_bits, backward_bits, mode, small_block='None', block_dim='B'):
        """Quantize ``x`` to ``forward_bits`` (identity when -1)."""
        # Stash all config before any early return so backward() can
        # always read it.
        self.backward_bits = backward_bits
        self.mode = mode
        self.small_block = small_block
        self.block_dim = block_dim
        if forward_bits == -1:
            return x
        return block_quantize(x, forward_bits, self.mode, small_block=self.small_block, block_dim=self.block_dim)

    def backward(self, grad_output):
        """Quantize the incoming gradient to ``backward_bits`` (identity when -1)."""
        grad_input = None  # previously unbound when x needed no gradient
        if self.needs_input_grad[0]:
            if self.backward_bits != -1:
                grad_input = block_quantize(grad_output, self.backward_bits, self.mode, small_block=self.small_block, block_dim=self.block_dim)
            else:
                grad_input = grad_output
        # One gradient per forward input: (x, forward_bits, backward_bits,
        # mode, small_block, block_dim). The original returned 7 values.
        return (grad_input, None, None, None, None, None)
def _check_numclasscheckhook(detector, config_mod):
    """Exercise NumClassCheckHook against ``detector`` for train and val.

    For each split, the hook is run once with the dataset's real CLASSES
    and once with CLASSES = None (which the hook should tolerate).
    """
    # Minimal stand-in for a runner: the hook only touches .model and
    # .data_loader.dataset.CLASSES.
    dummy_runner = Mock()
    dummy_runner.model = detector
    def get_dataset_name_classes(dataset):
        # Unwrap wrapper dataset configs (and lists of configs) until the
        # innermost dataset dict is reached.
        if isinstance(dataset, (list, tuple)):
            dataset = dataset[0]
        while ('dataset' in dataset):
            dataset = dataset['dataset']
            if isinstance(dataset, (list, tuple)):
                dataset = dataset[0]
        return (dataset['type'], dataset.get('classes', None))
    compatible_check = NumClassCheckHook()
    (dataset_name, CLASSES) = get_dataset_name_classes(config_mod['data']['train'])
    if (CLASSES is None):
        # Fall back to the registered dataset class' default CLASSES.
        CLASSES = DATASETS.get(dataset_name).CLASSES
    dummy_runner.data_loader.dataset.CLASSES = CLASSES
    compatible_check.before_train_epoch(dummy_runner)
    # The hook must also handle a dataset without CLASSES metadata.
    dummy_runner.data_loader.dataset.CLASSES = None
    compatible_check.before_train_epoch(dummy_runner)
    (dataset_name, CLASSES) = get_dataset_name_classes(config_mod['data']['val'])
    if (CLASSES is None):
        CLASSES = DATASETS.get(dataset_name).CLASSES
    dummy_runner.data_loader.dataset.CLASSES = CLASSES
    compatible_check.before_val_epoch(dummy_runner)
    dummy_runner.data_loader.dataset.CLASSES = None
    compatible_check.before_val_epoch(dummy_runner)
class Langermann(Benchmark):
    """Langermann benchmark function (2-D).

    Global minimum fglob ~ -5.1621259 at approximately (2, 1.006096) on
    the domain [0, 10]^N.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # Search domain [0, 10] in every dimension.
        self._bounds = list(zip(([0.0] * self.N), ([10.0] * self.N)))
        self.global_optimum = [[2., 1.006096]]
        self.fglob = (- 5.1621259)

    def fun(self, x, *args):
        """Evaluate Langermann at ``x`` (any 2-element numeric sequence)."""
        from numpy import asarray, exp, cos, pi
        self.nfev += 1
        # Coefficients as arrays so the arithmetic broadcasts even when x
        # is a plain Python sequence. Previously they were lists, which
        # only worked when x[0]/x[1] happened to be numpy scalars.
        a = asarray([3, 5, 2, 1, 7])
        b = asarray([5, 2, 1, 4, 9])
        c = asarray([1, 2, 5, 2, 3])
        # Shared squared-distance term of both the exp and cos factors.
        d = ((x[0] - a) ** 2) + ((x[1] - b) ** 2)
        return (- sum((c * exp((- (1 / pi)) * d)) * cos(pi * d)))
def has_leading_dir(paths):
    """Return True when every path shares a single common leading directory.

    Vacuously True for an empty ``paths`` iterable.
    """
    common = None
    for path in paths:
        (head, _rest) = split_leading_dir(path)
        if not head:
            # A top-level entry means there is no wrapping directory.
            return False
        if common is None:
            common = head
        elif head != common:
            return False
    return True
def accuracy_evaluation(input_net, dataset_loader, working_device):
    """Run ``input_net`` in eval mode over ``dataset_loader`` and return
    accuracy as a percentage.

    Uses DataPreFetcher to pull batches; iteration ends when the
    prefetcher returns a None image.
    """
    input_net = input_net.eval()
    correct_acc = 0
    total_acc = 0
    prefetcher = DataPreFetcher(dataset_loader)
    (image, label) = prefetcher.next()
    with tqdm(total=len(dataset_loader)) as pbar:
        while (image is not None):
            pbar.update(1)
            # Move the batch to the GPU when evaluating on CUDA.
            if (working_device.type == 'cuda'):
                image = image.cuda()
                label = label.cuda()
            prediction = input_net(image)
            # accuracy_factor returns (correct count, sample count).
            (correct, total) = accuracy_factor(prediction, label)
            correct_acc += correct
            total_acc += total
            (image, label) = prefetcher.next()
    return ((100 * correct_acc) / total_acc)
# NOTE(review): the bare call below looks like a stripped decorator
# (HELM registers run specs with `@run_spec_function("natural_qa")`);
# as written its return value is discarded -- confirm against upstream.
_spec_function('natural_qa')
def get_natural_qa_spec(mode: str) -> RunSpec:
    """Build the RunSpec for the NaturalQA scenario.

    Args:
        mode: NaturalQA mode; 'closedbook' adds a 'Question' input noun,
            other modes get no input noun.
    """
    scenario_spec = ScenarioSpec(class_name='helm.benchmark.scenarios.natural_qa_scenario.NaturalQAScenario', args={'mode': mode})
    adapter_spec = get_generation_adapter_spec(input_noun=('Question' if (mode == 'closedbook') else None), output_noun='Answer', max_tokens=300)
    # Metrics: F1 plus the generative-harms set; grouped per mode.
    return RunSpec(name=f'natural_qa:mode={mode}', scenario_spec=scenario_spec, adapter_spec=adapter_spec, metric_specs=(get_f1_metric_specs() + get_generative_harms_metric_specs()), groups=[f'natural_qa_{mode}'])
class CrossEntropy2d(nn.Module):
    """2-D cross-entropy loss that skips pixels labeled ``ignore_label``.

    Expects predictions of shape (N, C, H, W) and integer targets of
    shape (N, H, W).
    """

    def __init__(self, ignore_label=255):
        super(CrossEntropy2d, self).__init__()
        # Target value excluded from the loss (e.g. "void" class).
        self.ignore_label = ignore_label

    def forward(self, predict, target, weight=None):
        """Compute cross entropy over the valid (non-ignored) pixels.

        Args:
            predict: (N, C, H, W) class scores.
            target: (N, H, W) integer labels; must not require grad.
            weight: optional per-class weights passed to F.cross_entropy.
        """
        assert (not target.requires_grad)
        assert (predict.dim() == 4)
        assert (target.dim() == 3)
        (n, c, h, w) = predict.size()
        # Keep non-negative labels that differ from ignore_label
        # (elementwise * acts as logical AND on the boolean masks).
        target_mask = ((target >= 0) * (target != self.ignore_label))
        target = target[target_mask]
        # No valid pixel at all: return a zero loss.
        if (not target.data.dim()):
            return Variable(torch.zeros(1))
        # (N, C, H, W) -> (N, H, W, C), then gather valid pixels into
        # a (num_valid, C) matrix.
        predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
        predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view((- 1), c)
        # NOTE(review): reduction='elementwise_mean' is the pre-1.0 PyTorch
        # spelling (later renamed 'mean'); Variable is likewise legacy --
        # confirm the pinned torch version before modernizing.
        loss = F.cross_entropy(predict, target, weight=weight, reduction='elementwise_mean')
        return loss
class VQNoDiscModel(VQModel):
    """VQModel trained without a discriminator: only the autoencoder /
    codebook loss is optimized.

    NOTE(review): uses pl.TrainResult / pl.EvalResult, which existed only
    in early PyTorch-Lightning releases -- confirm the pinned version.
    """

    def __init__(self, ddconfig, lossconfig, n_embed, embed_dim, ckpt_path=None, ignore_keys=[], image_key='image', colorize_nlabels=None):
        # Pure pass-through to VQModel; no discriminator-specific setup.
        super().__init__(ddconfig=ddconfig, lossconfig=lossconfig, n_embed=n_embed, embed_dim=embed_dim, ckpt_path=ckpt_path, ignore_keys=ignore_keys, image_key=image_key, colorize_nlabels=colorize_nlabels)

    def training_step(self, batch, batch_idx):
        """One training step minimizing the autoencoder loss."""
        x = self.get_input(batch, self.image_key)
        (xrec, qloss) = self(x)
        (aeloss, log_dict_ae) = self.loss(qloss, x, xrec, self.global_step, split='train')
        output = pl.TrainResult(minimize=aeloss)
        output.log('train/aeloss', aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
        output.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
        return output

    def validation_step(self, batch, batch_idx):
        """Validation pass; checkpoints on the reconstruction loss."""
        x = self.get_input(batch, self.image_key)
        (xrec, qloss) = self(x)
        (aeloss, log_dict_ae) = self.loss(qloss, x, xrec, self.global_step, split='val')
        rec_loss = log_dict_ae['val/rec_loss']
        output = pl.EvalResult(checkpoint_on=rec_loss)
        output.log('val/rec_loss', rec_loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
        output.log('val/aeloss', aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
        output.log_dict(log_dict_ae)
        return output

    def configure_optimizers(self):
        """Single Adam over encoder, decoder, codebook and the two
        quantization convs (no discriminator optimizer)."""
        optimizer = torch.optim.Adam(((((list(self.encoder.parameters()) + list(self.decoder.parameters())) + list(self.quantize.parameters())) + list(self.quant_conv.parameters())) + list(self.post_quant_conv.parameters())), lr=self.learning_rate, betas=(0.5, 0.9))
        return optimizer
def k_center_greedy_slow(X, s, b):
    """Greedy k-center selection (naive, recomputes distances each round).

    Starting from seed row indices ``s``, repeatedly adds the pool point
    farthest from the current selection, ``b`` times.

    Returns:
        ndarray of the ``b`` newly selected row indices of ``X``.
    """
    total = X.shape[0]
    pool = np.setdiff1d(np.arange(total), s, assume_unique=True).tolist()
    selected = list(s)
    for _ in range(b):
        # Distance of every pool point to its nearest selected point.
        pairwise = scipy.spatial.distance.cdist(X[selected], X[pool], metric='euclidean')
        nearest = np.min(pairwise, axis=0)
        farthest = np.argmax(nearest)
        # Move the farthest pool point into the selection.
        selected.append(pool.pop(farthest))
    return np.asarray(selected[(- b):])
def test_data_iterator(files, seq_len):
    """Smoke-test the record iterator: load the records, then verify that
    iterator state save/restore works for the same inputs."""
    load_records_np(files=files, seq_len=seq_len)
    test_restore_state(files=files, seq_len=seq_len)
def GuttmanLambdaA_calc(TP, FP, FN, TN):
    """Guttman's lambda A from 2x2 confusion-matrix counts.

    Returns the string 'None' on any arithmetic failure (for example a
    zero denominator), following the library-wide error convention.
    """
    try:
        population = TP + FP + FN + TN
        within = max(TP, FN) + max(FP, TN)
        margin = max(TP + FP, FN + TN)
        return (within - margin) / (population - margin)
    except Exception:
        return 'None'
class BaseOptions():
    """Declares and parses the command-line options shared by all scripts.

    Subclasses are expected to set ``self.isTrain`` before ``parse`` is
    called. Model-, optimizer-, dataset- and evaluation-specific options
    are merged into the parser by the respective modules' option setters.
    """

    def initialize(self, parser):
        """Register the base options on ``parser`` and return it."""
        # Experiment bookkeeping.
        parser.add_argument('--name', type=str, required=True, help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--easy_label', type=str, default='')
        parser.add_argument('--num_gpus', type=int, default=1, help='#GPUs to use. 0 means CPU mode')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints/', help='models are saved here')
        # Component selection (their option setters extend the parser later).
        parser.add_argument('--model', type=str, default='swapping_autoencoder', help='which model to use')
        parser.add_argument('--optimizer', type=str, default='swapping_autoencoder', help='which model to use')
        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        parser.add_argument('--resume_iter', type=str, default='latest', help='# iterations (in thousands) to resume')
        parser.add_argument('--num_classes', type=int, default=0)
        # Data loading / preprocessing.
        parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
        parser.add_argument('--preprocess', type=str, default='scale_width_and_crop', help='scaling and cropping of images at load time.')
        parser.add_argument('--load_size', type=int, default=256, help='Scale images to this size. The final image will be cropped to --crop_size.')
        parser.add_argument('--crop_size', type=int, default=256, help='Crop to the width of crop_size (after initially scaling the images to load_size.)')
        parser.add_argument('--preprocess_crop_padding', type=int, default=None, help='padding parameter of transforms.RandomCrop(). It is not used if --preprocess does not contain crop option.')
        parser.add_argument('--no_flip', action='store_true')
        parser.add_argument('--shuffle_dataset', type=str, default=None, choices=('true', 'false'))
        parser.add_argument('--dataroot', type=str, default='.')
        parser.add_argument('--dataset_mode', type=str, default='lmdb')
        parser.add_argument('--nThreads', default=8, type=int, help='# threads for loading data')
        # Network architecture choices.
        parser.add_argument('--netG', default='StyleGAN2Resnet')
        parser.add_argument('--netD', default='StyleGAN2')
        parser.add_argument('--netE', default='StyleGAN2Resnet')
        parser.add_argument('--netPatchD', default='StyleGAN2')
        parser.add_argument('--use_antialias', type=util.str2bool, default=True)
        return parser

    def gather_options(self, command=None):
        """Build the full parser and return the parsed options.

        Parsing happens in phases: ``parse_known_args`` first discovers
        which model/optimizer/dataset was requested, then each component's
        option setter extends the parser before the final strict
        ``parse_args``.
        """
        parser = AugmentedArgumentParser()
        # ``command`` lets callers parse an explicit command string instead
        # of sys.argv (used for programmatic invocation).
        parser.custom_command = command
        parser = self.initialize(parser)
        (opt, unknown) = parser.parse_known_args()
        # Let the chosen model register its own options.
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        parser = model_option_setter(parser, self.isTrain)
        parser = networks.modify_commandline_options(parser, self.isTrain)
        # Same for the optimizer...
        optimizer_name = opt.optimizer
        optimizer_option_setter = optimizers.get_option_setter(optimizer_name)
        parser = optimizer_option_setter(parser, self.isTrain)
        # ...and the dataset.
        dataset_mode = opt.dataset_mode
        dataset_option_setter = data.get_option_setter(dataset_mode)
        parser = dataset_option_setter(parser, self.isTrain)
        parser = Visualizer.modify_commandline_options(parser, self.isTrain)
        parser = IterationCounter.modify_commandline_options(parser, self.isTrain)
        evaluation_option_setter = evaluation.get_option_setter()
        parser = evaluation_option_setter(parser, self.isTrain)
        # Final pass: first lenient, then strict so unknown options error out.
        (opt, unknown) = parser.parse_known_args()
        opt = parser.parse_args()
        self.parser = parser
        return opt

    def print_options(self, opt):
        """Pretty-print all options, flagging values that differ from defaults."""
        message = ''
        message += ' Options \n'
        for (k, v) in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if (v != default):
                comment = ('\t[default: %s]' % str(default))
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += ' End '
        print(message)

    def option_file_path(self, opt, makedir=False):
        """Return the per-experiment options file path (without extension)."""
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        if makedir:
            util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt')
        return file_name

    def save_options(self, opt):
        """Persist options both as human-readable text and as a pickle."""
        file_name = self.option_file_path(opt, makedir=True)
        with open((file_name + '.txt'), 'wt') as opt_file:
            for (k, v) in sorted(vars(opt).items()):
                comment = ''
                default = self.parser.get_default(k)
                if (v != default):
                    comment = ('\t[default: %s]' % str(default))
                opt_file.write('{:>25}: {:<30}{}\n'.format(str(k), str(v), comment))
        with open((file_name + '.pkl'), 'wb') as opt_file:
            pickle.dump(opt, opt_file)

    def parse(self, save=False, command=None):
        """Gather, post-process, print and (in training) save the options."""
        opt = self.gather_options(command)
        opt.isTrain = self.isTrain
        self.print_options(opt)
        if opt.isTrain:
            self.save_options(opt)
        opt.dataroot = os.path.expanduser(opt.dataroot)
        # DataParallel-style training needs at least one sample per GPU.
        assert (opt.num_gpus <= opt.batch_size), 'Batch size must not be smaller than num_gpus'
        return opt
class XLMProphetNetPreTrainedModel(metaclass=DummyObject):
    # Auto-generated placeholder used when an optional backend is missing:
    # any attempt to instantiate (or, via DummyObject, otherwise use) the
    # class raises an informative error telling the user what to install.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises unless the 'torch' backend is importable.
        requires_backends(self, ['torch'])
def process_kron(kron_dir):
    """Load the graphs produced by a Kronecker-generator run.

    Scans ``kron_dir``: if a ``.dat`` dump is found, the graph list it
    contains is loaded and returned immediately (scan order decides which
    one). Otherwise every ``.txt`` SNAP output file in the directory is
    parsed into a graph.

    Returns:
        list of graphs.
    """
    snap_outputs = []
    for entry in os.listdir(kron_dir):
        name = os.fsdecode(entry)
        if name.endswith('.dat'):
            # A pre-built binary dump wins over parsing text outputs.
            return utils.load_graph_list(os.path.join(kron_dir, name))
        if name.endswith('.txt'):
            snap_outputs.append(name)
    return [utils.snap_txt_output_to_nx(os.path.join(kron_dir, name))
            for name in snap_outputs]
class UF1(Metric):
    """Unlabeled F1 over predicted constituency spans.

    Tracks both a per-sentence averaged F1 (``sentence_uf1``) and a
    corpus-level micro-averaged F1 (``corpus_uf1``) from accumulated
    tp/fp/fn counts. Trivial spans (single tokens and the whole-sentence
    span) are removed before scoring.
    """

    def __init__(self, eps=1e-08, device=torch.device('cuda')):
        super(UF1, self).__init__()
        self.f1 = 0.0       # running sum of per-sentence F1 scores
        self.evalb = 0.0
        self.n = 0.0        # number of scored sentences
        self.eps = eps      # guards the precision/recall divisions
        self.tp = 0.0
        self.fp = 0.0
        self.fn = 0.0
        self.device = device

    def __call__(self, preds, golds):
        """Accumulate statistics for a batch of (pred, gold) span lists.

        Spans are sequences whose first two elements are (start, end);
        only those two elements are compared. Sentences with no predicted
        spans are skipped entirely.
        """
        for (pred, gold) in zip(preds, golds):
            if (len(pred) == 0):
                continue
            length = max(gold, key=(lambda x: x[1]))[1]
            # Drop single-token spans...
            gold = list(filter((lambda x: ((x[0] + 1) != x[1])), gold))
            pred = list(filter((lambda x: ((x[0] + 1) != x[1])), pred))
            # ...and the trivial whole-sentence span.
            gold = list(filter((lambda x: (not ((x[0] == 0) and (x[1] == length)))), gold))
            pred = list(filter((lambda x: (not ((x[0] == 0) and (x[1] == length)))), pred))
            # Normalize BOTH sides to (start, end) tuples so membership
            # tests compare like with like (the original only converted
            # gold, so list-typed predictions could never match).
            gold = [tuple(g[:2]) for g in gold]
            pred = [tuple(p[:2]) for p in pred]
            for span in pred:
                if (span in gold):
                    self.tp += 1
                else:
                    self.fp += 1
            for span in gold:
                if (span not in pred):
                    self.fn += 1
            gold = set(gold)
            pred = set(pred)
            overlap = pred.intersection(gold)
            prec = (float(len(overlap)) / (len(pred) + self.eps))
            reca = (float(len(overlap)) / (len(gold) + self.eps))
            # Empty reference/prediction sets count as perfect by convention.
            if (len(gold) == 0):
                reca = 1.0
            if (len(pred) == 0):
                prec = 1.0
            f1 = (((2 * prec) * reca) / ((prec + reca) + 1e-08))
            self.f1 += f1
            self.n += 1

    @property
    def sentence_uf1(self):
        """Mean per-sentence F1 (0.0 before any sentence was scored)."""
        # Property (not a method): score() and __repr__ read this as a
        # value; as a plain method the %-format in __repr__ raised
        # TypeError on the bound method.
        if self.n == 0:
            return 0.0
        return self.f1 / self.n

    @property
    def corpus_uf1(self):
        """Micro-averaged corpus F1 from the accumulated tp/fp/fn counts."""
        if (self.tp == 0) and (self.fp == 0):
            return 0
        prec = self.tp / (self.tp + self.fp)
        recall = self.tp / (self.tp + self.fn)
        return (2 * prec * recall / (prec + recall)) if (prec + recall) > 0 else 0.0

    def score(self):
        """Return the sentence-level F1 (the primary metric value)."""
        return self.sentence_uf1

    def __repr__(self):
        s = f'Sentence F1: {self.sentence_uf1:6.2%} Corpus F1: {self.corpus_uf1:6.2%} '
        return s
def easy_dtype(ndtype, names=None, defaultfmt='f%i', **validationargs):
    """Convenience wrapper that turns ``ndtype`` into a structured np.dtype.

    Handles three cases: a sequence of types (not directly convertible to
    a dtype), a convertible dtype plus explicit ``names``, and a
    convertible dtype whose existing field names may need re-formatting.
    Field names are always run through ``NameValidator`` configured with
    ``validationargs``.

    Args:
        ndtype: dtype-like object or sequence of types.
        names: optional field names (sequence or comma-separated string).
        defaultfmt: printf-style pattern for auto-generated field names.
        **validationargs: forwarded to ``NameValidator``.
    """
    try:
        ndtype = np.dtype(ndtype)
    except TypeError:
        # ``ndtype`` is a sequence of types; synthesize one name per field.
        validate = NameValidator(**validationargs)
        nbfields = len(ndtype)
        if (names is None):
            names = ([''] * len(ndtype))
        # NOTE(review): ``basestring`` is the Python 2 name -- presumably
        # aliased to ``str`` elsewhere in this module for Python 3; confirm.
        elif isinstance(names, basestring):
            names = names.split(',')
        names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
        ndtype = np.dtype(dict(formats=ndtype, names=names))
    else:
        if (names is not None):
            # Explicit names: validate and (re)attach them to the dtype.
            validate = NameValidator(**validationargs)
            if isinstance(names, basestring):
                names = names.split(',')
            if (ndtype.names is None):
                # Flat dtype: replicate the scalar type for every name.
                formats = tuple(([ndtype.type] * len(names)))
                names = validate(names, defaultfmt=defaultfmt)
                ndtype = np.dtype(list(zip(names, formats)))
            else:
                ndtype.names = validate(names, nbfields=len(ndtype.names), defaultfmt=defaultfmt)
        elif (ndtype.names is not None):
            validate = NameValidator(**validationargs)
            # If the names are the auto-generated 'f0', 'f1', ... and a
            # different default format was requested, regenerate them;
            # otherwise just validate the existing names.
            if ((ndtype.names == tuple((('f%i' % i) for i in range(len(ndtype.names))))) and (defaultfmt != 'f%i')):
                ndtype.names = validate(([''] * len(ndtype.names)), defaultfmt=defaultfmt)
            else:
                ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
    return ndtype
_properties
# NOTE(review): the bare name above looks like a mangled class decorator
# (dace decorates this class with ``@make_properties``) -- confirm against
# the upstream source; as written it is a no-op expression (or NameError).


class InterstateEdge(object):
    """An edge between SDFG states, carrying a condition and assignments.

    The transition is taken when ``condition`` evaluates to true; upon
    taking it, each entry of ``assignments`` (symbol name -> expression
    string) is executed. Derived results (sympy form, unconditionality)
    are cached and invalidated whenever the condition is reassigned
    (see ``__setattr__``).
    """

    assignments = Property(dtype=dict, desc="Assignments to perform upon transition (e.g., 'x=x+1; y = 0')", from_string=_assignments_from_string, to_string=_assignments_to_string)
    condition = CodeProperty(desc='Transition condition', default=CodeBlock('1'))

    def __init__(self, condition: CodeBlock=None, assignments=None):
        # Default condition '1' means "always taken".
        if (condition is None):
            condition = CodeBlock('1')
        if (assignments is None):
            assignments = {}
        # Accept strings, single AST nodes, AST lists, or CodeBlocks.
        if isinstance(condition, str):
            self.condition = CodeBlock(condition)
        elif isinstance(condition, ast.AST):
            self.condition = CodeBlock([condition])
        elif isinstance(condition, list):
            self.condition = CodeBlock(condition)
        else:
            self.condition = condition
        # Normalize all assignment values to strings.
        self.assignments = {k: InterstateEdge._convert_assignment(v) for (k, v) in assignments.items()}
        # Lazily computed caches (see condition_sympy / is_unconditional).
        self._cond_sympy = None
        self._uncond = None

    def __setattr__(self, name: str, value: Any) -> None:
        # Invalidate the cached sympy/unconditional results whenever the
        # condition attribute changes.
        if ((name == 'condition') or (name == '_condition')):
            super().__setattr__('_cond_sympy', None)
            super().__setattr__('_uncond', None)
        return super().__setattr__(name, value)

    def _convert_assignment(assignment) -> str:
        # Normalize an assignment value (AST node or any object) to a string.
        # NOTE(review): takes no ``self`` -- presumably a @staticmethod whose
        # decorator was lost; it is only called via the class, so it works.
        if isinstance(assignment, ast.AST):
            return CodeBlock(assignment).as_string
        return str(assignment)

    def is_unconditional(self):
        """Return True if this edge's condition is trivially true (cached)."""
        if (self._uncond is not None):
            return self._uncond
        self._uncond = ((self.condition is None) or (InterstateEdge.condition.to_string(self.condition).strip() == '1') or (self.condition.as_string == ''))
        return self._uncond

    def condition_sympy(self):
        """Return (and cache) the condition as a sympy expression."""
        if (self._cond_sympy is not None):
            return self._cond_sympy
        self._cond_sympy = symbolic.pystr_to_symbolic(self.condition.as_string)
        return self._cond_sympy

    def read_symbols(self) -> Set[str]:
        """Return all symbol names read by the condition and assignment RHSs."""
        result = set(map(str, dace.symbolic.symbols_in_ast(self.condition.code[0])))
        for assign in self.assignments.values():
            result |= symbolic.free_symbols_and_functions(assign)
        return result

    def used_symbols(self, all_symbols: bool) -> Set[str]:
        """Return symbols used on this edge, excluding write-only LHS targets."""
        cond_symbols = set(map(str, dace.symbolic.symbols_in_ast(self.condition.code[0])))
        lhs_symbols = set()
        rhs_symbols = set()
        for (lhs, rhs) in self.assignments.items():
            rhs_symbols |= set(map(str, dace.symbolic.symbols_in_ast(ast.parse(rhs))))
            # An assigned symbol only counts as used if it is also read.
            if ((lhs not in cond_symbols) and (lhs not in rhs_symbols)):
                lhs_symbols.add(lhs)
        return ((cond_symbols | rhs_symbols) - lhs_symbols)

    def free_symbols(self) -> Set[str]:
        """Free symbols of this edge (condition plus assignments)."""
        return self.used_symbols(all_symbols=True)

    def replace_dict(self, repl: Dict[(str, str)], replace_keys=True) -> None:
        """Rename symbols in-place according to ``repl``.

        Args:
            repl: mapping of old name -> new name.
            replace_keys: when True, also rename assignment LHS keys.
        """
        if (not repl):
            return
        if replace_keys:
            for (name, new_name) in repl.items():
                _replace_dict_keys(self.assignments, name, new_name)
        # Rewrite assignment right-hand sides via AST find/replace.
        for (k, v) in self.assignments.items():
            vast = ast.parse(v)
            vast = astutils.ASTFindReplace(repl).visit(vast)
            newv = astutils.unparse(vast)
            if (newv != v):
                self.assignments[k] = newv
        # Rewrite the condition the same way.
        condition = ast.parse(self.condition.as_string)
        condition = astutils.ASTFindReplace(repl).visit(condition)
        newc = astutils.unparse(condition)
        # NOTE(review): this compares a string to an AST module object, so
        # the test is always True and the condition (and caches) are reset
        # unconditionally -- harmless, but confirm intent against upstream.
        if (newc != condition):
            self.condition.as_string = newc
            self._uncond = None
            self._cond_sympy = None

    def replace(self, name: str, new_name: str, replace_keys=True) -> None:
        """Rename a single symbol on this edge (see ``replace_dict``)."""
        self.replace_dict({name: new_name}, replace_keys)

    def new_symbols(self, sdfg, symbols) -> Dict[(str, dtypes.typeclass)]:
        """Infer the types of symbols newly defined by this edge's assignments."""
        from dace.codegen.tools.type_inference import infer_expr_type
        if (sdfg is not None):
            # Include data descriptors so array accesses type-infer correctly.
            alltypes = copy.copy(symbols)
            alltypes.update({k: v.dtype for (k, v) in sdfg.arrays.items()})
        else:
            alltypes = symbols
        inferred_lhs_symbols = {k: infer_expr_type(v, alltypes) for (k, v) in self.assignments.items()}
        lhs_symbols = set()
        rhs_symbols = set()
        for (lhs, rhs) in self.assignments.items():
            rhs_symbols |= symbolic.free_symbols_and_functions(rhs)
            # Only symbols that are not also read count as newly defined.
            if (lhs not in rhs_symbols):
                lhs_symbols.add(lhs)
        return {k: v for (k, v) in inferred_lhs_symbols.items() if (k in lhs_symbols)}

    def get_read_memlets(self, arrays: Dict[(str, dt.Data)]) -> List[mm.Memlet]:
        """Collect memlets for array reads in the condition and assignments."""
        result: List[mm.Memlet] = []
        result.extend(memlets_in_ast(self.condition.code[0], arrays))
        for assign in self.assignments.values():
            vast = ast.parse(assign)
            result.extend(memlets_in_ast(vast, arrays))
        return result

    def to_json(self, parent=None):
        # Serialize all registered properties plus the human-readable label.
        return {'type': type(self).__name__, 'attributes': dace.serialize.all_properties_to_json(self), 'label': self.label}

    def from_json(json_obj, context=None):
        # Deserialize into a fresh edge.
        # NOTE(review): no ``self`` -- presumably a lost @staticmethod decorator.
        ret = InterstateEdge()
        dace.serialize.set_properties_from_json(ret, json_obj, context=context)
        return ret

    def label(self):
        """Human-readable 'condition; assignments' label for rendering.

        NOTE(review): accessed as ``self.label`` (no call) in ``to_json`` --
        upstream declares this a @property; the decorator appears lost here.
        """
        assignments = ','.join([('%s=%s' % (k, v)) for (k, v) in self.assignments.items()])
        if (self.condition.as_string == '1'):
            # Unconditional edge: show only the assignments (if any).
            if (len(self.assignments) == 0):
                return ''
            return assignments
        if (len(self.assignments) == 0):
            return self.condition.as_string
        return ((self.condition.as_string + '; ') + assignments)
def get_cnt_sents(texts):
    """Total number of sentences across ``texts``, per NLTK's sentence tokenizer."""
    return sum(len(nltk.sent_tokenize(text)) for text in texts)
def generate_ann(root_path, split, image_infos):
    """Crop word instances out of each image and write a recognition dataset.

    For every annotation box a cropped image is saved under
    ``dst_imgs/<split>`` and a line ``<dir>/<img> <word>`` is collected for
    the split's label file.

    Args:
        root_path: dataset root containing ``imgs/``.
        split: 'training' or 'test'; selects the label-file name.
        image_infos: per-image dicts with 'file_name' and 'anno_info'
            (each annotation carrying 'word' and 'bbox').
    """
    dst_image_root = osp.join(root_path, 'dst_imgs', split)
    if (split == 'training'):
        dst_label_file = osp.join(root_path, 'train_label.txt')
    elif (split == 'test'):
        dst_label_file = osp.join(root_path, 'test_label.txt')
    # NOTE(review): any other split value leaves dst_label_file unbound and
    # raises NameError at the final write -- confirm splits are validated
    # upstream.
    os.makedirs(dst_image_root, exist_ok=True)
    lines = []
    for image_info in image_infos:
        # Per-image running index used in the crop file names.
        index = 1
        src_img_path = osp.join(root_path, 'imgs', image_info['file_name'])
        image = mmcv.imread(src_img_path)
        # Second path component is used as the image stem (assumes names
        # like 'subdir/xxx.jpg' -- TODO confirm against the annotations).
        src_img_root = osp.splitext(image_info['file_name'])[0].split('/')[1]
        for anno in image_info['anno_info']:
            word = anno['word']
            dst_img = crop_img(image, anno['bbox'])
            # Skip degenerate (zero-sized) crops.
            if (min(dst_img.shape) == 0):
                continue
            dst_img_name = f'{src_img_root}_{index}.png'
            index += 1
            dst_img_path = osp.join(dst_image_root, dst_img_name)
            mmcv.imwrite(dst_img, dst_img_path)
            lines.append(f'{osp.basename(dst_image_root)}/{dst_img_name} {word}')
    list_to_file(dst_label_file, lines)
def correct_time_related_info(arch_index: int, arch_infos: Dict[(Text, ArchResults)]):
    """Overwrite recorded train/eval times with rescaled pseudo-times.

    Uses the hp='12' run's timings ('darcyflow', seed 777) as the
    per-epoch / per-sample reference and resets every ``ArchResults`` in
    ``arch_infos`` with times scaled to the darcyflow split sizes.

    NOTE(review): ``arch_index`` is unused here -- presumably kept for a
    uniform callback signature; confirm with the call sites.

    Returns:
        the (mutated in place) ``arch_infos`` dict.
    """
    train_per_epoch_time = list(arch_infos['12'].query('darcyflow', 777).train_times.values())
    train_per_epoch_time = (sum(train_per_epoch_time) / len(train_per_epoch_time))
    # Split recorded evaluation times by evaluation-set prefix.
    (eval_ori_test_time, eval_x_valid_time) = ([], [])
    for (key, value) in arch_infos['12'].query('darcyflow', 777).eval_times.items():
        if key.startswith('ori-'):
            eval_ori_test_time.append(value)
        elif key.startswith('x-'):
            eval_x_valid_time.append(value)
        else:
            raise ValueError('-- {:} --'.format(key))
    (eval_ori_test_time, eval_x_valid_time) = (float(np.mean(eval_ori_test_time)), float(np.mean(eval_x_valid_time)))
    # Reference split sizes used for the per-sample rescaling below.
    nums = {'cifar10-valid-train': 25000, 'darcyflow-train': 1000, 'darcyflow-test': 100}
    eval_per_sample = ((eval_ori_test_time + eval_x_valid_time) / nums['darcyflow-test'])
    for (hp, arch_info) in arch_infos.items():
        arch_info.reset_pseudo_train_times('darcyflow', None, ((train_per_epoch_time / nums['cifar10-valid-train']) * nums['darcyflow-train']))
        arch_info.reset_pseudo_eval_times('darcyflow', None, 'ori-test', (eval_per_sample * nums['darcyflow-test']))
    return arch_infos
class SpectrogramDataset(Dataset, SpectrogramParser):
    """Dataset yielding (spectrogram feature, token-id transcript) pairs.

    Combines ``SpectrogramParser`` with lists of audio paths and
    transcripts; optionally doubles the dataset with SpecAugment copies
    and shuffles the combined order once at construction time.
    """

    def __init__(self, audio_paths: list, transcripts: list, sos_id: int, eos_id: int, config: DictConfig, spec_augment: bool=False, dataset_path: str=None, audio_extension: str='pcm') -> None:
        # All audio/feature settings are forwarded from the ``config.audio``
        # section to the parser base class.
        super(SpectrogramDataset, self).__init__(feature_extract_by=config.audio.feature_extract_by, sample_rate=config.audio.sample_rate, n_mels=config.audio.n_mels, frame_length=config.audio.frame_length, frame_shift=config.audio.frame_shift, del_silence=config.audio.del_silence, input_reverse=config.audio.input_reverse, normalize=config.audio.normalize, freq_mask_para=config.audio.freq_mask_para, time_mask_num=config.audio.time_mask_num, freq_mask_num=config.audio.freq_mask_num, sos_id=sos_id, eos_id=eos_id, dataset_path=dataset_path, transform_method=config.audio.transform_method, audio_extension=audio_extension)
        self.audio_paths = list(audio_paths)
        self.transcripts = list(transcripts)
        # One augmentation method per example; VANILLA = no augmentation.
        self.augment_methods = ([self.VANILLA] * len(self.audio_paths))
        self.dataset_size = len(self.audio_paths)
        self._augment(spec_augment)
        self.shuffle()

    def get_item(self, idx):
        """Return (feature, transcript) for ``idx``; (None, None) if parsing fails."""
        # NOTE(review): named get_item rather than __getitem__ -- presumably
        # invoked by a custom loader/collate wrapper; confirm at call sites.
        feature = self.parse_audio(os.path.join(self.dataset_path, self.audio_paths[idx]), self.augment_methods[idx])
        if (feature is None):
            return (None, None)
        transcript = self.parse_transcript(self.transcripts[idx])
        return (feature, transcript)

    def parse_transcript(self, transcript):
        # Convert a space-separated id string to [sos] + ids + [eos].
        tokens = transcript.split(' ')
        transcript = list()
        transcript.append(int(self.sos_id))
        for token in tokens:
            transcript.append(int(token))
        transcript.append(int(self.eos_id))
        return transcript

    def _augment(self, spec_augment):
        # Append a SpecAugment-ed duplicate of every example (doubles the
        # effective dataset size) when requested.
        if spec_augment:
            logger.info('Applying Spec Augmentation...')
            for idx in range(self.dataset_size):
                self.augment_methods.append(self.SPEC_AUGMENT)
                self.audio_paths.append(self.audio_paths[idx])
                self.transcripts.append(self.transcripts[idx])

    def shuffle(self):
        """Shuffle paths, transcripts and augment methods in lockstep."""
        tmp = list(zip(self.audio_paths, self.transcripts, self.augment_methods))
        random.shuffle(tmp)
        # NOTE: zip(*...) turns the three lists into tuples from here on.
        (self.audio_paths, self.transcripts, self.augment_methods) = zip(*tmp)

    def __len__(self):
        return len(self.audio_paths)

    def count(self):
        # Alias of __len__ kept for external callers.
        return len(self.audio_paths)
class ResidualBlock(nn.Module):
    """Residual block: two 3x3 convs with a configurable normalization.

    ``norm_fn`` selects 'group', 'batch', 'instance' or 'none'. When
    ``stride != 1`` the first conv downsamples and the skip connection is
    projected by a strided 1x1 conv followed by a third norm layer.
    """

    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)
        group_count = planes // 8
        strided = stride != 1
        if norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=group_count, num_channels=planes)
            self.norm2 = nn.GroupNorm(num_groups=group_count, num_channels=planes)
            if strided:
                self.norm3 = nn.GroupNorm(num_groups=group_count, num_channels=planes)
        elif norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(planes)
            self.norm2 = nn.BatchNorm2d(planes)
            if strided:
                self.norm3 = nn.BatchNorm2d(planes)
        elif norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(planes)
            self.norm2 = nn.InstanceNorm2d(planes)
            if strided:
                self.norm3 = nn.InstanceNorm2d(planes)
        elif norm_fn == 'none':
            self.norm1 = nn.Sequential()
            self.norm2 = nn.Sequential()
            if strided:
                self.norm3 = nn.Sequential()
        if not strided:
            self.downsample = None
        else:
            # Project the identity branch to the new resolution/width.
            self.downsample = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)

    def forward(self, x):
        out = self.relu(self.norm1(self.conv1(x)))
        out = self.relu(self.norm2(self.conv2(out)))
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(shortcut + out)
def create_area_light(location: Tuple[(float, float, float)]=(0.0, 0.0, 5.0), rotation: Tuple[(float, float, float)]=(0.0, 0.0, 0.0), size: float=5.0, color: Tuple[(float, float, float, float)]=(1.0, 0.9, 0.8, 1.0), strength: float=1000.0, name: Optional[str]=None) -> bpy.types.Object:
    """Add an area light to the current Blender scene and return its object.

    Handles the 2.80 operator rename (``light_add`` vs ``lamp_add``); the
    emission color is set through the light's node tree.
    """
    if bpy.app.version >= (2, 80, 0):
        bpy.ops.object.light_add(type='AREA', location=location, rotation=rotation)
    else:
        bpy.ops.object.lamp_add(type='AREA', location=location, rotation=rotation)
    created = bpy.context.object
    if name is not None:
        created.name = name
    light_data = created.data
    light_data.size = size
    light_data.use_nodes = True
    light_data.node_tree.nodes['Emission'].inputs['Color'].default_value = color
    light_data.energy = strength
    return created
class layer_norm(object):
    # Thin callable wrapper around tf.contrib.layers.layer_norm (TF1 API)
    # so layer normalization can be passed around like the other layer
    # factories while keeping a stable variable scope.
    def __init__(self, name='layer_norm'):
        # name: variable scope under which the norm's parameters live.
        self.name = name

    def __call__(self, x):
        # Applies layer normalization to x inside the configured scope.
        return tf.contrib.layers.layer_norm(x, scope=self.name)
class DropPath(nn.Module):
    """Per-sample stochastic-depth wrapper around the ``drop_path`` helper.

    All semantics live in the functional ``drop_path``; this module only
    stores the drop probability/rescaling flag and forwards the current
    training mode.
    """

    def __init__(self, drop_prob: float=0.0, scale_by_keep: bool=True):
        super().__init__()
        # Forwarded verbatim to drop_path on every call.
        self.drop_prob = drop_prob
        self.scale_by_keep = scale_by_keep

    def forward(self, x):
        # self.training toggles the drop behavior between train and eval.
        return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
def create_reverse_dependency_map():
    """Map each module file to every module that (transitively) depends on it.

    Computes the transitive closure of the direct import graph over all
    source and test modules, then inverts it. ``__init__.py`` files are
    handled last: they inherit the reverse dependencies of the modules
    they import.

    Returns:
        dict of module path -> list of dependent module paths.
    """
    cache = {}
    all_modules = (list(PATH_TO_TRANFORMERS.glob('**/*.py')) + list(PATH_TO_TESTS.glob('**/*.py')))
    all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules]
    direct_deps = {m: get_module_dependencies(m, cache=cache) for m in all_modules}
    # Fixed-point iteration: propagate dependencies-of-dependencies until
    # no module gains a new entry (i.e., the transitive closure).
    something_changed = True
    while something_changed:
        something_changed = False
        for m in all_modules:
            for d in direct_deps[m]:
                # __init__.py dependencies are treated separately below.
                if d.endswith('__init__.py'):
                    continue
                if (d not in direct_deps):
                    raise ValueError(f'KeyError:{d}. From {m}')
                new_deps = (set(direct_deps[d]) - set(direct_deps[m]))
                if (len(new_deps) > 0):
                    direct_deps[m].extend(list(new_deps))
                    something_changed = True
    # Invert the closure: reverse_map[d] = modules that depend on d.
    reverse_map = collections.defaultdict(list)
    for m in all_modules:
        for d in direct_deps[m]:
            reverse_map[d].append(m)
    # An __init__.py impacts everything impacted by any module it imports.
    # NOTE: ``direct_deps`` is deliberately reused as a plain list below;
    # the dict version is no longer needed at this point.
    for m in [f for f in all_modules if f.endswith('__init__.py')]:
        direct_deps = get_module_dependencies(m, cache=cache)
        deps = sum([reverse_map[d] for d in direct_deps if (not d.endswith('__init__.py'))], direct_deps)
        reverse_map[m] = list((set(deps) - {m}))
    return reverse_map
class LeNet(nn.Module):
    """LeNet-style CNN mapping 1-channel 32x32 inputs to 10 log-probabilities.

    Two conv(3x3) + ReLU + 2x2 max-pool stages reduce the input to 16
    feature maps of 6x6, followed by three fully-connected layers and a
    log-softmax output.
    """

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, (3, 3))
        self.conv2 = nn.Conv2d(6, 16, (3, 3))
        self.fc1 = nn.Linear(16 * 6 * 6, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Two conv -> relu -> 2x2 max-pool stages.
        for conv in (self.conv1, self.conv2):
            x = F.max_pool2d(F.relu(conv(x)), 2)
        # Flatten per sample for the classifier head.
        x = x.view(-1, 16 * 6 * 6)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return F.log_softmax(self.fc3(x), dim=1)
class AdjustDefByDirectives(CythonTransform, SkipDeclarations):
    """Applies directive-driven def -> cfunc/ccall/cclass conversions.

    Rewrites plain Python ``def``s into C functions when the active
    compiler directives (``cfunc``, ``ccall``, ``inline``, ``nogil``,
    ``returns``/``exceptval``) request it, and Python classes into
    extension types under the ``cclass`` directive.
    """

    def visit_ModuleNode(self, node):
        # Seed the directive/py-class state from the module scope.
        self.directives = node.directives
        self.in_py_class = False
        self.visitchildren(node)
        return node

    def visit_CompilerDirectivesNode(self, node):
        # Scope the directive set to this subtree, then restore it.
        old_directives = self.directives
        self.directives = node.directives
        self.visitchildren(node)
        self.directives = old_directives
        return node

    def visit_DefNode(self, node):
        modifiers = []
        if ('inline' in self.directives):
            modifiers.append('inline')
        nogil = self.directives.get('nogil')
        except_val = self.directives.get('exceptval')
        return_type_node = self.directives.get('returns')
        if ((return_type_node is None) and self.directives['annotation_typing']):
            # Fall back to the function's own return annotation.
            return_type_node = node.return_type_annotation
        if ((return_type_node is not None) and (except_val is None)):
            # With a known return type, default to checked exceptions.
            except_val = (None, True)
        elif (except_val is None):
            except_val = (None, False)
        if ('ccall' in self.directives):
            # ccall: convert to an overridable C function and re-visit it.
            node = node.as_cfunction(overridable=True, modifiers=modifiers, nogil=nogil, returns=return_type_node, except_val=except_val)
            return self.visit(node)
        if ('cfunc' in self.directives):
            if self.in_py_class:
                error(node.pos, 'cfunc directive is not allowed here')
            else:
                node = node.as_cfunction(overridable=False, modifiers=modifiers, nogil=nogil, returns=return_type_node, except_val=except_val)
                return self.visit(node)
        # Not converted: these modifiers are invalid on Python functions.
        if ('inline' in modifiers):
            error(node.pos, "Python functions cannot be declared 'inline'")
        if nogil:
            error(node.pos, "Python functions cannot be declared 'nogil'")
        self.visitchildren(node)
        return node

    def visit_LambdaNode(self, node):
        # Lambdas are never converted; leave them untouched.
        return node

    def visit_PyClassDefNode(self, node):
        if ('cclass' in self.directives):
            # cclass: convert the Python class into an extension type.
            node = node.as_cclass()
            return self.visit(node)
        else:
            # Track that defs below live inside a Python class body
            # (where cfunc is disallowed).
            old_in_pyclass = self.in_py_class
            self.in_py_class = True
            self.visitchildren(node)
            self.in_py_class = old_in_pyclass
            return node

    def visit_CClassDefNode(self, node):
        # Extension-type bodies are not "Python class" contexts.
        old_in_pyclass = self.in_py_class
        self.in_py_class = False
        self.visitchildren(node)
        self.in_py_class = old_in_pyclass
        return node
def expected_calibration_error(y_hat: Prediction, y: Tensor, n_bins: int=10) -> Tensor:
    """Expected calibration error (ECE) of a batch of predictions.

    Bins predictions by confidence, then averages |accuracy - confidence|
    across bins weighted by bin cardinality. Returns NaN when either the
    soft or hard predictions are missing, or the batch is empty.
    """
    if y_hat.soft is None or y_hat.hard is None:
        return torch.as_tensor(float('nan'))
    num_samples = y_hat.soft.size(0)
    if num_samples == 0:
        return torch.as_tensor(float('nan'))
    acc_per_bin, conf_per_bin, bin_counts = bin_predictions(y_hat, y, n_bins)
    weighted_gap = torch.abs(acc_per_bin - conf_per_bin) * bin_counts
    return (weighted_gap.sum() / num_samples).cpu().detach()
def main():
    """Train or evaluate the landmark network, driven by the global ``args``."""
    # Fixed seed for reproducibility across numpy/torch/CUDA.
    seed = 1234
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Pick the dataset-specific annotation csv and Dataset class.
    if (args.dataset[0] == 'deepfashion'):
        ds = pd.read_csv('./Anno/df_info.csv')
        from dataset import DeepFashionDataset as DataManager
    elif (args.dataset[0] == 'fld'):
        ds = pd.read_csv('./Anno/fld_info.csv')
        from dataset import FLDDataset as DataManager
    else:
        raise ValueError
    print(('dataset : %s' % args.dataset[0]))
    if (not args.evaluate):
        # Training data and checkpoint directory are only needed when training.
        train_dm = DataManager(ds[(ds['evaluation_status'] == 'train')], root=args.root)
        train_dl = DataLoader(train_dm, batch_size=args.batchsize, shuffle=True)
        if (os.path.exists('models') is False):
            os.makedirs('models')
    # Test split is used by both the train loop and evaluation-only mode.
    test_dm = DataManager(ds[(ds['evaluation_status'] == 'test')], root=args.root)
    test_dl = DataLoader(test_dm, batch_size=args.batchsize, shuffle=False)
    print('Load the model...')
    net = torch.nn.DataParallel(Network(dataset=args.dataset, flag=args.glem)).cuda()
    if (not (args.weight_file == None)):
        weights = torch.load(args.weight_file)
        if args.update_weight:
            # Remap / partially load weights when architectures differ.
            weights = utils.load_weight(net, weights)
        net.load_state_dict(weights)
    if args.evaluate:
        print('Evaluation only')
        test(net, test_dl, 0)
        return
    optimizer = torch.optim.Adam(net.parameters(), lr=args.learning_rate)
    # Decay the learning rate by 10x every 5 epochs.
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 5, 0.1)
    print('Start training')
    for epoch in range(args.epoch):
        # NOTE(review): calling scheduler.step() before the epoch is the
        # pre-1.1 PyTorch convention; newer versions expect it after the
        # optimizer steps -- confirm the intended torch version.
        lr_scheduler.step()
        train(net, optimizer, train_dl, epoch)
        test(net, test_dl, epoch)
class SNRHomogeneousBlocks(SNRBase):
    """SNR estimate from statistics of small homogeneous image patches.

    Slides a ``patch_size`` x ``patch_size`` window over the image, keeps
    the patches whose rounded mean equals the global median of non-zero
    pixels (assumed homogeneous), and sigma-clips their standard
    deviations to estimate the noise level.
    """

    def __init__(self, patch_size: int=3, stride: Optional[int]=None, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size
        # Default: non-overlapping patches (stride equals the patch size).
        self.stride = (self.patch_size if (not stride) else stride)

    def get_snr_value(self, img: np.array) -> Tuple[(float, float)]:
        """Return (snr, snr_uncertainty) for a single-channel image."""
        # Median of the non-zero pixels serves as the signal estimate.
        self.img_median = np.median(img[(img > 0)], axis=None)
        img = img.astype(float)
        # Zeros are treated as missing data.
        img = np.where((img == 0), np.nan, img)
        # Per-patch rounded mean and sample std over the sliding windows.
        m = np.rint(np.mean(view_as_windows(img, (self.patch_size, self.patch_size), self.stride), axis=(3, 2))).flatten()
        s = np.std(view_as_windows(img, (self.patch_size, self.patch_size), self.stride), axis=(3, 2), ddof=1).flatten()
        if self.debug:
            self.snrs = (m, s)
        # Stds of the patches whose mean matches the global median: these
        # patches are assumed homogeneous, so their std tracks the noise.
        l_var = s[np.argwhere((m == self.img_median))]
        # Clip high outliers (2-sigma upper bound) before estimating noise.
        (ll, low, up) = sigmaclip(l_var[(~ np.isnan(l_var))], high=2)
        (noise, sigma_noise) = (np.nanmedian(ll), np.nanstd(ll))
        # Uncertainty via propagation: d(S/N)/dN = -S/N^2.
        return ((self.img_median / noise), ((self.img_median / (noise * noise)) * sigma_noise))

    def apply_one_channel(self, image: np.array) -> Tuple[(float, float)]:
        # Hook used by SNRBase to process each channel independently.
        return self.get_snr_value(image)
def _make_deprecate(meth):
new_name = meth.__name__
old_name = new_name[:(- 1)]
def deprecated_init(*args, **kwargs):
warnings.warn('nn.init.{} is now deprecated in favor of nn.init.{}.'.format(old_name, new_name), stacklevel=2)
return meth(*args, **kwargs)
deprecated_init.__doc__ = '\n {old_name}(...)\n\n .. warning::\n This method is now deprecated in favor of :func:`torch.nn.init.{new_name}`.\n\n See :func:`~torch.nn.init.{new_name}` for details.'.format(old_name=old_name, new_name=new_name)
deprecated_init.__name__ = old_name
return deprecated_init |
class Label(object):
    """Renders an HTML ``<label>`` element bound to a form field.

    ``str()``, ``unicode()``, ``__html__`` and calling the object all
    produce the same markup, so instances can be dropped directly into
    templates.
    """

    def __init__(self, field_id, text):
        self.field_id = field_id
        self.text = text

    def __str__(self):
        return self()

    def __unicode__(self):
        return self()

    def __html__(self):
        return self()

    def __call__(self, text=None, **kwargs):
        """Render the label tag; ``for_`` overrides the ``for`` attribute."""
        if 'for_' in kwargs:
            kwargs['for'] = kwargs.pop('for_')
        else:
            kwargs.setdefault('for', self.field_id)
        attributes = widgets.html_params(**kwargs)
        label_text = escape(text or self.text)
        return Markup('<label %s>%s</label>' % (attributes, label_text))

    def __repr__(self):
        return 'Label(%r, %r)' % (self.field_id, self.text)
class SemiMarkovConditionalRandomField(torch.nn.Module):
def __init__(self, num_tags: int, default_tag: int, max_span_width: int, outside_span_tag: int=None, loss_type: str='logloss', false_positive_penalty: float=1.0, false_negative_penalty: float=1.0) -> None:
super().__init__()
self.num_tags = num_tags
self.max_span_width = max_span_width
self.default_tag = default_tag
self.outside_span_tag = outside_span_tag
self.loss_type = loss_type
self.false_positive_penalty = false_positive_penalty
self.false_negative_penalty = false_negative_penalty
def _input_likelihood(self, logits: torch.Tensor, text_mask: torch.Tensor, cost: torch.Tensor, tag_mask: torch.Tensor=None) -> torch.Tensor:
    """Forward-algorithm log partition over all span segmentations.

    Args:
        logits: (batch, seq_len, max_span_width, num_tags) span scores.
        text_mask: (batch, seq_len) 1/0 mask of real tokens.
        cost: cost-augmentation tensor with the same layout as ``logits``.
        tag_mask: optional (batch, num_tags) mask of allowed tags.

    Returns:
        (1, batch) log partition values, one per sequence.

    NOTE(review): tensors are allocated via ``torch.cuda`` / ``.cuda()``
    directly, so this path requires a GPU.
    """
    (batch_size, sequence_length, max_span_width, num_tags) = logits.size()
    if (tag_mask is None):
        tag_mask = Variable(torch.ones(batch_size, num_tags).cuda())
    else:
        # Every sequence must allow at least one tag.
        tmask_sum = torch.sum(tag_mask, 1).data
        assert (tmask_sum > 0).all()
    # Reorder to (seq_len, span_width, batch, tags) for per-position loops.
    logits = logits.transpose(0, 1).contiguous()
    logits = logits.transpose(1, 2).contiguous()
    cost = cost.transpose(0, 1).contiguous()
    cost = cost.transpose(1, 2).contiguous()
    # Forbid the default tag in log-space (it scores "no span").
    default_tag_mask = torch.zeros(num_tags).cuda()
    default_tag_mask[self.default_tag] = float('-inf')
    default_tag_mask = Variable(default_tag_mask.view(1, 1, (- 1)))
    # log(tag_mask) turns disallowed tags into -inf as well.
    tag_mask = (torch.log(tag_mask).view(1, batch_size, num_tags) + default_tag_mask)
    # alpha[j] = log-sum score over all segmentations of the length-j prefix;
    # alpha[0] = 0 (empty prefix).
    alpha = Variable(torch.cuda.FloatTensor([[0.0 for _ in range(batch_size)]]), requires_grad=True)
    for j in range(sequence_length):
        # Spans ending at j start at most max_span_width tokens back.
        width = max_span_width
        if (j < (max_span_width - 1)):
            width = (j + 1)
        # Prefix scores for every candidate span start, widest first.
        idx = Variable(torch.cuda.LongTensor([i for i in range(j, (j - width), (- 1))]))
        reversed_alpha = alpha.index_select(dim=0, index=idx)
        broadcast_alpha = reversed_alpha.view(width, batch_size, 1)
        logits_at_j = logits[j]
        start_indices = Variable(torch.cuda.LongTensor(range(width)))
        span_factors = logits_at_j.index_select(dim=0, index=start_indices)
        span_costs = cost[j].index_select(dim=0, index=start_indices)
        # Log-sum-exp over tags, then over span widths.
        alpha_along_arglabels = logsumexp((((broadcast_alpha + span_factors) + span_costs) + tag_mask))
        alpha_at_j = logsumexp(alpha_along_arglabels, dim=0).view(1, batch_size)
        alpha = torch.cat([alpha, alpha_at_j], dim=0)
    # Pick each sequence's alpha at its true (unpadded) length.
    actual_lengths = torch.sum(text_mask, dim=1).view(1, batch_size)
    partition = alpha.gather(dim=0, index=actual_lengths)
    return partition
def _joint_likelihood(self, logits: torch.Tensor, tags: torch.Tensor, mask: torch.LongTensor) -> torch.Tensor:
    """Score of the gold segmentation under ``logits``.

    Sums the logit of every gold-labeled span (positions whose tag differs
    from the default tag), respecting the padding mask.

    Returns:
        per-sequence gold-path scores (summed over positions).
    """
    (batch_size, sequence_length, _, _) = logits.shape
    # Reorder to (seq_len, span_width, batch, ...) to index by position.
    logits = logits.transpose(0, 1).contiguous()
    logits = logits.transpose(1, 2).contiguous()
    mask = mask.float().transpose(0, 1).contiguous()
    tags = tags.transpose(0, 1).contiguous()
    tags = tags.transpose(1, 2).contiguous()
    default_tags = Variable((self.default_tag * torch.ones(batch_size).long().cuda()))
    numerator = 0.0
    for j in range(sequence_length):
        for d in range(min(self.max_span_width, sequence_length)):
            current_tag = tags[j][d]
            # Only spans whose gold tag is not the default contribute.
            valid_tag_mask = (current_tag != default_tags).float()
            current_tag = current_tag.view(batch_size, 1)
            # Gather the logit of the gold tag, zeroing invalid/padded slots.
            emit_score = ((logits[j][d].gather(dim=1, index=current_tag).squeeze(1) * valid_tag_mask) * mask[j])
            numerator += emit_score
    return numerator
def forward(self, inputs: torch.Tensor, tags: torch.Tensor, mask: torch.ByteTensor, tag_mask: Variable=None, average_batches: bool=True) -> torch.Tensor:
    """Compute the (optionally cost-augmented) CRF log-likelihood.

    Depending on ``self.loss_type`` the partition function is augmented
    with a hamming or recall-oriented cost, or left plain ('logloss').

    Returns:
        (batch_loss, log_numerator): summed (and, if ``average_batches``,
        batch-averaged) log-likelihood plus the gold-path score.
    """
    batch_size = inputs.size(0)
    log_numerator = self._joint_likelihood(inputs, tags, mask)
    if (self.loss_type == 'roc'):
        cost = self._get_recall_oriented_cost(tags)
    elif (self.loss_type == 'hamming'):
        cost = self._get_hamming_cost(tags)
    elif (self.loss_type == 'logloss'):
        # No cost augmentation: a zero tensor shaped like the logits.
        zeroes = (1 - ones_like(inputs))
        cost = zeroes
    else:
        raise ConfigurationError('invalid loss type {} - use roc, hamming or logloss'.format(self.loss_type))
    log_denominator = self._input_likelihood(logits=inputs, text_mask=mask, tag_mask=tag_mask, cost=cost)
    log_loss = (log_numerator - log_denominator)
    if (self.loss_type == 'roc'):
        # Recall-oriented penalty proportional to the number of gold spans.
        log_loss = (log_loss - (self.false_negative_penalty * self._get_labeled_spans_count(tags)))
    batch_loss = torch.sum(log_loss)
    if average_batches:
        batch_loss = (batch_loss / batch_size)
    # A positive log-likelihood signals a numerical problem; warn loudly.
    # NOTE(review): ``.data[0]`` is pre-0.4 PyTorch indexing (``.item()``
    # on modern versions) -- consistent with the rest of this class.
    if (batch_loss.data[0] > 0.0):
        (max_log_loss, _) = torch.max(log_loss, (- 1))
        logger.info('WARNING: invalid log loss = %f', max_log_loss.data[0])
    return (batch_loss, log_numerator)
def viterbi_tags(self, logits: Variable, mask: Variable, tag_masks: Variable=None) -> List[List[int]]:
    """Best-scoring span segmentation for each sequence in the batch.

    Args:
        logits: (batch, seq_len, max_span_width, num_classes) span scores.
        mask: (batch, seq_len) token mask giving true lengths.
        tag_masks: optional (batch, num_classes) allowed-tag masks.

    Returns:
        (all_tags, all_scores): tensors of per-(position, width) decoded
        labels and score structures, padded with default tag / -inf.
    """
    (batch_size, max_seq_length, max_span_width, num_classes) = logits.size()
    if (tag_masks is None):
        tag_masks = Variable(torch.ones(batch_size, num_classes).cuda())
    # Decode on raw tensors; gradients are not needed here.
    (logits, mask, tag_masks) = (logits.data, mask.data, tag_masks.data)
    sequence_lengths = torch.sum(mask, dim=(- 1))
    all_tags = []
    all_scores = []
    for (logits_ex, tag_mask, sequence_length) in zip(logits, tag_masks, sequence_lengths):
        # Pre-fill with default tag / -inf, then overwrite the decoded prefix.
        tags = [[self.default_tag for _ in range(max_span_width)] for _ in range(max_seq_length)]
        scores = [[[float('-inf') for _ in range(num_classes)] for _ in range(max_span_width)] for _ in range(max_seq_length)]
        # Decode only the unpadded prefix of this sequence.
        (viterbi_path, viterbi_score) = self.viterbi_decode(logits_ex[:sequence_length], tag_mask)
        tags[:len(viterbi_path)] = viterbi_path
        scores[:len(viterbi_score)] = viterbi_score
        all_tags.append(tags)
        all_scores.append(scores)
    return (torch.Tensor(all_tags), torch.Tensor(all_scores))
def viterbi_decode(self, logits: torch.Tensor, tag_mask: torch.Tensor):
    """Viterbi decoding over labeled spans for a single sequence.

    Args:
        logits: (sequence_length, max_span_width, num_classes) span scores.
        tag_mask: (num_classes,) 0/1 mask of allowed labels.

    Returns:
        (viterbi_path, viterbi_score): per-(position, width) best labels, and
        a (seq, width, classes) score structure for the chosen segmentation.
    """
    (sequence_length, max_span_width, num_classes) = list(logits.size())
    # log(0) = -inf disables masked-out labels inside the max below.
    tag_mask = torch.log(tag_mask).view(1, num_classes)
    # alpha[j]: best score of any segmentation ending exactly at position j.
    alpha = [float('-inf') for _ in range(sequence_length)]
    # backpointers[j]: (best label, width-1) of the span ending at j.
    backpointers = [(None, None) for _ in range(sequence_length)]
    for j in range(sequence_length):
        # Spans ending at j can be at most j+1 tokens long.
        width = max_span_width
        if (j < (max_span_width - 1)):
            width = (j + 1)
        start_indices = torch.cuda.LongTensor(range(width))
        # Scores of spans of every candidate width ending at position j.
        span_factors = logits[j].index_select(0, start_indices)
        (best_span_factors, best_labels) = torch.max((span_factors + tag_mask), (- 1))
        # Prepend 0.0 as the score of the empty prefix.
        extended_alpha = ([0.0] + alpha)
        # Reverse so each width pairs with the prefix ending just before it.
        broadcast_alpha = torch.cuda.FloatTensor(extended_alpha[((j + 1) - width):(j + 1)][::(- 1)])
        summed_potentials = (broadcast_alpha + best_span_factors)
        (best_score, best_difference) = torch.max(summed_potentials, (- 1))
        best_difference = int(best_difference)
        alpha[j] = float(best_score)
        backpointers[j] = (best_labels[best_difference], best_difference)
    # Follow backpointers from the end to recover the chosen spans.
    viterbi_path = [[self.default_tag for _ in range(max_span_width)] for _ in range(sequence_length)]
    viterbi_spans = {}
    # Built as (classes, width, seq) then transposed to (seq, width, classes).
    viterbi_score = torch.Tensor([[[float('-inf') for _ in range(sequence_length)] for _ in range(max_span_width)] for _ in range(num_classes)])
    viterbi_score[self.default_tag] = 0.0
    viterbi_score = viterbi_score.transpose(0, 2).tolist()
    span_end = (sequence_length - 1)
    while (span_end >= 0):
        (label, width) = backpointers[span_end]
        viterbi_path[span_end][width] = label
        viterbi_spans[((span_end - width), span_end)] = label
        if (label != self.default_tag):
            viterbi_score[span_end][width][self.default_tag] = float('-inf')
            viterbi_score[span_end][width][label] = alpha[span_end]
        # Jump to the position just before this span's start.
        span_end = ((span_end - width) - 1)
    return (viterbi_path, viterbi_score)
def convert_spans_into_sequence_of_tags(self, viterbi_spans: Dict[(Tuple[(int, int)], int)], sequence_length: int, num_classes: int) -> List[int]:
    """Expand span-level labels into per-position one-hot label indicators.

    Each (start, end) span paints its label onto every covered position.
    Spans must not overlap and must jointly cover the whole sequence.
    """
    tag_sequence = [None] * sequence_length
    tag_indicators = [[0.0] * num_classes for _ in range(sequence_length)]
    for (begin, end), span_label in viterbi_spans.items():
        for position in range(begin, end + 1):
            # Guard against overlapping spans.
            assert not tag_sequence[position]
            tag_sequence[position] = span_label
            tag_indicators[position][span_label] = 1.0
    # Every position must be covered by some span.
    assert None not in tag_sequence
    return tag_indicators
def merge_spans(self, tag_sequence: List[int]) -> List[List[int]]:
    """Merge a per-token tag sequence into (end-position, width) span labels.

    Returns a (len(tag_sequence), max_span_width) grid filled with
    self.default_tag, where entry [end][width] carries the label of a
    maximal run of identical tags ending at `end` with that width.
    """
    seq_len = len(tag_sequence)
    spans = [[self.default_tag] * self.max_span_width for _ in range(seq_len)]
    span_start = 0
    active_tag = tag_sequence[0]
    for pos, tag in enumerate(tag_sequence[1:], 1):
        width = pos - span_start
        if tag != active_tag:
            # Close the run that ended at the previous position.
            width = (pos - 1) - span_start
            spans[pos - 1][width] = active_tag
            span_start = pos
            active_tag = tag
            width = pos - span_start
        elif width == (self.max_span_width - 1):
            # Run reached maximal width: emit it and start a fresh one.
            spans[pos][width] = active_tag
            span_start = pos + 1
            if (pos + 1) < seq_len:
                active_tag = tag_sequence[pos + 1]
    # Emit the final open run, ending at the last position.
    spans[seq_len - 1][(seq_len - 1) - span_start] = tag_sequence[-1]
    return spans
def _get_hamming_cost(self, tags: torch.Tensor) -> torch.Tensor:
    """Hamming cost for softmax-margin: a penalty on every non-gold label,
    except at positions whose gold label is the default tag."""
    batch_size, sequence_length, max_span_width = tags.size()
    gold = tags.unsqueeze(dim=-1)
    # One-hot encode the gold labels over the tag vocabulary (CUDA-only).
    one_hot = Variable(torch.zeros(batch_size, sequence_length, max_span_width, self.num_tags).float().cuda()).scatter_(-1, gold, 1)
    # 1 wherever a label disagrees with the gold label.
    mistakes = 1 - one_hot
    # Zero out positions whose gold label is the default tag.
    not_default = 1 - gold.eq(self.default_tag).float()
    return self.false_positive_penalty * (mistakes * not_default)
def _get_simple_recall_cost(self, tags: torch.Tensor) -> torch.Tensor:
    """Recall cost: penalty on non-gold labels, restricted to positions whose
    gold label is a real span label (neither default nor outside-span)."""
    batch_size, sequence_length, max_span_width = tags.size()
    gold = tags.unsqueeze(dim=-1)
    # One-hot encode the gold labels (CUDA-only).
    one_hot = Variable(torch.zeros(batch_size, sequence_length, max_span_width, self.num_tags).float().cuda()).scatter_(-1, gold, 1)
    mistakes = 1 - one_hot
    # Ignore positions labeled with the default or outside-span tags.
    irrelevant = gold.eq(self.default_tag) | gold.eq(self.outside_span_tag)
    relevant_mask = 1 - irrelevant.float()
    return self.false_negative_penalty * (mistakes * relevant_mask)
def _get_recall_oriented_cost(self, tags: torch.Tensor):
    """Recall-oriented cost combining a false-positive penalty (non-gold
    labels at non-default positions, excluding the outside-span label) with
    a negative false-negative penalty on missed real span labels."""
    batch_size, sequence_length, max_span_width = tags.size()
    gold = tags.unsqueeze(dim=-1)
    # One-hot encode the gold labels (CUDA-only).
    one_hot = Variable(torch.zeros(batch_size, sequence_length, max_span_width, self.num_tags).float().cuda()).scatter_(-1, gold, 1)
    mistakes = 1 - one_hot
    # False positives: wrong labels where the gold tag is not the default.
    not_default = 1 - gold.eq(self.default_tag).float()
    false_positives = mistakes * not_default
    # The outside-span label never counts as a false positive.
    false_positives = false_positives.index_fill_(-1, Variable(torch.cuda.LongTensor([self.outside_span_tag])), 0)
    false_positives = self.false_positive_penalty * false_positives
    # False negatives: positions whose gold label is a real span label.
    irrelevant = gold.eq(self.default_tag) | gold.eq(self.outside_span_tag)
    relevant_mask = 1 - irrelevant.float()
    false_negatives = (-self.false_negative_penalty) * mistakes * relevant_mask
    return false_positives + false_negatives
def _get_labeled_spans_count(self, tags: torch.Tensor):
    """Per-example count of positions carrying a real span label
    (i.e. neither the default tag nor the outside-span tag)."""
    batch_size, sequence_length, max_span_width = tags.size()
    gold = tags.unsqueeze(dim=-1)
    # One-hot encode the gold labels (CUDA-only).
    one_hot = Variable(torch.zeros(batch_size, sequence_length, max_span_width, self.num_tags).float().cuda()).scatter_(-1, gold, 1)
    irrelevant = gold.eq(self.default_tag) | gold.eq(self.outside_span_tag)
    relevant_mask = 1 - irrelevant.float()
    # Reduce over classes, widths, and positions, leaving the batch dim.
    return (one_hot * relevant_mask).sum(-1).sum(-1).sum(-1)
# BUG FIX: the decorator was truncated to ".parametrize(...)" — restored the
# standard pytest marker so the test is collected and parametrized again.
@pytest.mark.parametrize('cfg_file', ['../configs/textrecog/sar/sar_r31_parallel_decoder_academic.py'])
def test_disable_text_recog_aug_test(cfg_file):
    """disable_text_recog_aug_test must strip MultiRotateAugOCR from every
    supported data.test layout: single dataset, dataset list, ConcatDataset
    wrapper, nested lists, and datasets carrying their own pipelines."""
    tmp_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    config_file = os.path.join(tmp_dir, cfg_file)
    cfg = Config.fromfile(config_file)
    test = cfg.data.test.datasets[0]
    # Case 1: data.test is a single dataset with its own pipeline.
    cfg1 = copy.deepcopy(cfg)
    test1 = copy.deepcopy(test)
    test1.pipeline = cfg1.data.test.pipeline
    cfg1.data.test = test1
    cfg1 = disable_text_recog_aug_test(cfg1, set_types=['test'])
    assert (cfg1.data.test.pipeline[1].type != 'MultiRotateAugOCR')
    # Case 2: data.test holds a dataset list sharing the top-level pipeline.
    cfg2 = copy.deepcopy(cfg)
    test2 = copy.deepcopy(test)
    test2.pipeline = cfg2.data.test.pipeline
    cfg2.data.test.datasets = [test2]
    cfg2 = disable_text_recog_aug_test(cfg2, set_types=['test'])
    assert (cfg2.data.test.pipeline[1].type != 'MultiRotateAugOCR')
    assert (cfg2.data.test.datasets[0].pipeline[1].type != 'MultiRotateAugOCR')
    # Case 3: data.test is a ConcatDataset wrapper.
    cfg3 = copy.deepcopy(cfg)
    test3 = copy.deepcopy(test)
    test3.pipeline = cfg3.data.test.pipeline
    cfg3.data.test = Config(dict(type='ConcatDataset', datasets=[test3]))
    cfg3 = disable_text_recog_aug_test(cfg3, set_types=['test'])
    assert (cfg3.data.test.datasets[0].pipeline[1].type != 'MultiRotateAugOCR')
    # Case 4: nested dataset lists with parallel pipelines.
    cfg4 = copy.deepcopy(cfg)
    test4 = copy.deepcopy(test)
    test4.pipeline = cfg4.data.test.pipeline
    cfg4.data.test.datasets = [[test4], [test]]
    cfg4.data.test.pipeline = [cfg4.data.test.pipeline, cfg4.data.test.pipeline]
    cfg4 = disable_text_recog_aug_test(cfg4, set_types=['test'])
    assert (cfg4.data.test.datasets[0][0].pipeline[1].type != 'MultiRotateAugOCR')
    # Case 5: datasets carry their own pipelines; top-level pipeline is None.
    cfg5 = copy.deepcopy(cfg)
    test5 = copy.deepcopy(test)
    test5.pipeline = copy.deepcopy(cfg5.data.test.pipeline)
    cfg5.data.test.datasets = [test5]
    cfg5.data.test.pipeline = None
    cfg5 = disable_text_recog_aug_test(cfg5, set_types=['test'])
    assert (cfg5.data.test.datasets[0].pipeline[1].type != 'MultiRotateAugOCR')
def infect(person_sex, relation_type, relation_sex):
    """Sample whether infection occurs for one month of contact.

    Looks up a monthly infection probability keyed by the person's sex,
    the relation type, and the relation's sex, then draws a single
    Bernoulli trial. Returns 0 or 1.
    """
    monthly_probability = {
        'f': {'parent': {'f': 0., 'm': 0.}, 'sibling': {'f': 0., 'm': 0.}, 'partner': {'*': 0.}, 'child': {'*': 0.}},
        'm': {'parent': {'f': 0., 'm': 0.}, 'sibling': {'f': 0., 'm': 0.}, 'partner': {'*': 0.}, 'child': {'*': 0.}},
        '*': {'*': {'*': 1.e-05}},
    }
    p = monthly_probability[person_sex][relation_type][relation_sex]
    return np.random.binomial(1, p)
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Convert labeled token sequences into padded BERT input features.

    Labels are mapped to ids starting at 1 (0 is implicitly the pad label).
    Only the first word-piece of each word carries the word's label; the
    `valid` list marks those first pieces.
    """
    label_map = {label: i for (i, label) in enumerate(label_list, 1)}
    features = []
    for (ex_index, example) in enumerate(examples):
        textlist = example.text_a.split(' ')
        labellist = example.label
        tokens = []
        labels = []
        valid = []  # 1 for the first sub-token of each word, 0 otherwise
        label_mask = []  # 1 where a word-level label exists
        for (i, word) in enumerate(textlist):
            # Word-piece tokenize; the word's label applies to piece 0 only.
            token = tokenizer.tokenize(word)
            tokens.extend(token)
            label_1 = labellist[i]
            for m in range(len(token)):
                if (m == 0):
                    labels.append(label_1)
                    valid.append(1)
                    label_mask.append(1)
                else:
                    valid.append(0)
        # Truncate to leave room for the [CLS] and [SEP] special tokens.
        if (len(tokens) >= (max_seq_length - 1)):
            tokens = tokens[0:(max_seq_length - 2)]
            labels = labels[0:(max_seq_length - 2)]
            valid = valid[0:(max_seq_length - 2)]
            label_mask = label_mask[0:(max_seq_length - 2)]
        ntokens = []
        segment_ids = []
        label_ids = []
        # [CLS] gets its own label id and counts as a valid position.
        ntokens.append('[CLS]')
        segment_ids.append(0)
        valid.insert(0, 1)
        label_mask.insert(0, 1)
        label_ids.append(label_map['[CLS]'])
        for (i, token) in enumerate(tokens):
            ntokens.append(token)
            segment_ids.append(0)
            if (len(labels) > i):
                label_ids.append(label_map[labels[i]])
        ntokens.append('[SEP]')
        segment_ids.append(0)
        valid.append(1)
        label_mask.append(1)
        label_ids.append(label_map['[SEP]'])
        input_ids = tokenizer.convert_tokens_to_ids(ntokens)
        input_mask = ([1] * len(input_ids))
        # NOTE(review): this overwrites the per-word label_mask built above
        # with an all-ones mask over label_ids — looks unintentional; verify
        # against the upstream implementation before relying on label_mask.
        label_mask = ([1] * len(label_ids))
        # Zero-pad every feature list up to max_seq_length.
        while (len(input_ids) < max_seq_length):
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            label_ids.append(0)
            valid.append(1)
            label_mask.append(0)
        while (len(label_ids) < max_seq_length):
            label_ids.append(0)
            label_mask.append(0)
        assert (len(input_ids) == max_seq_length)
        assert (len(input_mask) == max_seq_length)
        assert (len(segment_ids) == max_seq_length)
        assert (len(label_ids) == max_seq_length)
        assert (len(valid) == max_seq_length)
        assert (len(label_mask) == max_seq_length)
        if (ex_index < 5):
            # Log the first few examples for manual inspection.
            logger.info('*** Example ***')
            logger.info(('guid: %s' % example.guid))
            logger.info(('tokens: %s' % ' '.join([str(x) for x in tokens])))
            logger.info(('input_ids: %s' % ' '.join([str(x) for x in input_ids])))
            logger.info(('input_mask: %s' % ' '.join([str(x) for x in input_mask])))
            logger.info(('segment_ids: %s' % ' '.join([str(x) for x in segment_ids])))
        features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_ids, valid_ids=valid, label_mask=label_mask))
    return features
def get_score(submission_folder='../env'):
    """Mean absolute error between the submission's SalePrice column and the
    reference answers in ./answer.csv."""
    predictions = pd.read_csv(os.path.join(submission_folder, 'submission.csv'))['SalePrice']
    reference = pd.read_csv('answer.csv')['SalePrice']
    return abs(predictions - reference).sum() / len(reference)
def find_incoming_edges(node, dfg):
    """Collect every edge entering *node*.

    Accepts either a whole SDFG (edges are gathered across all states) or a
    single state/graph exposing `in_edges`.
    """
    if not isinstance(dfg, SDFG):
        return list(dfg.in_edges(node))
    edges = []
    for state in dfg.nodes():
        edges.extend(state.in_edges(node))
    return edges
class InPlaceABNSyncWrapper(nn.Module):
    """Thin nn.Module adapter that delegates directly to InPlaceABNSync,
    forwarding all constructor arguments unchanged."""

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.bn = InPlaceABNSync(*args, **kwargs)

    def forward(self, input):
        return self.bn(input)
def test_plot_lcs():
    """Run the learning-curve plotting CLI once per saved model checkpoint."""
    checkpoints = Path(dir_path + 'models/').glob('*/*.pt')
    for checkpoint in checkpoints:
        call_cmd(f'python run.py --plot_lcs --dump_dir tests/dump/ --model_files {checkpoint}')
class ResNet(nn.Module):
    """ResNet backbone for re-identification: the final stage's stride is
    configurable and the classifier head is omitted."""

    def __init__(self, last_stride=2, block=Bottleneck, layers=(3, 4, 6, 3)):
        super().__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # last_stride=1 keeps a larger final feature map for re-ID.
        self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; downsample on shape mismatch."""
        out_planes = planes * block.expansion
        downsample = None
        if stride != 1 or self.inplanes != out_planes:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, out_planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes),
            )
        stages = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = out_planes
        stages.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stages)

    def forward(self, x):
        # NOTE(review): no ReLU between bn1 and maxpool here, and none is
        # defined in __init__ — preserved as-is; confirm against upstream.
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.maxpool(out)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        return out

    def load_param(self, model_path):
        """Copy matching weights from a checkpoint (or torchvision's
        pretrained resnet50 when model_path is ''), skipping fc layers."""
        if model_path == '':
            param_dict = torchvision.models.resnet50(pretrained=True).state_dict()
        else:
            param_dict = torch.load(model_path)
        for name in param_dict:
            if 'fc' in name:
                continue
            self.state_dict()[name].copy_(param_dict[name])

    def random_init(self):
        """Kaiming-style normal init for convs; unit-scale for batch norms."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
def run_zeroshot(fw_name, quesion_json, article_json, model, gpu, retriver='None'):
    """Run zero-shot extractive QA over dialogue contexts and cache results.

    Args:
        fw_name: output JSON path; if it already exists, predictions are
            loaded from it instead of being recomputed.
        quesion_json: list of QA pairs with 'id', 'question' and segment ids.
        article_json: mapping from segment id to {'seg_dialog': [...]}.
        model: HF model name/path for the question-answering pipeline.
        gpu: device index passed to the pipeline.
        retriver: 'bm25' / 'dpr' to use retrieved segments, else gold ones.

    Returns:
        dict mapping question id -> lower-cased predicted answer.
    """
    if (not os.path.exists(fw_name)):
        if (retriver == 'bm25'):
            print('Use BM25 retrieved dialogue')
            retrieved = json.load(open('../../retriever/output_retriever_rank_bm25.json'))
            retrieve_article = {}
            for r in retrieved:
                retrieve_article[r['id']] = r['retrieved_article_segment_id']
        elif (retriver == 'dpr'):
            print('Use DPR retrieved dialogue')
            retrieved = json.load(open('../../retriever/output_retriever_rank_dpr-wiki.json'))
            retrieve_article = {}
            for r in retrieved:
                retrieve_article[r['id']] = r['retrieved_article_segment_id']
        else:
            print('Use gold dialogue')
        qa_pipeline_pred = {}
        # BUG FIX: was `model=mod` (NameError) — pass the `model` argument through.
        qa_pipeline = pipeline('question-answering', model=model, device=gpu)
        for qa_pair in tqdm(quesion_json):
            question = qa_pair['question'].lower()
            if (retriver == 'None'):
                context = article_json[qa_pair['article_segment_id']]['seg_dialog']
            else:
                context = article_json[retrieve_article[qa_pair['id']]]['seg_dialog']
            # Flatten the dialogue turns into "speaker: text" form.
            context = ' '.join([f"{c['speaker']}: {c['text']}" for c in context]).lower()
            answer = qa_pipeline(question=question, context=context, handle_impossible_answer=True)
            if (answer['answer'] == ''):
                answer['answer'] = 'unanswerable'
            qa_pipeline_pred[qa_pair['id']] = answer['answer'].lower()
        with open(fw_name, 'w') as fout:
            json.dump(qa_pipeline_pred, fout, indent=2)
    else:
        qa_pipeline_pred = json.load(open(fw_name))
    # Return the predictions so callers don't have to re-read the cache file.
    return qa_pipeline_pred
def pjit(fun: Callable, in_axis_resources, out_axis_resources, static_argnums: Union[(int, Sequence[int])]=(), donate_argnums: Union[(int, Sequence[int])]=(), backend: Optional[str]=None):
    """Thin wrapper over jax_pjit that accepts (and ignores) a `backend`
    keyword for signature compatibility with other jit entry points."""
    del backend  # intentionally unused; jax_pjit does not take this argument
    return jax_pjit(fun, in_axis_resources, out_axis_resources, static_argnums=static_argnums, donate_argnums=donate_argnums)
def get_world_size():
    """Number of distributed workers.

    Prefers the WORLD_SIZE environment variable; otherwise asks
    torch.distributed, falling back to 1 when it is unavailable or
    uninitialized.
    """
    env_size = os.environ.get('WORLD_SIZE')
    if env_size is not None:
        return int(env_size)
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def add_eval_lm_args(parser):
    """Register language-model evaluation flags on *parser*."""
    group = parser.add_argument_group('LM Evaluation')
    # Shared evaluation flags first, then the LM-specific ones.
    add_common_eval_args(group)
    group.add_argument(
        '--output-word-probs',
        action='store_true',
        help='if set, outputs words and their predicted log probabilities to standard output',
    )
    group.add_argument(
        '--output-word-stats',
        action='store_true',
        help='if set, outputs word statistics such as word count, average probability, etc',
    )
# NOTE(review): the decorator below was truncated to "()" in this source —
# a pytest/challenge decorator was evidently lost during extraction; restore
# it from upstream before running this test.
()
def test_information_retrieval_challenge_a(information_retrieval_agents: Agent, monkeypatch: pytest.MonkeyPatch, patched_api_requestor: MockerFixture, level_to_run: int, challenge_name: str) -> None:
    """Run the information-retrieval challenge and check that every expected
    revenue figure appears in the agent's output file."""
    # Challenge levels are 1-based; the fixture list is 0-based.
    information_retrieval_agent = information_retrieval_agents[(level_to_run - 1)]
    run_interaction_loop(monkeypatch, information_retrieval_agent, CYCLE_COUNT, challenge_name, level_to_run)
    file_path = get_workspace_path(information_retrieval_agent, OUTPUT_LOCATION)
    content = read_file(file_path, information_retrieval_agent)
    expected_revenues = EXPECTED_REVENUES[(level_to_run - 1)]
    for revenue in expected_revenues:
        # Accept the figure followed by '.' or ',' (sentence or list context).
        assert ((f'{revenue}.' in content) or (f'{revenue},' in content)), f'Expected the file to contain {revenue}'
def detnet_fpn_backbone(backbone_name, pretrained):
    """Build a DetNet backbone wrapped with a 5-level FPN.

    Freezes all parameters outside layer2..layer5 so only the upper stages
    are fine-tuned.
    """
    backbone = detnet.__dict__[backbone_name](pretrained=pretrained)
    base_channels = backbone.inplanes // 4
    # DetNet keeps the channel count flat across its last three stages.
    in_channels_list = [base_channels, base_channels * 2, base_channels * 4, base_channels * 4, base_channels * 4]
    trainable = ('layer2', 'layer3', 'layer4', 'layer5')
    for name, parameter in backbone.named_parameters():
        if not any(layer in name for layer in trainable):
            parameter.requires_grad_(False)
    return_layers = {'layer1': 0, 'layer2': 1, 'layer3': 2, 'layer4': 3, 'layer5': 4}
    return BackboneWithFPN(backbone, return_layers, in_channels_list, 256)
def add_with_offset(index, data, offset, valids=None):
    """Add *data* rows to a faiss-style index under explicit ids.

    Ids are row positions shifted by `offset` plus the index's current size;
    when `valids` is given, rows and their ids are filtered consistently so
    surviving rows keep the id of their original position.
    """
    ids = np.arange(data.shape[0]) + offset + index.ntotal
    if valids is not None:
        data = data[valids]
        ids = ids[valids]
    index.add_with_ids(data, ids)
def _build(opt):
    """Download and unpack the EmpatheticDialogues data unless already built
    at the current version."""
    dpath = os.path.join(opt['datapath'], 'empatheticdialogues')
    version = '1.1'
    if build_data.built(dpath, version_string=version):
        return
    print('[building data: ' + dpath + ']')
    # A stale (older-version) build directory must be removed first.
    if build_data.built(dpath):
        build_data.remove_dir(dpath)
    build_data.make_dir(dpath)
    for downloadable_file in RESOURCES:
        downloadable_file.download_file(dpath)
    build_data.mark_done(dpath, version_string=version)
def quat_from_two_vectors(v0: np.ndarray, v1: np.ndarray) -> np.quaternion:
    """Shortest-arc rotation carrying v0 onto v1, as a quaternion.

    Inputs are normalized internally. The nearly-antiparallel case is
    handled via SVD, since the cross-product axis degenerates there.
    """
    v0 = v0 / np.linalg.norm(v0)
    v1 = v1 / np.linalg.norm(v1)
    c = v0.dot(v1)
    if c < (-1 + 1e-08):
        # Nearly antiparallel: pick the axis orthogonal to both vectors
        # from the null space of their span.
        c = max(c, -1)
        _, _, vh = np.linalg.svd(np.stack([v0, v1], 0), full_matrices=True)
        axis = vh[2]
        w2 = (1 + c) * 0.5
        w = np.sqrt(w2)
        return np.quaternion(w, *(axis * np.sqrt(1 - w2)))
    axis = np.cross(v0, v1)
    s = np.sqrt((1 + c) * 2)
    return np.quaternion(s * 0.5, *(axis / s))
# NOTE(review): decorator reconstructed — the source was truncated to ".node".
@dace.library.node
class Pgemm(dace.sdfg.nodes.LibraryNode):
    """Distributed matrix-matrix multiply (PGEMM) library node: C = A @ B."""

    # Available expansions, keyed by BLAS backend + MPI implementation.
    implementations = {'MKLMPICH': ExpandPgemmMKLMPICH, 'MKLOpenMPI': ExpandPgemmMKLOpenMPI, 'ReferenceMPICH': ExpandPgemmReferenceMPICH, 'ReferenceOpenMPI': ExpandPgemmReferenceOpenMPI}
    default_implementation = None

    # Optional symbolic GEMM dimensions (C is m x n, A is m x k, B is k x n).
    m = dace.properties.SymbolicProperty(allow_none=True, default=None)
    n = dace.properties.SymbolicProperty(allow_none=True, default=None)
    k = dace.properties.SymbolicProperty(allow_none=True, default=None)

    def __init__(self, name, m=None, n=None, k=None, *args, **kwargs):
        super().__init__(name, *args, inputs={'_a', '_b', '_a_block_sizes', '_b_block_sizes'}, outputs={'_c'}, **kwargs)
        self.m = m
        self.n = n
        self.k = k

    def validate(self, sdfg, state):
        """Check operand base types and collect connected array descriptors.

        Returns (a, b, c, desca, descb, gdescc, ldesc); entries stay None
        when the corresponding connector is not attached.
        """
        (a, b, c, desca, descb, gdescc, ldesc) = (None, None, None, None, None, None, None)
        for e in state.in_edges(self):
            if (e.dst_conn == '_a'):
                a = sdfg.arrays[e.data.data]
            if (e.dst_conn == '_b'):
                b = sdfg.arrays[e.data.data]
            if (e.dst_conn == '_desca'):
                desca = sdfg.arrays[e.data.data]
            if (e.dst_conn == '_descb'):
                descb = sdfg.arrays[e.data.data]
        for e in state.out_edges(self):
            if (e.src_conn == '_gdescc'):
                gdescc = sdfg.arrays[e.data.data]
            if (e.src_conn == '_ldescc'):
                # BUG FIX: this was assigned to a fresh local `ldescc`, so the
                # returned `ldesc` was always None.
                ldesc = sdfg.arrays[e.data.data]
            if (e.src_conn == '_c'):
                c = sdfg.arrays[e.data.data]
        if (a.dtype.base_type != b.dtype.base_type):
            raise ValueError('The types of A and B do not match!')
        if (c.dtype.base_type != b.dtype.base_type):
            raise ValueError('The types of B and C do not match!')
        return (a, b, c, desca, descb, gdescc, ldesc)
class TFAlbertForPreTraining(metaclass=DummyObject):
    """Placeholder class used when TensorFlow is not installed: any attempt
    to instantiate it raises a helpful error via requires_backends."""
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class SpatialOffsetBlock(nn.Module):
    """Adds a reference-conditioned spatial offset to x, scaled by the
    per-channel standard deviation of x."""

    def __init__(self, ch_in, ch_ref, ks):
        super().__init__()
        nhidden = 64  # NOTE(review): unused; kept to mirror the original
        self.offset0 = SpatialOffset(ch_in, ks)
        # Instance-normalize the reference before predicting offsets.
        self.norm_ref = nn.InstanceNorm2d(ch_ref, affine=False)

    def forward(self, x, ref):
        # Spatial std of x (per batch/channel) scales the learned offset.
        x_sigma = torch.std(x, dim=[2, 3], keepdim=True)
        offset = self.offset0(self.norm_ref(ref))
        return x + x_sigma * offset
def add_weight_decay(weight_decay: float, filter_fn: Optional[FilterFn]=None) -> optax.GradientTransformation:
    """Optax transformation adding decoupled weight decay to the updates,
    optionally restricted to parameters selected by `filter_fn`."""

    def init_fn(_) -> AddWeightDecayState:
        return AddWeightDecayState()

    def update_fn(updates: optax.Updates, state: AddWeightDecayState, params: optax.Params) -> Tuple[(optax.Updates, AddWeightDecayState)]:
        decayed = jax.tree_multimap(lambda g, p: g + weight_decay * p, updates, params)
        # Keep the original update wherever filter_fn excludes the parameter.
        merged = _partial_update(updates, decayed, params, filter_fn)
        return (merged, state)

    return optax.GradientTransformation(init_fn, update_fn)
def test_pyro_bayesian_train_sample_mixin_with_local():
    """Smoke-test training and posterior sampling for the Pyro regression
    model with per-cell weights enabled."""
    adata = synthetic_iid()
    BayesianRegressionModel.setup_anndata(adata)
    model = BayesianRegressionModel(adata, per_cell_weight=True)
    model.train(max_epochs=2, batch_size=128, lr=0.01, train_size=1)
    guide_state = model.module.guide.state_dict()
    assert list(guide_state['locs.linear.weight_unconstrained'].shape) == [1, 100]
    samples = model.sample_posterior(num_samples=10, batch_size=None, return_samples=True)
    posterior = samples['posterior_samples']
    assert len(posterior['sigma']) == 10
    assert posterior['per_cell_weights'].shape == (10, adata.n_obs, 1)
def coeff_repr(c):
    """LaTeX representation of a coefficient, parenthesized when it is a
    multi-term expression so it binds correctly under multiplication."""
    try:
        # Objects may provide their own coefficient representation.
        return c._latex_coeff_repr()
    except AttributeError:
        pass
    if isinstance(c, (int, float)):
        return str(c)
    s = latex(c)
    if ('+' in s) or ('-' in s):
        return '(%s)' % s
    return s
class FindDependenciesLdd:
    """Query shared-library dependencies of binaries via the `ldd` tool."""

    def __init__(self):
        self.cmd = ['ldd']
        # Fail fast if ldd cannot be executed at all.
        try:
            call(self.cmd, stdout=PIPE, stderr=PIPE)
        except OSError:
            raise RuntimeError('command %s cannot be run' % self.cmd)

    def get_dependencies(self, file):
        """Return ldd's raw stdout (bytes) for *file*; raise on failure."""
        proc = Popen(self.cmd + [file], stdout=PIPE, stderr=PIPE)
        stdout, _stderr = proc.communicate()
        if proc.returncode != 0:
            raise RuntimeError('Failed to check dependencies for %s' % file)
        return stdout

    def grep_dependencies(self, file, deps):
        """Return the subset of *deps* (as bytes) found in ldd's output."""
        output = self.get_dependencies(file)
        patterns = {asbytes(dep): re.compile(asbytes(dep)) for dep in deps}
        found = []
        for line in output.splitlines():
            for name, pattern in patterns.items():
                if pattern.search(line):
                    found.append(name)
        return found
def test_coverage_entry_add():
    """CoverageEntry addition is component-wise."""
    total = CoverageEntry(2, 1) + CoverageEntry(3, 7)
    assert total == CoverageEntry(5, 8)
def download_temp_file(url, local_path=None, untar=False):
    """Download *url* into the temp directory, optionally extracting it.

    Args:
        url: source URL; the basename is used when local_path is None.
        local_path: relative name inside the temp directory.
        untar: if True, extract the downloaded tar archive.

    Returns:
        The temp directory when untar=True, else the downloaded file's path.
    """
    if local_path is None:
        local_path = url.rsplit('/', 1)[-1]
    local_path = os.path.join(temp_directory(), local_path)
    mkdir_p(os.path.dirname(local_path))
    if not os.path.isfile(local_path):
        print('Downloading {:s} to {:s}...'.format(url, local_path))
        # BUG FIX: close the HTTP response (previously leaked) by using it
        # as a context manager.
        with urllib.request.urlopen(url) as response, open(local_path, 'wb') as local_f:
            local_f.write(response.read())
        if untar:
            # SECURITY NOTE: extractall on an untrusted archive can write
            # outside the target directory (path traversal) — only use with
            # trusted URLs.
            # NOTE(review): extraction placed inside the fresh-download
            # branch, matching the cached-file fast path below — confirm
            # against the original (indentation was lost in this source).
            with tarfile.open(local_path) as tar_f:
                tar_f.extractall(temp_directory())
    if untar:
        return temp_directory()
    return local_path
def read_html_template(path):
    """Return the full text content of the template file at *path*."""
    with open(path) as handle:
        return handle.read()
def _iglob(path_glob):
    """Extended iglob supporting {a,b} alternation and recursive '**'.

    Alternations are expanded recursively; '**' patterns are walked with
    os.walk and re-globbed per directory; plain patterns defer to std_iglob.
    """
    rich_path_glob = RICH_GLOB.split(path_glob, 1)
    if len(rich_path_glob) > 1:
        # Alternation: split gives (prefix, comma-set, suffix).
        assert len(rich_path_glob) == 3, rich_path_glob
        prefix, choices, suffix = rich_path_glob  # renamed from builtin-shadowing 'set'
        for choice in choices.split(','):
            yield from _iglob(''.join((prefix, choice, suffix)))
    elif '**' not in path_glob:
        yield from std_iglob(path_glob)
    else:
        prefix, radical = path_glob.split('**', 1)
        if prefix == '':
            prefix = '.'
        if radical == '':
            radical = '*'
        else:
            radical = radical.lstrip('/').lstrip('\\')
        for dirpath, _dirnames, _files in os.walk(prefix):
            dirpath = os.path.normpath(dirpath)
            yield from _iglob(os.path.join(dirpath, radical))
def main(install_dir):
    """Verify that every test file and every .pyi stub found in the scipy
    source tree is also present in the installed tree."""
    installed_dir = os.path.join(ROOT_DIR, install_dir)
    if not os.path.exists(installed_dir):
        raise ValueError(f'Provided install dir {installed_dir} does not exist')
    scipy_test_files = get_test_files(SCIPY_DIR)
    installed_test_files = get_test_files(installed_dir)
    for test_file in scipy_test_files:
        if test_file in exception_list_test_files:
            continue
        if test_file not in installed_test_files:
            raise Exception('%s is not installed' % scipy_test_files[test_file])
    print(' All the test files were installed ')
    scipy_pyi_files = get_pyi_files(SCIPY_DIR)
    installed_pyi_files = get_pyi_files(installed_dir)
    for pyi_file in scipy_pyi_files:
        if pyi_file not in installed_pyi_files:
            raise Exception('%s is not installed' % scipy_pyi_files[pyi_file])
    print(' All the .pyi files were installed ')
def tdm_td3_experiment(variant):
    """Launch a TDM-TD3 experiment configured by the nested `variant` dict.

    Builds (or restores from checkpoint) the twin Q-functions and policy,
    wires up exploration, replay buffer and optional video logging, then
    calls `algorithm.train()`.
    """
    import railrl.samplers.rollout_functions as rf
    import railrl.torch.pytorch_util as ptu
    from railrl.data_management.obs_dict_replay_buffer import ObsDictRelabelingBuffer
    from railrl.exploration_strategies.base import PolicyWrappedWithExplorationStrategy
    from railrl.state_distance.tdm_networks import TdmQf, TdmPolicy
    from railrl.state_distance.tdm_td3 import TdmTd3
    from railrl.state_distance.subgoal_planner import SubgoalPlanner
    from railrl.misc.asset_loader import local_path_from_s3_or_local_path
    import joblib
    preprocess_rl_variant(variant)
    env = get_envs(variant)
    es = get_exploration_strategy(variant, env)
    observation_key = variant.get('observation_key', 'latent_observation')
    desired_goal_key = variant.get('desired_goal_key', 'latent_desired_goal')
    achieved_goal_key = desired_goal_key.replace('desired', 'achieved')
    # Vectorized rewards (one value per goal dimension) change Qf output size.
    vectorized = ('vectorized' in env.reward_type)
    variant['algo_kwargs']['tdm_kwargs']['vectorized'] = vectorized
    variant['replay_buffer_kwargs']['vectorized'] = vectorized
    if ('ckpt' in variant):
        # Restore networks from a checkpoint (a specific epoch or the latest).
        if ('ckpt_epoch' in variant):
            epoch = variant['ckpt_epoch']
            filename = local_path_from_s3_or_local_path(osp.join(variant['ckpt'], ('itr_%d.pkl' % epoch)))
        else:
            filename = local_path_from_s3_or_local_path(osp.join(variant['ckpt'], 'params.pkl'))
        print('Loading ckpt from', filename)
        data = joblib.load(filename)
        qf1 = data['qf1']
        qf2 = data['qf2']
        policy = data['policy']
        variant['algo_kwargs']['base_kwargs']['reward_scale'] = policy.reward_scale
    else:
        # Build fresh networks sized from the environment's spaces.
        obs_dim = env.observation_space.spaces[observation_key].low.size
        goal_dim = env.observation_space.spaces[desired_goal_key].low.size
        action_dim = env.action_space.low.size
        variant['qf_kwargs']['vectorized'] = vectorized
        norm_order = env.norm_order
        variant['qf_kwargs']['norm_order'] = norm_order
        # Probe the reward shape with one random step to size the Qf output.
        env.reset()
        (_, rew, _, _) = env.step(env.action_space.sample())
        if hasattr(rew, '__len__'):
            variant['qf_kwargs']['output_dim'] = len(rew)
        qf1 = TdmQf(env=env, observation_dim=obs_dim, goal_dim=goal_dim, action_dim=action_dim, **variant['qf_kwargs'])
        qf2 = TdmQf(env=env, observation_dim=obs_dim, goal_dim=goal_dim, action_dim=action_dim, **variant['qf_kwargs'])
        policy = TdmPolicy(env=env, observation_dim=obs_dim, goal_dim=goal_dim, action_dim=action_dim, reward_scale=variant['algo_kwargs']['base_kwargs'].get('reward_scale', 1.0), **variant['policy_kwargs'])
    # Optional planner-based evaluation policy.
    eval_policy = None
    if (variant.get('eval_policy', None) == 'SubgoalPlanner'):
        eval_policy = SubgoalPlanner(env, qf1, policy, observation_key=observation_key, desired_goal_key=desired_goal_key, achieved_goal_key=achieved_goal_key, state_based=variant.get('do_state_exp', False), max_tau=variant['algo_kwargs']['tdm_kwargs']['max_tau'], reward_scale=variant['algo_kwargs']['base_kwargs'].get('reward_scale', 1.0), **variant['SubgoalPlanner_kwargs'])
    exploration_policy = PolicyWrappedWithExplorationStrategy(exploration_strategy=es, policy=policy)
    replay_buffer = ObsDictRelabelingBuffer(env=env, observation_key=observation_key, desired_goal_key=desired_goal_key, achieved_goal_key=achieved_goal_key, **variant['replay_buffer_kwargs'])
    # Thread the run-time objects back into the kwargs dict the algo consumes.
    algo_kwargs = variant['algo_kwargs']
    algo_kwargs['replay_buffer'] = replay_buffer
    base_kwargs = algo_kwargs['base_kwargs']
    base_kwargs['training_env'] = env
    base_kwargs['render'] = variant.get('render', False)
    base_kwargs['render_during_eval'] = variant.get('render_during_eval', False)
    tdm_kwargs = algo_kwargs['tdm_kwargs']
    tdm_kwargs['observation_key'] = observation_key
    tdm_kwargs['desired_goal_key'] = desired_goal_key
    algorithm = TdmTd3(env, qf1=qf1, qf2=qf2, policy=policy, exploration_policy=exploration_policy, eval_policy=eval_policy, **variant['algo_kwargs'])
    if variant.get('test_ckpt', False):
        algorithm.post_epoch_funcs.append(get_update_networks_func(variant))
    # Optional rollout-video logging after each epoch.
    vis_variant = variant.get('vis_kwargs', {})
    vis_list = vis_variant.get('vis_list', [])
    if vis_variant.get('save_video', True):
        rollout_function = rf.create_rollout_function(rf.tdm_rollout, init_tau=algorithm._sample_max_tau_for_rollout(), decrement_tau=algorithm.cycle_taus_for_rollout, cycle_tau=algorithm.cycle_taus_for_rollout, max_path_length=algorithm.max_path_length, observation_key=algorithm.observation_key, desired_goal_key=algorithm.desired_goal_key, vis_list=vis_list, dont_terminate=True)
        video_func = get_video_save_func(rollout_function, env, variant)
        algorithm.post_epoch_funcs.append(video_func)
    if ptu.gpu_enabled():
        print('using GPU')
        algorithm.cuda()
        # The VAE lives on the env wrapper in vision-based (non-state) runs.
        if (not variant.get('do_state_exp', False)):
            env.vae.cuda()
    env.reset()
    if (not variant.get('do_state_exp', False)):
        # Dump VAE diagnostics once before training starts.
        env.dump_samples(epoch=None)
        env.dump_reconstructions(epoch=None)
        env.dump_latent_plots(epoch=None)
    algorithm.train()
def download_coco(path, overwrite=False):
    """Download and extract the MS-COCO 2017 archives into *path*.

    NOTE(review): the download URLs were truncated in this source; they are
    reconstructed here from the standard COCO mirror, keeping the checksums
    that survived. The val2017 checksum also looks truncated (30 hex chars
    instead of 40) — verify all three against upstream before use.
    """
    _DOWNLOAD_URLS = [
        ('http://images.cocodataset.org/zips/train2017.zip', '10ad623668ab00c62c096f0ed636d6aff41faca5'),
        ('http://images.cocodataset.org/annotations/annotations_trainval2017.zip', '8551ee4bb5860311e79dace7e79cb91e432e78b3'),
        ('http://images.cocodataset.org/zips/val2017.zip', '4950dc9d00dbe1c933ee0170fd2a41'),
    ]
    # ROBUSTNESS FIX: tolerate an already-existing target directory so
    # interrupted downloads can be resumed.
    os.makedirs(path, exist_ok=True)
    for (url, checksum) in _DOWNLOAD_URLS:
        filename = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        with zipfile.ZipFile(filename) as zf:
            zf.extractall(path=path)
def test_case5():
    """POST subdata5 to the NGSI-LD entities endpoint and expect 201 Created."""
    endpoint = brokerIp + '/ngsi-ld/v1/entities/'
    headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json'}
    response = requests.post(endpoint, data=json.dumps(ld_data.subdata5), headers=headers)
    print(response.status_code)
    assert response.status_code == 201
# BUG FIX: decorator restored — the source was truncated to "_utils.test()";
# taichi's test suite registers tests via @test_utils.test().
@test_utils.test()
def test_parent_exceeded():
    """snode.parent(k) walks k ancestors and yields None past the root."""
    val = ti.field(ti.f32)
    m = 7
    n = 3
    blk1 = ti.root.dense(ti.i, m)
    blk2 = blk1.dense(ti.j, n)
    blk2.place(val)
    assert val.snode.parent() == blk2
    assert val.snode.parent(2) == blk1
    assert val.snode.parent(3) == ti.root
    # IDIOM FIX: compare against None with `is`, not `==`.
    assert val.snode.parent(4) is None
    assert val.snode.parent(42) is None
    assert ti.root.parent() is None
def SGD_S2(W_2, Y, V, S_2, gamma):
    """Gradient-style update for S_2: gamma * (S_2 @ V - W_2 @ Y) @ V.T.

    (The original transposed V on entry and then transposed it back for the
    first product; this is the algebraically identical direct form.)
    """
    residual = S_2.dot(V) - W_2.dot(Y)
    return gamma * residual.dot(V.T)
def func_set_import_onnx_opset(opset):
    """Return the set of nnabla functions whose ONNX implementations are all
    available at or below the requested opset (e.g. 'opset_11')."""
    # Strip the 'opset_' prefix, leaving the bare version string.
    opset = opset[len('opset_'):]
    target_func_list = []
    source_func_list = []
    # Pass 1: collect every ONNX implementation whose opset qualifies.
    for (nnabla_func, impl_funcs) in _onnx_func_info.items():
        for onnx_func in impl_funcs:
            # NOTE(review): split('') raises ValueError (empty separator is
            # illegal) — the separator character was evidently lost in
            # transcription (likely '@' or '_'); restore it from upstream.
            _opset = onnx_func.split('')[1]
            # NOTE(review): this compares opset numbers as strings, which is
            # lexicographic ('9' > '11') — verify intended.
            if (_opset <= opset):
                target_func_list.append(onnx_func)
    # Pass 2: keep nnabla functions whose implementations ALL qualified.
    for (nnabla_func, impl_funcs) in _onnx_func_info.items():
        if (set(impl_funcs) <= set(target_func_list)):
            source_func_list.append(nnabla_func)
    return set(source_func_list)
def two_layer(x, FLAGS):
    """One-hidden-layer fully-connected network head (TF1 graph style).

    Flattens x to (batch, dimension), applies ReLU(x W1 + b1), then a
    linear output layer of size num_classes.
    """
    flat = tf.reshape(x, [-1, FLAGS['dimension']])
    W_fc1 = weight_variable('W_fc1', [FLAGS['dimension'], FLAGS['num_hidden']])
    b_fc1 = bias_variable('b_fc1', [FLAGS['num_hidden']])
    W_fc2 = tf.get_variable('W_fc2', initializer=tf.truncated_normal([FLAGS['num_hidden'], FLAGS['num_classes']], stddev=0.1))
    b_fc2 = tf.get_variable('b_fc2', initializer=tf.zeros([FLAGS['num_classes']]))
    hidden = tf.nn.relu(tf.matmul(flat, W_fc1) + b_fc1)
    return tf.matmul(hidden, W_fc2) + b_fc2
def get_loaders(dataset, label_class, batch_size):
    """One-class train/test loaders: train only on `label_class` samples;
    test targets become 0 (normal class) / 1 (anomaly)."""
    if dataset not in ('cifar10', 'fashion'):
        print('Unsupported Dataset')
        exit()
    if dataset == 'cifar10':
        dataset_cls = torchvision.datasets.CIFAR10
        transform = transform_color
    else:
        dataset_cls = torchvision.datasets.FashionMNIST
        transform = transform_gray
    trainset = dataset_cls(root='data', train=True, download=True, transform=transform)
    testset = dataset_cls(root='data', train=False, download=True, transform=transform)
    # Keep only the chosen class in the training split.
    keep = (np.array(trainset.targets) == label_class)
    # Binarize test labels: 0 = target class, 1 = everything else.
    testset.targets = [int(t != label_class) for t in testset.targets]
    trainset.data = trainset.data[keep]
    trainset.targets = [trainset.targets[i] for i, flag in enumerate(keep) if flag]
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2, drop_last=False)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2, drop_last=False)
    return (train_loader, test_loader)
class KernelLossBase():
    """Abstract base class for losses evaluated on a quantum kernel matrix.

    Subclasses must override :meth:`compute`; the wrapped kernel is stored
    on ``self._quantum_kernel`` for them to use.
    """

    def __init__(self, quantum_kernel: KernelMatrixBase) -> None:
        # Kernel-matrix implementation that concrete losses evaluate.
        self._quantum_kernel = quantum_kernel

    def compute(self):
        """Evaluate the loss value; must be implemented by subclasses."""
        raise NotImplementedError
class BLEURTAligner(Aligner):
    """Aligner that scores (context, input_text) pairs with a frozen BLEURT model."""

    def __init__(self, aggr_type, checkpoint, device, *args, **kwargs):
        # NOTE(review): ``aggr_type`` is accepted but a hard-coded None is
        # forwarded to the base class — confirm this is intentional.
        Aligner.__init__(self, aggr_type=None)
        model = BleurtModel(transformers.BertConfig())
        model.load_state_dict(torch.load(checkpoint), strict=False)
        # The model is inference-only: freeze all parameters and switch to eval.
        for p in model.parameters():
            p.requires_grad = False
        model.eval()
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = device
        self.bleurt_model = model.to(self.device)
        self.tokenizer = AutoTokenizer.from_pretrained(DEFAULT_TOKENIZER)

    def align(self, input_text, context):
        """Return (whitespace tokens of ``input_text``, BLEURT score vs ``context``)."""
        enc = self.tokenizer(
            context,
            input_text,
            truncation='longest_first',
            max_length=MAX_LENGTH,
            return_tensors='pt',
        ).to(self.device)
        score = self.bleurt_model(
            enc['input_ids'], enc['attention_mask'], enc['token_type_ids']
        ).tolist()[0]
        return (input_text.split(), score)
class Set_object_binary(Set_object, metaclass=ClasscallMetaclass):
    """A set formed from two sets by a binary set operation (union, etc.)."""

    def __classcall__(cls, X, Y, *args, **kwds):
        # Coerce plain inputs into Set_object instances before construction.
        if not isinstance(X, Set_object):
            X = Set(X)
        if not isinstance(Y, Set_object):
            Y = Set(Y)
        return type.__call__(cls, X, Y, *args, **kwds)

    def __init__(self, X, Y, op, latex_op, category=None):
        # Operands plus the textual/LaTeX names of the operation.
        self._X = X
        self._Y = Y
        self._op = op
        self._latex_op = latex_op
        Set_object.__init__(self, self, category=category)

    def _repr_(self):
        return f'Set-theoretic {self._op} of {self._X} and {self._Y}'

    def _latex_(self):
        return latex(self._X) + self._latex_op + latex(self._Y)

    def __hash__(self):
        # latex_op is derived from op, so hashing (X, Y, op) suffices.
        return hash((self._X, self._Y, self._op))
class NRTRModalityTransform(nn.Module):
def __init__(self, input_channels=3, input_height=32):
super().__init__()
self.conv_1 = nn.Conv2d(in_channels=input_channels, out_channels=32, kernel_size=3, stride=2, padding=1)
self.relu_1 = nn.ReLU(True)
self.bn_1 = nn.BatchNorm2d(32)
self.conv_2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
self.relu_2 = nn.ReLU(True)
self.bn_2 = nn.BatchNorm2d(64)
feat_height = (input_height // 4)
self.linear = nn.Linear(512, 512)
def init_weights(self, pretrained=None):
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
uniform_init(m)
def forward(self, x):
x = self.conv_1(x)
x = self.relu_1(x)
x = self.bn_1(x)
x = self.conv_2(x)
x = self.relu_2(x)
x = self.bn_2(x)
(n, c, h, w) = x.size()
x = x.permute(0, 3, 2, 1).contiguous().view(n, w, (h * c))
x = self.linear(x)
return x |
def test_regulararray_localindex():
    """local_index on RegularArray layouts at every axis, incl. typetracer forms."""

    def check(layout, axis, expected):
        # Value check plus typetracer/eager form agreement.
        assert to_list(ak._do.local_index(layout, axis)) == expected
        assert (
            ak._do.local_index(layout.to_typetracer(), axis).form
            == ak._do.local_index(layout, axis).form
        )

    def check_out_of_range(layout, axis):
        with pytest.raises(IndexError):
            ak._do.local_index(layout, axis)

    idx3 = [0, 1, 2]
    idx5 = list(range(5))
    idx10 = list(range(10))

    layout = ak.operations.from_numpy(
        np.arange(2 * 3 * 5).reshape(2, 3, 5), regulararray=True, highlevel=False
    )
    check(layout, 0, [0, 1])
    check(layout, 1, [idx3, idx3])
    check(layout, 2, [[idx5] * 3] * 2)
    check(layout, -1, [[idx5] * 3] * 2)
    check(layout, -2, [idx3, idx3])
    check(layout, -3, [0, 1])
    check_out_of_range(layout, -4)
    check_out_of_range(layout, 3)

    layout = ak.operations.from_numpy(
        np.arange(2 * 3 * 5 * 10).reshape(2, 3, 5, 10), regulararray=True, highlevel=False
    )
    check(layout, 0, [0, 1])
    check(layout, 1, [idx3, idx3])
    check(layout, 2, [[idx5] * 3] * 2)
    check(layout, 3, [[[idx10] * 5] * 3] * 2)
    check(layout, -1, [[[idx10] * 5] * 3] * 2)
    check(layout, -2, [[idx5] * 3] * 2)
    check(layout, -3, [idx3, idx3])
    check(layout, -4, [0, 1])
    check_out_of_range(layout, -5)
    check_out_of_range(layout, 4)

    # Zero-length RegularArray: every valid axis yields an empty list.
    layout = ak.highlevel.Array(
        ak.contents.RegularArray(
            ak.highlevel.Array([[1, 2, 3], [], [4, 5]]).layout, 0, zeros_length=0
        )
    ).layout
    for axis in (0, 1, 2, -1, -2, -3):
        check(layout, axis, [])
    check_out_of_range(layout, -4)
    check_out_of_range(layout, 3)
def get_kernelf(config, context=None):
    """Instantiate a kernel function from a configuration mapping.

    Args:
        config: configuration passed through to ``_from_config``.
        context: optional context dict; a fresh empty dict is used when None.

    Returns:
        Whatever ``_from_config`` builds for this config.
    """
    # Bug fix: the default was a mutable ``{}`` shared across all calls, so
    # any mutation by ``_from_config`` would leak into later calls.
    if context is None:
        context = {}
    return _from_config(config, classes=classes, context=context)
def floyd_warshall(A):
    """All-pairs shortest-path lengths for an unweighted adjacency matrix.

    Args:
        A: (n, n) adjacency matrix; nonzero entries mark edges of weight 1.

    Returns:
        (n, n) int16 matrix of hop counts; the diagonal is 0 and unreachable
        pairs keep the sentinel 510 (presumably a cap chosen for a downstream
        encoding — distances never exceed it because candidates are min'd
        against it; TODO confirm with consumers).
    """
    n = A.shape[0]
    # Distance init: 0 on the diagonal, 1 for edges, 510 for non-edges.
    D = np.where(np.asarray(A) == 0, 510, 1).astype(np.int16)
    np.fill_diagonal(D, 0)
    for k in range(n):
        # Vectorized relaxation: D[:, [k]] + D[[k], :] broadcasts to the full
        # matrix of candidate paths through k (replaces the original Python
        # double loop, same results).  Row/column k are fixed points since
        # D[k, k] == 0; int16 cannot overflow (max candidate 510 + 510).
        np.minimum(D, D[:, k:k + 1] + D[k:k + 1, :], out=D)
    return D
def adjust_pixel_dataset2(hi, wi, H, W):
    """Mirror the column index horizontally; the row index is unchanged.

    Returns (hi, W - wi), wrapped once by +W if negative.  Note this is not a
    full modulo: wi == 0 maps to W, and inputs beyond 2*W stay negative.
    H is accepted for signature symmetry but unused.
    """
    mirrored = W - wi
    if mirrored < 0:
        mirrored += W
    return (hi, mirrored)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.