code stringlengths 101 5.91M |
|---|
def default_convert(data):
    """Recursively convert numpy data (and containers thereof) to torch tensors.

    Tensors pass through unchanged. Numpy scalars/arrays become tensors via
    ``torch.as_tensor`` — except numpy string scalars and object/str-dtype
    ndarrays, which are returned as-is. Mappings, namedtuples and sequences
    are converted element-wise; anything else is returned untouched.
    """
    elem_type = type(data)
    if isinstance(data, torch.Tensor):
        return data
    elif ((elem_type.__module__ == 'numpy') and (elem_type.__name__ != 'str_') and (elem_type.__name__ != 'string_')):
        # object/string ndarrays cannot be represented as tensors
        if ((elem_type.__name__ == 'ndarray') and (np_str_obj_array_pattern.search(data.dtype.str) is not None)):
            return data
        return torch.as_tensor(data)
    elif isinstance(data, container_abcs.Mapping):
        return {key: default_convert(data[key]) for key in data}
    elif (isinstance(data, tuple) and hasattr(data, '_fields')):
        # BUG FIX: the converted fields must be splatted into the namedtuple
        # constructor; passing the generator as a single positional argument
        # raised TypeError for any namedtuple with more than one field.
        return elem_type(*(default_convert(d) for d in data))
    elif (isinstance(data, container_abcs.Sequence) and (not isinstance(data, string_classes))):
        return [default_convert(d) for d in data]
    else:
        return data
def make_sll_loss_evaluator(cfg):
    """Build the smooth-L1 disparity loss evaluator from the config tree."""
    loss_cfg = cfg.model.losses.l1_loss
    return DispSmoothL1Loss(
        max_disp=loss_cfg.get('max_disp', None),
        weights=loss_cfg.weights,
        sparse=cfg.data.sparse,
    )
def add_config(_C):
    """Populate `_C` with the default model configuration.

    Mutates `_C` in place. `CN` is the config-node constructor (yacs-style)
    imported elsewhere in this file.
    """
    _C.MODEL = CN()
    # --- convolution construction ---
    _C.MODEL.CONV = CN()
    _C.MODEL.CONV.TYPE = 'Conv2d'
    _C.MODEL.CONV.ADD_BLOCKS = None
    # --- normalization ---
    _C.MODEL.NORM = CN()
    _C.MODEL.NORM.TYPE = 'BatchNorm2d'
    _C.MODEL.NORM.SYNC_BN = False
    _C.MODEL.NORM.FIX_BN = False
    _C.MODEL.NORM.PARTIAL_BN = False
    _C.MODEL.NORM.PRECISE_BN = False
    _C.MODEL.NORM.NUM_BATCHES_PRECISE = 200
    _C.MODEL.NORM.GROUPS = 32  # presumably the group count for GroupNorm — confirm
    # --- activations ---
    _C.MODEL.ACT = CN()
    _C.MODEL.ACT.TYPE = 'ReLU'
    _C.MODEL.ACT.SIGMOID_TYPE = 'Sigmoid'
    # --- channel-width compression ---
    _C.MODEL.COMPRESSION = CN()
    _C.MODEL.COMPRESSION.WIDTH_MULTIPLIER = 1.0
    _C.MODEL.COMPRESSION.ROUND_NEAREST = 8
    # --- attention blocks ---
    _C.MODEL.ATTENTION = CN()
    _C.MODEL.ATTENTION.WITH_ATTENTION = False
    _C.MODEL.ATTENTION.WITH_ATTENTIONS = (0, 0, 0, 0)  # per-stage on/off flags
    _C.MODEL.ATTENTION.REDUCTION = 16
    _C.MODEL.ATTENTION.ATTENTION_TYPE = 'SqueezeAndExcitationBlock2D'
    _C.MODEL.ATTENTION.BIAS = False
    # --- backbone ---
    _C.MODEL.BACKBONE = CN()
    _C.MODEL.BACKBONE.NAME = 'ShuffleNetV1'
    _C.MODEL.BACKBONE.IN_PLANES = 3
    _C.MODEL.BACKBONE.ARCH = 'resnet18'
    _C.MODEL.BACKBONE.BASE_PLANES = 64
    _C.MODEL.BACKBONE.LAYER_PLANES = (64, 128, 256, 512)
    _C.MODEL.BACKBONE.DOWNSAMPLES = (0, 1, 1, 1)
    _C.MODEL.BACKBONE.USE_AVG = False
    _C.MODEL.BACKBONE.FAST_AVG = False
    _C.MODEL.BACKBONE.STRIDES = (1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 2)
    # conv1/pool1 triples look like (T, H, W) for 3D variants — confirm
    _C.MODEL.BACKBONE.CONV1_KERNEL = (1, 7, 7)
    _C.MODEL.BACKBONE.CONV1_STRIDE = (1, 2, 2)
    _C.MODEL.BACKBONE.CONV1_PADDING = (0, 3, 3)
    _C.MODEL.BACKBONE.POOL1_KERNEL = (1, 3, 3)
    _C.MODEL.BACKBONE.POOL1_STRIDE = (1, 2, 2)
    _C.MODEL.BACKBONE.POOL1_PADDING = (0, 1, 1)
    _C.MODEL.BACKBONE.WITH_POOL2 = False
    _C.MODEL.BACKBONE.TEMPORAL_STRIDES = (1, 1, 1, 1)
    _C.MODEL.BACKBONE.INFLATE_LIST = (0, 0, 0, 0)
    _C.MODEL.BACKBONE.INFLATE_STYLE = '3x1x1'
    _C.MODEL.BACKBONE.WITH_GROUPS = (0, 1, 1)
    # --- classification head ---
    _C.MODEL.HEAD = CN()
    _C.MODEL.HEAD.NAME = 'GeneralHead2D'
    _C.MODEL.HEAD.FEATURE_DIMS = 1024
    _C.MODEL.HEAD.DROPOUT_RATE = 0.0
    _C.MODEL.HEAD.NUM_CLASSES = 1000
    _C.MODEL.HEAD.BIAS = True
    _C.MODEL.HEAD.INNER_DIMS = 1280
    # --- recognizer (model assembly / pretrained weights) ---
    _C.MODEL.RECOGNIZER = CN()
    _C.MODEL.RECOGNIZER.NAME = 'ShuffleNetV1'
    _C.MODEL.RECOGNIZER.TYPE = 'ResNet'
    _C.MODEL.RECOGNIZER.PRELOADED = ''
    _C.MODEL.RECOGNIZER.PRETRAINED_REMOTE = True
    _C.MODEL.RECOGNIZER.PRETRAINED_LOCAL = ''
    _C.MODEL.RECOGNIZER.TORCHVISION_PRETRAINED = False
    _C.MODEL.RECOGNIZER.PRETRAINED_NUM_CLASSES = 1000
    _C.MODEL.RECOGNIZER.ZERO_INIT_RESIDUAL = False
    # --- training criterion ---
    _C.MODEL.CRITERION = CN()
    _C.MODEL.CRITERION.NAME = 'CrossEntropyLoss'
    _C.MODEL.CRITERION.SMOOTHING = 0.1
    _C.MODEL.CRITERION.REDUCTION = 'mean'
_module()  # NOTE(review): looks like a mangled registration decorator (e.g. @DETECTORS.register_module()) — confirm against upstream
class FastRCNN(TwoStageDetector):
    """Implementation of `Fast R-CNN`: a two-stage detector that consumes
    externally supplied region proposals instead of generating them with an
    RPN."""

    def __init__(self, backbone, roi_head, train_cfg, test_cfg, neck=None, pretrained=None):
        super(FastRCNN, self).__init__(backbone=backbone, neck=neck, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained)

    def forward_test(self, imgs, img_metas, proposals, **kwargs):
        """Run inference on (possibly augmented) images with given proposals.

        Raises:
            TypeError: if `imgs` or `img_metas` is not a list.
            ValueError: if the two lists have different lengths.
            NotImplementedError: for multi-augmentation (TTA) inputs.
        """
        for (var, name) in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if (not isinstance(var, list)):
                raise TypeError(f'{name} must be a list, but got {type(var)}')
        num_augs = len(imgs)
        if (num_augs != len(img_metas)):
            raise ValueError(f'num of augmentations ({len(imgs)}) != num of image meta ({len(img_metas)})')
        if (num_augs == 1):
            return self.simple_test(imgs[0], img_metas[0], proposals[0], **kwargs)
        else:
            # BUG FIX: `assert NotImplementedError` is always truthy, so the
            # multi-aug path silently fell through and returned None. Raise
            # the exception instead.
            raise NotImplementedError('aug_test is not supported by FastRCNN')
_module()  # NOTE(review): looks like a mangled registration decorator — confirm against upstream
class GQAComputeMetrics(BaseComputeMetrics):
    """Metrics helper that extracts the answer span from a model response."""

    def extract_target(self, string: str):
        """Return the single answer matched by ANS_EXTRACT_PAT, with
        surrounding whitespace and a trailing period stripped; None when the
        pattern does not match exactly once or extraction fails."""
        try:
            matches = ANS_EXTRACT_PAT.findall(string.strip())
            if len(matches) != 1:
                return None
            return matches[0].strip().rstrip('.').strip()
        except (IndexError, AttributeError):
            return None
def inverse_data_transform(config, X):
    """Invert the dataset's forward transform and clamp the result to [0, 1].

    Re-adds an image mean when the config carries one, then undoes either the
    logit transform (sigmoid) or the [-1, 1] rescaling, whichever is enabled.
    """
    if hasattr(config, 'image_mean'):
        X = X + config.image_mean.to(X.device)[None, ...]
    if config.data.logit_transform:
        X = torch.sigmoid(X)
    elif config.data.rescaled:
        X = (X + 1.0) / 2.0
    return torch.clamp(X, 0.0, 1.0)
def test_pretransform_nobatch():
    """unet_pre_transform without a batch dim: time and channel axes are
    folded into one leading axis, with the static channels appended."""
    static = torch.rand(495, 436, 8)
    dynamic = torch.rand(12, 495, 436, 8)
    output = UNet.unet_pre_transform(data=dynamic, static_data=static, zeropad2d=None, batch_dim=False)
    # 12 time steps * 8 channels, plus 8 static channels
    assert output.shape == (12 * 8 + 8, 495, 436)
def l2_to_ip(l2_score, query, max_norm=None):
    """Convert squared-L2 distances into inner-product scores.

    Uses ip = -0.5 * (||q - x||^2 - ||q||^2 - ||x||^2). When `max_norm` is
    given, every indexed vector is assumed to have been padded to that norm,
    so ||x||^2 == max_norm^2; otherwise the ||x||^2 term is dropped.
    """
    sq_query_norm = np.linalg.norm(query, axis=1, keepdims=True) ** 2
    if max_norm is None:
        return -0.5 * (l2_score - sq_query_norm)
    return -0.5 * (l2_score - sq_query_norm - max_norm ** 2)
def isect_segments_include_segments(segments):
    """Convenience wrapper: run the segment-intersection sweep and return,
    for each intersection point, the participating segments as well."""
    return isect_segments_impl(segments, include_segments=True)
def download_and_extract_archive(url, download_root, extract_root=None, filename=None, md5=None, remove_finished=False):
    """Download an archive into `download_root` (verifying `md5` if given)
    and extract it into `extract_root` (defaults to the download dir).

    When `filename` is omitted it is taken from the URL's basename; when
    `remove_finished` is true the archive is deleted after extraction.
    """
    download_root = os.path.expanduser(download_root)
    extract_root = download_root if extract_root is None else extract_root
    filename = filename or os.path.basename(url)
    download_url(url, download_root, filename, md5)
    archive = os.path.join(download_root, filename)
    print('Extracting {} to {}'.format(archive, extract_root))
    extract_archive(archive, extract_root, remove_finished)
def cobmine_all_coils(image, sensitivity):
    """Combine per-coil complex images into a single image: multiply each
    coil image by its sensitivity map with negated imaginary part (i.e. the
    complex conjugate — presumably a SENSE-style coil combination; confirm
    T.complex_multiply's argument convention), then sum over the coil axis
    (dim 0). Last axis of both inputs holds (real, imag)."""
    # NOTE(review): name looks like a typo of "combine_all_coils"; kept so
    # existing callers don't break.
    combined = T.complex_multiply(sensitivity[(..., 0)], (- sensitivity[(..., 1)]), image[(..., 0)], image[(..., 1)])
    return combined.sum(dim=0)
def _equal(a, b):
if isinstance(a, (torch.Tensor, np.ndarray)):
return (a == b).all()
else:
return (a == b) |
class FastRCNNTest(unittest.TestCase):
    """Regression tests for FastRCNNOutputs loss computation (axis-aligned
    and rotated boxes).

    The expected loss values are pinned to the fixed RNG seed, so the order
    of the torch.rand / layer-construction draws must not change.
    """

    def test_fast_rcnn(self):
        """Axis-aligned boxes: losses for seed 132 match recorded values."""
        # fixed seed makes the random head weights and features deterministic
        torch.manual_seed(132)
        cfg = get_cfg()
        cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5)
        box2box_transform = Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
        box_head_output_size = 8
        num_classes = 5
        cls_agnostic_bbox_reg = False
        box_predictor = FastRCNNOutputLayers(box_head_output_size, num_classes, cls_agnostic_bbox_reg, box_dim=4)
        # two pooled ROI features -> class logits and per-class box deltas
        feature_pooled = torch.rand(2, box_head_output_size)
        (pred_class_logits, pred_proposal_deltas) = box_predictor(feature_pooled)
        image_shape = (10, 10)
        proposal_boxes = torch.tensor([[0.8, 1.1, 3.2, 2.8], [2.3, 2.5, 7, 8]], dtype=torch.float32)
        gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
        result = Instances(image_shape)
        result.proposal_boxes = Boxes(proposal_boxes)
        result.gt_boxes = Boxes(gt_boxes)
        result.gt_classes = torch.tensor([1, 2])
        proposals = []
        proposals.append(result)
        smooth_l1_beta = cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA
        outputs = FastRCNNOutputs(box2box_transform, pred_class_logits, pred_proposal_deltas, proposals, smooth_l1_beta)
        # losses() writes metrics to the active event storage
        with EventStorage():
            losses = outputs.losses()
        # reference values recorded for this seed
        expected_losses = {'loss_cls': torch.tensor(1.), 'loss_box_reg': torch.tensor(4.)}
        for name in expected_losses.keys():
            assert torch.allclose(losses[name], expected_losses[name])

    def test_fast_rcnn_rotated(self):
        """Same check with 5-dof rotated boxes (extra angle component)."""
        torch.manual_seed(132)
        cfg = get_cfg()
        # fifth weight covers the angle regression component
        cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5, 1)
        box2box_transform = Box2BoxTransformRotated(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
        box_head_output_size = 8
        num_classes = 5
        cls_agnostic_bbox_reg = False
        box_predictor = FastRCNNOutputLayers(box_head_output_size, num_classes, cls_agnostic_bbox_reg, box_dim=5)
        feature_pooled = torch.rand(2, box_head_output_size)
        (pred_class_logits, pred_proposal_deltas) = box_predictor(feature_pooled)
        image_shape = (10, 10)
        # boxes are (cx, cy, w, h, angle)
        proposal_boxes = torch.tensor([[2, 1.95, 2.4, 1.7, 0], [4.65, 5.25, 4.7, 5.5, 0]], dtype=torch.float32)
        gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32)
        result = Instances(image_shape)
        result.proposal_boxes = RotatedBoxes(proposal_boxes)
        result.gt_boxes = RotatedBoxes(gt_boxes)
        result.gt_classes = torch.tensor([1, 2])
        proposals = []
        proposals.append(result)
        smooth_l1_beta = cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA
        outputs = RotatedFastRCNNOutputs(box2box_transform, pred_class_logits, pred_proposal_deltas, proposals, smooth_l1_beta)
        with EventStorage():
            losses = outputs.losses()
        expected_losses = {'loss_cls': torch.tensor(1.), 'loss_box_reg': torch.tensor(4.)}
        for name in expected_losses.keys():
            assert torch.allclose(losses[name], expected_losses[name])
def _add_categories_metadata(dataset_name: str) -> None:
    """Attach a 1-based {id: class name} category map to the dataset's
    metadata entry and log how many categories were registered."""
    metadict = get_lvis_instances_meta(dataset_name)
    categories = metadict['thing_classes']
    metadata = MetadataCatalog.get(dataset_name)
    metadata.categories = {idx + 1: name for idx, name in enumerate(categories)}
    logging.getLogger(__name__).info(f'Dataset {dataset_name} has {len(categories)} categories')
class NDCG_relevance():
    """NDCG@length with graded relevance for session-based recommendation.

    An item's relevance combines: +1 per occurrence in the remaining session
    items, +ratio_buys if it was bought in the session, and +1 if it was
    already clicked earlier in the session.
    """

    def __init__(self, length=20):
        self.length = length  # cut-off rank

    def init(self, train):
        """Remember the training set (used to scale the buy bonus)."""
        self.train = train
        return

    def reset(self):
        """Clear the running accumulator before a new evaluation pass."""
        self.test = 0   # number of scored events
        self.pos = 0    # sum of per-event NDCG values

    def skip(self, for_item=0, session=(- 1)):
        pass

    def set_buys(self, buys, test_set):
        """Store buy events and the test sessions; precompute the buy bonus
        as the buy/click ratio observed on training sessions."""
        self.buys = buys
        self.test_set = test_set
        buys_filterd = buys[buys['SessionId'].isin(self.train['SessionId'])]
        self.ratio_buys = (len(buys_filterd) / len(self.train))
        return

    def add_multiple(self, result, next_items, for_item=0, session=0, position=None):
        """Score one event: `result` is a pandas Series of scores indexed by
        item id; `next_items` the remaining items of the session."""
        dcg = self.dcg(result[:self.length].index, next_items, session, position)
        dcg_max = self.dcg(next_items[:self.length], next_items, session, position)
        self.pos += (dcg / dcg_max)
        self.test += 1

    def add(self, result, next_item, for_item=0, session=0, pop_bin=None, position=None):
        # BUG FIX: `session` was previously passed positionally into
        # add_multiple's `for_item` slot (and `position` was dropped), so the
        # buy/click bonuses always used session 0. Forward by keyword.
        self.add_multiple(result, [next_item], for_item=for_item, session=session, position=position)

    def dcg(self, result, next_items, session, position):
        """Discounted cumulative gain of ranked `result` w.r.t. `next_items`.

        NOTE(review): `rel` starts at 1 and is only reset after each rank, so
        a non-matching item at rank 0 still contributes 1 — kept from the
        original; confirm against upstream whether this is intended.
        """
        res = 0
        rel = 1
        rel_buy = self.ratio_buys
        rel_count_next_items = 1
        rel_click = 1
        ranked_list_len = min(len(result), self.length)
        next_items = list(next_items)
        for i in range(ranked_list_len):
            if (result[i] in next_items):
                # buy bonus: item was bought in this session
                if (result[i] in self.buys.loc[(self.buys['SessionId'] == session)].ItemId.values):
                    rel += rel_buy
                # one point per occurrence among the remaining items
                rel += (next_items.count(result[i]) * rel_count_next_items)
                # click bonus: item was already seen earlier in the session
                session_rows = self.test_set.loc[(self.test_set['SessionId'] == session)]
                previous_items = session_rows.iloc[:position]
                if (result[i] in previous_items.ItemId.values):
                    rel += rel_click
            if (i == 0):
                res += rel
            else:
                res += (rel / np.log2((i + 1)))
            rel = 0
        return res

    def sortFunc(e):
        # NOTE(review): defined without `self` — only usable as a plain key
        # function, never via an instance; kept for compatibility.
        return e.values

    def add_batch(self, result, next_item):
        """Score a batch: one column of the `result` DataFrame per event."""
        i = 0
        # `iteritems` was removed in pandas 2.0; `items` is the long-standing alias.
        for (part, series) in result.items():
            result.sort_values(part, ascending=False, inplace=True)
            self.add(series, next_item[i])
            i += 1

    def result(self):
        """Return (metric name, mean NDCG over all added events)."""
        return ((('NDCG_' + str(self.length)) + ': '), (self.pos / self.test))
def _is_float_str(str_number):
if (not str_number):
return False
try:
float(str_number)
return True
except ValueError:
return False |
class AdaptiveParamNoiseSpec(object):
    """Adaptive parameter-noise schedule: shrink the noise stddev when the
    measured action distance overshoots the target, grow it otherwise."""

    def __init__(self, initial_stddev=0.1, desired_action_stddev=0.1, adoption_coefficient=1.01):
        self.initial_stddev = initial_stddev
        self.desired_action_stddev = desired_action_stddev
        self.adoption_coefficient = adoption_coefficient
        # current noise level, multiplicatively adapted over time
        self.current_stddev = initial_stddev

    def adapt(self, distance):
        """Nudge current_stddev toward the desired action distance."""
        if distance > self.desired_action_stddev:
            # too much perturbation observed -> decrease the noise
            self.current_stddev /= self.adoption_coefficient
        else:
            # too little -> increase the noise
            self.current_stddev *= self.adoption_coefficient

    def get_stats(self):
        """Expose the current stddev for logging."""
        return {'param_noise_stddev': self.current_stddev}

    def __repr__(self):
        return 'AdaptiveParamNoiseSpec(initial_stddev={}, desired_action_stddev={}, adoption_coefficient={})'.format(
            self.initial_stddev, self.desired_action_stddev, self.adoption_coefficient)
class VarData(object):
    """Plain record bundling the pieces of a variable/function binding.

    params: presumably the parameter list — TODO confirm against callers.
    lhs: presumably the left-hand side of the binding — TODO confirm.
    ret: presumably the returned value/expression — TODO confirm.
    """

    def __init__(self, params=None, lhs=None, ret=None):
        super().__init__()
        self.params = params
        self.lhs = lhs
        self.ret = ret
class Sorting2TaskDefinition(DefaultTaskDefinition):
    """'sorting2' task: colored blocks scattered over candidate positions must
    be moved to a goal stack position; reward and success conditions track
    the red block.

    NOTE(review): `self.urdf_dir` (used in _setup) is not defined in this
    class — presumably inherited from DefaultTaskDefinition; confirm.
    """
    # tray model location inside the costar_objects package
    tray_dir = 'tray'
    tray_urdf = 'traybox.urdf'
    # sampling box for spawn positions (unused below — placement uses stack_pos)
    spawn_pos_min = np.array([(- 0.4), (- 0.25), 0.1])
    spawn_pos_max = np.array([(- 0.65), 0.25, 0.155])
    spawn_pos_delta = (spawn_pos_max - spawn_pos_min)
    # base positions of the three trays
    tray_poses = [np.array([(- 0.5), 0.0, 0.0]), np.array([0.0, (+ 0.6), 0.0]), np.array([(- 1.0), (- 0.6), 0.0])]
    # filename template for block URDFs, e.g. 'red.urdf'
    block_urdf = '%s.urdf'
    model = 'block'
    blocks = ['red', 'blue', 'yellow', 'green']
    # candidate (x, y) columns where initial towers may be placed
    stack_pos = [np.array([(- 0.5), 0.1, 0.0]), np.array([(- 0.5), 0.2, 0.0]), np.array([(- 0.5), (- 0.1), 0.0]), np.array([(- 0.5), (- 0.2), 0.0]), np.array([(- 0.1), 0.1, 0.0]), np.array([(- 0.1), 0.2, 0.0]), np.array([(- 0.1), (- 0.1), 0.0]), np.array([(- 0.1), (- 0.2), 0.0]), np.array([(- 0), 0.1, 0.0]), np.array([(- 0), 0.2, 0.0]), np.array([(- 0), (- 0.1), 0.0]), np.array([(- 0), (- 0.2), 0.0])]
    # waypoint above the goal stack, and the goal position itself
    over_final_stack_pos = np.array([(- 0.5), 0.0, 0.5])
    final_stack_pos = np.array([(- 0.5), 0.0, 0.05])
    # fixed grasp orientation (quaternion x, y, z, w — TODO confirm ordering)
    grasp_q = ((- 0.27), 0.65, 0.65, 0.27)

    def __init__(self, stage, *args, **kwargs):
        """`stage` selects which success/failure conditions are installed."""
        super(Sorting2TaskDefinition, self).__init__(*args, **kwargs)
        self.stage = stage
        self.block_ids = []  # pybullet body ids of the spawned blocks

    def _makeTask(self):
        """Build the align -> grasp -> close -> lift -> place -> open option graph."""
        # approach pose slightly offset from the target block
        AlignOption = (lambda goal: GoalDirectedMotionOption(self.world, goal, pose=((0.05, 0, 0.05), self.grasp_q), pose_tolerance=(0.025, 0.025), joint_velocity_tolerance=0.05))
        align_args = {'constructor': AlignOption, 'args': ['block'], 'remap': {'block': 'goal'}}
        # final grasp pose directly at the block
        GraspOption = (lambda goal: GoalDirectedMotionOption(self.world, goal, pose=((0.0, 0, 0.0), self.grasp_q), pose_tolerance=(0.025, 0.025), joint_velocity_tolerance=0.05))
        grasp_args = {'constructor': GraspOption, 'args': ['block'], 'remap': {'block': 'goal'}}
        # move to the waypoint above the goal stack
        LiftOption = (lambda : GeneralMotionOption(pose=(self.over_final_stack_pos, self.grasp_q), pose_tolerance=(0.025, 0.025), joint_velocity_tolerance=0.05))
        lift_args = {'constructor': LiftOption, 'args': []}
        # lower to the goal position
        PlaceOption = (lambda : GeneralMotionOption(pose=(self.final_stack_pos, self.grasp_q), pose_tolerance=(0.025, 0.025), joint_velocity_tolerance=0.05))
        place_args = {'constructor': PlaceOption, 'args': []}
        close_gripper_args = {'constructor': CloseGripperOption, 'args': []}
        open_gripper_args = {'constructor': OpenGripperOption, 'args': []}
        task = Task()
        task.add('align', None, align_args)
        task.add('grasp', 'align', grasp_args)
        task.add('close_gripper', 'grasp', close_gripper_args)
        task.add('lift', 'close_gripper', lift_args)
        task.add('place', 'lift', place_args)
        task.add('open_gripper', 'place', open_gripper_args)
        task.add('done', 'open_gripper', lift_args)
        return task

    def _addTower(self, pos, blocks, urdf_dir):
        """Spawn `blocks` stacked vertically at (pos[0], pos[1]); returns their
        pybullet body ids."""
        z = 0.025
        ids = []
        for block in blocks:
            urdf_filename = os.path.join(urdf_dir, self.model, (self.block_urdf % block))
            obj_id = pb.loadURDF(urdf_filename)
            pb.resetBasePositionAndOrientation(obj_id, (pos[0], pos[1], z), (0, 0, 0, 1))
            self.addObject('block', ('%s_block' % block), obj_id)
            z += 0.05  # one block height per level — TODO confirm block size
            ids.append(obj_id)
        return ids

    def _setup(self):
        """Spawn trays and randomly distributed block towers, then install
        reward and termination conditions for the current stage."""
        rospack = rospkg.RosPack()
        path = rospack.get_path('costar_objects')
        urdf_dir = os.path.join(path, self.urdf_dir)
        tray_filename = os.path.join(urdf_dir, self.tray_dir, self.tray_urdf)
        for position in self.tray_poses:
            obj_id = pb.loadURDF(tray_filename)
            pb.resetBasePositionAndOrientation(obj_id, position, (0, 0, 0, 1))
        # assign each block color a random stack position
        placement = np.array(range(len(self.stack_pos)))
        np.random.shuffle(placement)
        for (i, pos) in enumerate(self.stack_pos):
            blocks = []
            for (idx, block) in zip(placement, self.blocks):
                if (idx == i):
                    blocks.append(block)
            ids = self._addTower(pos, blocks, urdf_dir)
            self.block_ids += ids
        self.world.addCondition(JointLimitViolationCondition(), (- 100), 'joints must stay in limits')
        self.world.addCondition(TimeCondition(10.0), (- 100), 'time limit reached')
        self.world.reward = EuclideanReward('red_block')
        if (self.stage == 0):
            # full reward only for the red block; partial credit otherwise
            threshold = 0.035
            self.world.addCondition(ObjectAtPositionCondition('red_block', self.final_stack_pos, threshold), 100, 'block in right position')
            self.world.addCondition(ObjectAtPositionCondition('blue_block', self.final_stack_pos, threshold), 50, 'wrong block')
            self.world.addCondition(ObjectAtPositionCondition('green_block', self.final_stack_pos, threshold), 50, 'wrong block')
            self.world.addCondition(ObjectAtPositionCondition('yellow_block', self.final_stack_pos, threshold), 50, 'wrong block')

    def reset(self):
        """Re-randomize block placement and reset world/robot state."""
        placement = np.array(range(len(self.stack_pos)))
        np.random.shuffle(placement)
        self.world.done = False
        self.world.ticks = 0
        for (i, pos) in enumerate(self.stack_pos):
            blocks = []
            # NOTE(review): zips against self.block_ids here (body ids), while
            # _setup zips against self.blocks (names) — confirm intended.
            for (idx, block) in zip(placement, self.block_ids):
                if (idx == i):
                    blocks.append(block)
            z = 0.025
            for block_id in blocks:
                pb.resetBasePositionAndOrientation(block_id, (pos[0], pos[1], z), (0, 0, 0, 1))
                z += 0.05
        self._setupRobot(self.robot.handle)

    def getName(self):
        return 'sorting2'
class ResNet(nn.Module):
    """ResNet-style encoder with five residual stages and InstanceNorm2d in
    place of BatchNorm, ending in a single-logit linear head.

    NOTE(review): no `forward` is defined in this class — presumably provided
    elsewhere or by a subclass; confirm.
    """

    def __init__(self, block, layers):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.InstanceNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # five stages -> `layers` must supply five block counts
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.layer5 = self._make_layer(block, 512, layers[4], stride=2)
        self.avgpool = nn.AvgPool2d(4, stride=1)
        self.fc = nn.Linear(512 * block.expansion, 1)
        # Kaiming-style init for convs; BatchNorm (if any appears) to identity
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual units; the first unit downsamples the skip
        path whenever the stride or channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride),
                nn.InstanceNorm2d(planes * block.expansion),
            )
        units = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            units.append(block(self.inplanes, planes))
        return nn.Sequential(*units)
def common_tangent_radian(r1, r2, d):
    """Angle (radians) between the center line and a common tangent of two
    circles with radii `r1`, `r2` whose centers are distance `d` apart."""
    base = math.acos(abs(r2 - r1) / d)
    # measured from the first circle's side: complement to pi when r1 <= r2
    return base if r1 > r2 else pi - base
class LaplacianKernel(Kernel):
    """Laplacian kernel: similarity is -sqrt(distances) / bandwidth.

    NOTE(review): taking sqrt suggests `distances` are squared distances —
    confirm against the Kernel base class contract.
    """

    def __init__(self) -> None:
        super(LaplacianKernel, self).__init__()

    def similarity(self, distances: torch.Tensor, bandwidth: Union[(float, torch.Tensor)]) -> torch.Tensor:
        """Map pairwise distances to (log-)similarities."""
        scaled = torch.sqrt(distances) / bandwidth
        return -scaled
class Logger():
    """Experiment logger.

    Resolves an experiment directory (pretraining vs. downstream layout,
    chosen by the presence of an 'env' section in the config), appends rows
    to a TSV training log, and saves config/vocab/checkpoints there.
    """

    def __init__(self, config):
        self.config = config
        # configs with an 'env' section follow the pretraining layout
        if ('env' in self.config.keys()):
            self.init_pretrain_logger()
        else:
            self.init_downstream_logger()
        self.init_training_log()

    def init_pretrain_logger(self):
        """Resolve experiment id/dir for a pretraining run (generating a
        timestamp id when none is configured) and derive artifact paths."""
        if (self.config.env.experiment_id is None):
            self.experiment_id = self.get_timestamp()
            OmegaConf.update(self.config, 'env.experiment_id', self.experiment_id)
        else:
            self.experiment_id = self.config.env.experiment_id
        self.experiment_dir = os.path.join(self.config.env.experiments_dir, self.experiment_id)
        self.vocab_path = os.path.join(self.experiment_dir, 'vocab.json')
        self.checkpoint_path = os.path.join(self.experiment_dir, 'checkpoint.pth.tar')

    def init_downstream_logger(self):
        """Like init_pretrain_logger but for the downstream config layout
        (no vocab path)."""
        if (self.config.downstream_experiment_id is None):
            self.experiment_id = self.get_timestamp()
            OmegaConf.update(self.config, 'downstream_experiment_id', self.experiment_id)
        else:
            self.experiment_id = self.config.downstream_experiment_id
        self.experiment_dir = os.path.join(self.config.downstream_dir, self.experiment_id)
        self.checkpoint_path = os.path.join(self.experiment_dir, 'checkpoint.pth.tar')

    def init_training_log(self):
        """Create the experiment dir if needed and append the TSV header."""
        self.log_filename = os.path.join(self.experiment_dir, 'train_log.tsv')
        if (not os.path.exists(self.experiment_dir)):
            os.makedirs(self.experiment_dir)
        log_file = open(self.log_filename, 'a')
        # NOTE(review): 'learing_rate' typo is part of the on-disk format —
        # fixing it could break existing log parsers. The header is also
        # re-appended on every construction (append mode); confirm intended.
        log_file.write('Epoch\ttrain_loss\tval_loss\tmetric\tepoch_time\tlearing_rate\ttime_stamp\n')
        log_file.close()

    def save_config(self):
        """Persist the config once (skipped when config.yaml already exists)."""
        config_path = os.path.join(self.experiment_dir, 'config.yaml')
        if (not os.path.exists(config_path)):
            OmegaConf.save(self.config, config_path)

    def get_timestamp(self):
        """Return a UTC timestamp string usable as an experiment id."""
        return str(time.strftime('%Y-%m-%d-%H_%M_%S', time.gmtime()))

    def write(self, text):
        # plain stdout echo; kept as a hook for redirecting output later
        print(text)

    def save_vocab(self, token_freq):
        """Persist the token-frequency vocabulary as JSON."""
        save_json(self.vocab_path, token_freq)

    def update_training_log(self, epoch, train_loss, val_loss, epoch_time, learning_rate, metric=0):
        """Echo one epoch's stats to stdout and append them to the TSV log."""
        time_stamp = str(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime()))
        self.write(('Epoch %d, train loss %g, val loss %g, metric %g, epoch-time %gs, lr %g, time-stamp %s' % (epoch, train_loss, val_loss, metric, epoch_time, learning_rate, time_stamp)))
        log_file = open(self.log_filename, 'a')
        log_file.write(('%d\t%g\t%g\t%g\t%gs\t%g\t%s\n' % (epoch, train_loss, val_loss, metric, epoch_time, learning_rate, time_stamp)))
        log_file.close()

    def save_checkpoint(self, state, is_best=False):
        """Save the latest checkpoint; also copy it to best_model when is_best."""
        torch.save(state, self.checkpoint_path)
        if is_best:
            self.write('Saving best model so far')
            best_model_path = os.path.join(self.experiment_dir, 'best_model.pth.tar')
            torch.save(state, best_model_path)
def get_split_template_mesh_dataset(cat_id, edge_length_threshold):
    """Load the saved split-template mesh dataset for category `cat_id`,
    built with the given edge-length threshold (thin convenience wrapper)."""
    return SplitTemplateMeshManager(cat_id, edge_length_threshold).get_saved_dataset()
class TestLoss(unittest.TestCase):
def test_run_torsion_angle_loss(self):
batch_size = consts.batch_size
n_res = consts.n_res
a = torch.rand((batch_size, n_res, 7, 2))
a_gt = torch.rand((batch_size, n_res, 7, 2))
a_alt_gt = torch.rand((batch_size, n_res, 7, 2))
loss = torsion_angle_loss(a, a_gt, a_alt_gt)
def test_run_fape(self):
batch_size = consts.batch_size
n_frames = 7
n_atoms = 5
x = torch.rand((batch_size, n_atoms, 3))
x_gt = torch.rand((batch_size, n_atoms, 3))
rots = torch.rand((batch_size, n_frames, 3, 3))
rots_gt = torch.rand((batch_size, n_frames, 3, 3))
trans = torch.rand((batch_size, n_frames, 3))
trans_gt = torch.rand((batch_size, n_frames, 3))
t = Rigid(Rotation(rot_mats=rots), trans)
t_gt = Rigid(Rotation(rot_mats=rots_gt), trans_gt)
frames_mask = torch.randint(0, 2, (batch_size, n_frames)).float()
positions_mask = torch.randint(0, 2, (batch_size, n_atoms)).float()
length_scale = 10
loss = compute_fape(pred_frames=t, target_frames=t_gt, frames_mask=frames_mask, pred_positions=x, target_positions=x_gt, positions_mask=positions_mask, length_scale=length_scale)
def test_run_between_residue_bond_loss(self):
bs = consts.batch_size
n = consts.n_res
pred_pos = torch.rand(bs, n, 14, 3)
pred_atom_mask = torch.randint(0, 2, (bs, n, 14))
residue_index = torch.arange(n).unsqueeze(0)
aatype = torch.randint(0, 22, (bs, n))
between_residue_bond_loss(pred_pos, pred_atom_mask, residue_index, aatype)
_utils.skip_unless_alphafold_installed()
def test_between_residue_bond_loss_compare(self):
def run_brbl(pred_pos, pred_atom_mask, residue_index, aatype):
return alphafold.model.all_atom.between_residue_bond_loss(pred_pos, pred_atom_mask, residue_index, aatype)
f = hk.transform(run_brbl)
n_res = consts.n_res
pred_pos = np.random.rand(n_res, 14, 3).astype(np.float32)
pred_atom_mask = np.random.randint(0, 2, (n_res, 14)).astype(np.float32)
residue_index = np.arange(n_res)
aatype = np.random.randint(0, 22, (n_res,))
out_gt = f.apply({}, None, pred_pos, pred_atom_mask, residue_index, aatype)
out_gt = jax.tree_map((lambda x: x.block_until_ready()), out_gt)
out_gt = jax.tree_map((lambda x: torch.tensor(np.copy(x))), out_gt)
out_repro = between_residue_bond_loss(torch.tensor(pred_pos).cuda(), torch.tensor(pred_atom_mask).cuda(), torch.tensor(residue_index).cuda(), torch.tensor(aatype).cuda())
out_repro = tensor_tree_map((lambda x: x.cpu()), out_repro)
for k in out_gt.keys():
self.assertTrue((torch.max(torch.abs((out_gt[k] - out_repro[k]))) < consts.eps))
def test_run_between_residue_clash_loss(self):
bs = consts.batch_size
n = consts.n_res
pred_pos = torch.rand(bs, n, 14, 3)
pred_atom_mask = torch.randint(0, 2, (bs, n, 14)).float()
atom14_atom_radius = torch.rand(bs, n, 14)
residue_index = torch.arange(n).unsqueeze(0)
loss = between_residue_clash_loss(pred_pos, pred_atom_mask, atom14_atom_radius, residue_index)
_utils.skip_unless_alphafold_installed()
def test_between_residue_clash_loss_compare(self):
def run_brcl(pred_pos, atom_exists, atom_radius, res_ind):
return alphafold.model.all_atom.between_residue_clash_loss(pred_pos, atom_exists, atom_radius, res_ind)
f = hk.transform(run_brcl)
n_res = consts.n_res
pred_pos = np.random.rand(n_res, 14, 3).astype(np.float32)
atom_exists = np.random.randint(0, 2, (n_res, 14)).astype(np.float32)
atom_radius = np.random.rand(n_res, 14).astype(np.float32)
res_ind = np.arange(n_res)
out_gt = f.apply({}, None, pred_pos, atom_exists, atom_radius, res_ind)
out_gt = jax.tree_map((lambda x: x.block_until_ready()), out_gt)
out_gt = jax.tree_map((lambda x: torch.tensor(np.copy(x))), out_gt)
out_repro = between_residue_clash_loss(torch.tensor(pred_pos).cuda(), torch.tensor(atom_exists).cuda(), torch.tensor(atom_radius).cuda(), torch.tensor(res_ind).cuda())
out_repro = tensor_tree_map((lambda x: x.cpu()), out_repro)
for k in out_gt.keys():
self.assertTrue((torch.max(torch.abs((out_gt[k] - out_repro[k]))) < consts.eps))
_utils.skip_unless_alphafold_installed()
def test_compute_plddt_compare(self):
n_res = consts.n_res
logits = np.random.rand(n_res, 50)
out_gt = alphafold.common.confidence.compute_plddt(logits)
out_gt = torch.tensor(out_gt)
logits_t = torch.tensor(logits)
out_repro = compute_plddt(logits_t)
self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
def test_find_structural_violations(self):
n = consts.n_res
batch = {'atom14_atom_exists': torch.randint(0, 2, (n, 14)), 'residue_index': torch.arange(n), 'aatype': torch.randint(0, 20, (n,)), 'residx_atom14_to_atom37': torch.randint(0, 37, (n, 14)).long()}
pred_pos = torch.rand(n, 14, 3)
config = {'clash_overlap_tolerance': 1.5, 'violation_tolerance_factor': 12.0}
find_structural_violations(batch, pred_pos, **config)
_utils.skip_unless_alphafold_installed()
def test_find_structural_violations_compare(self):
def run_fsv(batch, pos, config):
cwd = os.getcwd()
os.chdir('tests/test_data')
loss = alphafold.model.folding.find_structural_violations(batch, pos, config)
os.chdir(cwd)
return loss
f = hk.transform(run_fsv)
n_res = consts.n_res
batch = {'atom14_atom_exists': np.random.randint(0, 2, (n_res, 14)), 'residue_index': np.arange(n_res), 'aatype': np.random.randint(0, 20, (n_res,)), 'residx_atom14_to_atom37': np.random.randint(0, 37, (n_res, 14)).astype(np.int64)}
pred_pos = np.random.rand(n_res, 14, 3)
config = mlc.ConfigDict({'clash_overlap_tolerance': 1.5, 'violation_tolerance_factor': 12.0})
out_gt = f.apply({}, None, batch, pred_pos, config)
out_gt = jax.tree_map((lambda x: x.block_until_ready()), out_gt)
out_gt = jax.tree_map((lambda x: torch.tensor(np.copy(x))), out_gt)
batch = tree_map((lambda x: torch.tensor(x).cuda()), batch, np.ndarray)
out_repro = find_structural_violations(batch, torch.tensor(pred_pos).cuda(), **config)
out_repro = tensor_tree_map((lambda x: x.cpu()), out_repro)
def compare(out):
(gt, repro) = out
assert (torch.max(torch.abs((gt - repro))) < consts.eps)
dict_multimap(compare, [out_gt, out_repro])
_utils.skip_unless_alphafold_installed()
def test_compute_renamed_ground_truth_compare(self):
def run_crgt(batch, atom14_pred_pos):
return alphafold.model.folding.compute_renamed_ground_truth(batch, atom14_pred_pos)
f = hk.transform(run_crgt)
n_res = consts.n_res
batch = {'seq_mask': np.random.randint(0, 2, (n_res,)).astype(np.float32), 'aatype': np.random.randint(0, 20, (n_res,)), 'atom14_gt_positions': np.random.rand(n_res, 14, 3), 'atom14_gt_exists': np.random.randint(0, 2, (n_res, 14)).astype(np.float32), 'all_atom_mask': np.random.randint(0, 2, (n_res, 37)).astype(np.float32), 'all_atom_positions': np.random.rand(n_res, 37, 3).astype(np.float32)}
def _build_extra_feats_np():
b = tree_map((lambda n: torch.tensor(n)), batch, np.ndarray)
b = data_transforms.make_atom14_masks(b)
b = data_transforms.make_atom14_positions(b)
return tensor_tree_map((lambda t: np.array(t)), b)
batch = _build_extra_feats_np()
atom14_pred_pos = np.random.rand(n_res, 14, 3)
out_gt = f.apply({}, None, batch, atom14_pred_pos)
out_gt = jax.tree_map((lambda x: torch.tensor(np.array(x))), out_gt)
batch = tree_map((lambda x: torch.tensor(x).cuda()), batch, np.ndarray)
atom14_pred_pos = torch.tensor(atom14_pred_pos).cuda()
out_repro = compute_renamed_ground_truth(batch, atom14_pred_pos)
out_repro = tensor_tree_map((lambda t: t.cpu()), out_repro)
for k in out_repro:
self.assertTrue((torch.max(torch.abs((out_gt[k] - out_repro[k]))) < consts.eps))
_utils.skip_unless_alphafold_installed()
def test_msa_loss_compare(self):
def run_msa_loss(value, batch):
config = compare_utils.get_alphafold_config()
msa_head = alphafold.model.modules.MaskedMsaHead(config.model.heads.masked_msa, config.model.global_config)
return msa_head.loss(value, batch)
f = hk.transform(run_msa_loss)
n_res = consts.n_res
n_seq = consts.n_seq
value = {'logits': np.random.rand(n_res, n_seq, 23).astype(np.float32)}
batch = {'true_msa': np.random.randint(0, 21, (n_res, n_seq)), 'bert_mask': np.random.randint(0, 2, (n_res, n_seq)).astype(np.float32)}
out_gt = f.apply({}, None, value, batch)['loss']
out_gt = torch.tensor(np.array(out_gt))
value = tree_map((lambda x: torch.tensor(x).cuda()), value, np.ndarray)
batch = tree_map((lambda x: torch.tensor(x).cuda()), batch, np.ndarray)
with torch.no_grad():
out_repro = masked_msa_loss(value['logits'], **batch)
out_repro = tensor_tree_map((lambda t: t.cpu()), out_repro)
self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
_utils.skip_unless_alphafold_installed()
def test_distogram_loss_compare(self):
config = compare_utils.get_alphafold_config()
c_distogram = config.model.heads.distogram
def run_distogram_loss(value, batch):
dist_head = alphafold.model.modules.DistogramHead(c_distogram, config.model.global_config)
return dist_head.loss(value, batch)
f = hk.transform(run_distogram_loss)
n_res = consts.n_res
value = {'logits': np.random.rand(n_res, n_res, c_distogram.num_bins).astype(np.float32), 'bin_edges': np.linspace(c_distogram.first_break, c_distogram.last_break, c_distogram.num_bins)}
batch = {'pseudo_beta': np.random.rand(n_res, 3).astype(np.float32), 'pseudo_beta_mask': np.random.randint(0, 2, (n_res,))}
out_gt = f.apply({}, None, value, batch)['loss']
out_gt = torch.tensor(np.array(out_gt))
value = tree_map((lambda x: torch.tensor(x).cuda()), value, np.ndarray)
batch = tree_map((lambda x: torch.tensor(x).cuda()), batch, np.ndarray)
with torch.no_grad():
out_repro = distogram_loss(logits=value['logits'], min_bin=c_distogram.first_break, max_bin=c_distogram.last_break, no_bins=c_distogram.num_bins, **batch)
out_repro = tensor_tree_map((lambda t: t.cpu()), out_repro)
self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
_utils.skip_unless_alphafold_installed()
def test_experimentally_resolved_loss_compare(self):
config = compare_utils.get_alphafold_config()
c_experimentally_resolved = config.model.heads.experimentally_resolved
def run_experimentally_resolved_loss(value, batch):
er_head = alphafold.model.modules.ExperimentallyResolvedHead(c_experimentally_resolved, config.model.global_config)
return er_head.loss(value, batch)
f = hk.transform(run_experimentally_resolved_loss)
n_res = consts.n_res
value = {'logits': np.random.rand(n_res, 37).astype(np.float32)}
batch = {'all_atom_mask': np.random.randint(0, 2, (n_res, 37)), 'atom37_atom_exists': np.random.randint(0, 2, (n_res, 37)), 'resolution': np.array(1.0)}
out_gt = f.apply({}, None, value, batch)['loss']
out_gt = torch.tensor(np.array(out_gt))
value = tree_map((lambda x: torch.tensor(x).cuda()), value, np.ndarray)
batch = tree_map((lambda x: torch.tensor(x).cuda()), batch, np.ndarray)
with torch.no_grad():
out_repro = experimentally_resolved_loss(logits=value['logits'], min_resolution=c_experimentally_resolved.min_resolution, max_resolution=c_experimentally_resolved.max_resolution, **batch)
out_repro = tensor_tree_map((lambda t: t.cpu()), out_repro)
self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
_utils.skip_unless_alphafold_installed()  # NOTE(review): looks like a truncated decorator -- confirm against the original.
def test_supervised_chi_loss_compare(self):
    """Check OpenFold's supervised_chi_loss against AlphaFold's reference
    implementation on random sidechain angles."""
    config = compare_utils.get_alphafold_config()
    c_chi_loss = config.model.heads.structure_module
    def run_supervised_chi_loss(value, batch):
        # The AlphaFold function accumulates into ret['loss'] in place.
        ret = {'loss': jax.numpy.array(0.0)}
        alphafold.model.folding.supervised_chi_loss(ret, batch, value, c_chi_loss)
        return ret['loss']
    f = hk.transform(run_supervised_chi_loss)
    n_res = consts.n_res
    value = {'sidechains': {'angles_sin_cos': np.random.rand(8, n_res, 7, 2).astype(np.float32), 'unnormalized_angles_sin_cos': np.random.rand(8, n_res, 7, 2).astype(np.float32)}}
    batch = {'aatype': np.random.randint(0, 21, (n_res,)), 'seq_mask': np.random.randint(0, 2, (n_res,)), 'chi_mask': np.random.randint(0, 2, (n_res, 4)), 'chi_angles': np.random.rand(n_res, 4).astype(np.float32)}
    out_gt = f.apply({}, None, value, batch)
    out_gt = torch.tensor(np.array(out_gt.block_until_ready()))
    value = tree_map((lambda x: torch.tensor(x).cuda()), value, np.ndarray)
    batch = tree_map((lambda x: torch.tensor(x).cuda()), batch, np.ndarray)
    # OpenFold takes the chi angles as stacked sin/cos pairs.
    batch['chi_angles_sin_cos'] = torch.stack([torch.sin(batch['chi_angles']), torch.cos(batch['chi_angles'])], dim=(- 1))
    with torch.no_grad():
        out_repro = supervised_chi_loss(chi_weight=c_chi_loss.chi_weight, angle_norm_weight=c_chi_loss.angle_norm_weight, **{**batch, **value['sidechains']})
    out_repro = tensor_tree_map((lambda t: t.cpu()), out_repro)
    self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
_utils.skip_unless_alphafold_installed()  # NOTE(review): looks like a truncated decorator -- confirm against the original.
def test_violation_loss_compare(self):
    """Check OpenFold's structural violation loss against AlphaFold's,
    including the find_structural_violations preprocessing."""
    config = compare_utils.get_alphafold_config()
    c_viol = config.model.heads.structure_module
    def run_viol_loss(batch, atom14_pred_pos):
        # Ground truth: find violations, then accumulate the loss in place.
        ret = {'loss': np.array(0.0).astype(np.float32)}
        value = {}
        value['violations'] = alphafold.model.folding.find_structural_violations(batch, atom14_pred_pos, c_viol)
        alphafold.model.folding.structural_violation_loss(ret, batch, value, c_viol)
        return ret['loss']
    f = hk.transform(run_viol_loss)
    n_res = consts.n_res
    batch = {'seq_mask': np.random.randint(0, 2, (n_res,)).astype(np.float32), 'residue_index': np.arange(n_res), 'aatype': np.random.randint(0, 21, (n_res,))}
    # Derive the atom14/atom37 masks with the reference data transforms.
    alphafold.model.tf.data_transforms.make_atom14_masks(batch)
    batch = {k: np.array(v) for (k, v) in batch.items()}
    atom14_pred_pos = np.random.rand(n_res, 14, 3).astype(np.float32)
    out_gt = f.apply({}, None, batch, atom14_pred_pos)
    out_gt = torch.tensor(np.array(out_gt.block_until_ready()))
    batch = tree_map((lambda n: torch.tensor(n).cuda()), batch, np.ndarray)
    atom14_pred_pos = torch.tensor(atom14_pred_pos).cuda()
    # Rebuild the same masks with OpenFold's torch-side transforms.
    batch = data_transforms.make_atom14_masks(batch)
    out_repro = violation_loss(find_structural_violations(batch, atom14_pred_pos, **c_viol), **batch)
    out_repro = out_repro.cpu()
    self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
_utils.skip_unless_alphafold_installed()  # NOTE(review): looks like a truncated decorator -- confirm against the original.
def test_lddt_loss_compare(self):
    """Check OpenFold's predicted-LDDT loss against the AlphaFold
    PredictedLDDTHead loss on random coordinates and logits."""
    config = compare_utils.get_alphafold_config()
    c_plddt = config.model.heads.predicted_lddt
    def run_plddt_loss(value, batch):
        head = alphafold.model.modules.PredictedLDDTHead(c_plddt, config.model.global_config)
        return head.loss(value, batch)
    f = hk.transform(run_plddt_loss)
    n_res = consts.n_res
    value = {'predicted_lddt': {'logits': np.random.rand(n_res, c_plddt.num_bins).astype(np.float32)}, 'structure_module': {'final_atom_positions': np.random.rand(n_res, 37, 3).astype(np.float32)}}
    batch = {'all_atom_positions': np.random.rand(n_res, 37, 3).astype(np.float32), 'all_atom_mask': np.random.randint(0, 2, (n_res, 37)).astype(np.float32), 'resolution': np.array(1.0).astype(np.float32)}
    out_gt = f.apply({}, None, value, batch)
    out_gt = torch.tensor(np.array(out_gt['loss']))
    to_tensor = (lambda t: torch.tensor(t).cuda())
    value = tree_map(to_tensor, value, np.ndarray)
    batch = tree_map(to_tensor, batch, np.ndarray)
    # c_plddt is splatted so min/max resolution etc. match the reference.
    out_repro = lddt_loss(logits=value['predicted_lddt']['logits'], all_atom_pred_pos=value['structure_module']['final_atom_positions'], **{**batch, **c_plddt})
    out_repro = out_repro.cpu()
    self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
_utils.skip_unless_alphafold_installed()  # NOTE(review): looks like a truncated decorator -- confirm against the original.
def test_backbone_loss_compare(self):
    """Check OpenFold's backbone FAPE loss against AlphaFold's
    backbone_loss on random affine trajectories."""
    config = compare_utils.get_alphafold_config()
    c_sm = config.model.heads.structure_module
    def run_bb_loss(batch, value):
        # The AlphaFold function accumulates into ret['loss'] in place.
        ret = {'loss': np.array(0.0)}
        alphafold.model.folding.backbone_loss(ret, batch, value, c_sm)
        return ret['loss']
    f = hk.transform(run_bb_loss)
    n_res = consts.n_res
    batch = {'backbone_affine_tensor': random_affines_vector((n_res,)), 'backbone_affine_mask': np.random.randint(0, 2, (n_res,)).astype(np.float32), 'use_clamped_fape': np.array(0.0)}
    value = {'traj': random_affines_vector((c_sm.num_layer, n_res))}
    out_gt = f.apply({}, None, batch, value)
    out_gt = torch.tensor(np.array(out_gt.block_until_ready()))
    to_tensor = (lambda t: torch.tensor(t).cuda())
    batch = tree_map(to_tensor, batch, np.ndarray)
    value = tree_map(to_tensor, value, np.ndarray)
    # OpenFold consumes 4x4 rigid tensors rather than affine vectors.
    batch['backbone_rigid_tensor'] = affine_vector_to_4x4(batch['backbone_affine_tensor'])
    batch['backbone_rigid_mask'] = batch['backbone_affine_mask']
    out_repro = backbone_loss(traj=value['traj'], **{**batch, **c_sm})
    out_repro = out_repro.cpu()
    self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
_utils.skip_unless_alphafold_installed()  # NOTE(review): looks like a truncated decorator -- confirm against the original.
def test_sidechain_loss_compare(self):
    """Check OpenFold's sidechain FAPE loss against AlphaFold's
    sidechain_loss, including frame construction and renamed ground
    truth resolution for ambiguous atoms."""
    config = compare_utils.get_alphafold_config()
    c_sm = config.model.heads.structure_module
    def run_sidechain_loss(batch, value, atom14_pred_positions):
        # Reference pipeline: build frames, wrap predictions in r3 types,
        # resolve symmetric-atom renaming, then evaluate the loss.
        batch = {**batch, **alphafold.model.all_atom.atom37_to_frames(batch['aatype'], batch['all_atom_positions'], batch['all_atom_mask'])}
        v = {}
        v['sidechains'] = {}
        v['sidechains']['frames'] = alphafold.model.r3.rigids_from_tensor4x4(value['sidechains']['frames'])
        v['sidechains']['atom_pos'] = alphafold.model.r3.vecs_from_tensor(value['sidechains']['atom_pos'])
        v.update(alphafold.model.folding.compute_renamed_ground_truth(batch, atom14_pred_positions))
        value = v
        ret = alphafold.model.folding.sidechain_loss(batch, value, c_sm)
        return ret['loss']
    f = hk.transform(run_sidechain_loss)
    n_res = consts.n_res
    batch = {'seq_mask': np.random.randint(0, 2, (n_res,)).astype(np.float32), 'aatype': np.random.randint(0, 20, (n_res,)), 'atom14_gt_positions': np.random.rand(n_res, 14, 3).astype(np.float32), 'atom14_gt_exists': np.random.randint(0, 2, (n_res, 14)).astype(np.float32), 'all_atom_positions': np.random.rand(n_res, 37, 3).astype(np.float32), 'all_atom_mask': np.random.randint(0, 2, (n_res, 37)).astype(np.float32)}
    def _build_extra_feats_np():
        # Round-trip through OpenFold's transforms to add atom14 features,
        # then back to numpy so both implementations see the same batch.
        b = tree_map((lambda n: torch.tensor(n)), batch, np.ndarray)
        b = data_transforms.make_atom14_masks(b)
        b = data_transforms.make_atom14_positions(b)
        return tensor_tree_map((lambda t: np.array(t)), b)
    batch = _build_extra_feats_np()
    value = {'sidechains': {'frames': random_affines_4x4((c_sm.num_layer, n_res, 8)), 'atom_pos': np.random.rand(c_sm.num_layer, n_res, 14, 3).astype(np.float32)}}
    atom14_pred_pos = np.random.rand(n_res, 14, 3).astype(np.float32)
    out_gt = f.apply({}, None, batch, value, atom14_pred_pos)
    out_gt = torch.tensor(np.array(out_gt.block_until_ready()))
    to_tensor = (lambda t: torch.tensor(t).cuda())
    batch = tree_map(to_tensor, batch, np.ndarray)
    value = tree_map(to_tensor, value, np.ndarray)
    atom14_pred_pos = to_tensor(atom14_pred_pos)
    batch = data_transforms.atom37_to_frames(batch)
    batch.update(compute_renamed_ground_truth(batch, atom14_pred_pos))
    out_repro = sidechain_loss(sidechain_frames=value['sidechains']['frames'], sidechain_atom_pos=value['sidechains']['atom_pos'], **{**batch, **c_sm})
    out_repro = out_repro.cpu()
    self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
_utils.skip_unless_alphafold_installed()  # NOTE(review): looks like a truncated decorator -- confirm against the original.
def test_tm_loss_compare(self):
    """Check OpenFold's TM loss against AlphaFold's PredictedAlignedErrorHead
    loss, using real pretrained head weights for both sides."""
    config = compare_utils.get_alphafold_config()
    c_tm = config.model.heads.predicted_aligned_error
    def run_tm_loss(representations, batch, value):
        # Reference: run the head forward, then evaluate its loss.
        head = alphafold.model.modules.PredictedAlignedErrorHead(c_tm, config.model.global_config)
        v = {}
        v.update(value)
        v['predicted_aligned_error'] = head(representations, batch, False)
        return head.loss(v, batch)['loss']
    f = hk.transform(run_tm_loss)
    # Fixed seed so both implementations see identical random inputs.
    np.random.seed(42)
    n_res = consts.n_res
    representations = {'pair': np.random.rand(n_res, n_res, consts.c_z).astype(np.float32)}
    batch = {'backbone_affine_tensor': random_affines_vector((n_res,)), 'backbone_affine_mask': np.random.randint(0, 2, (n_res,)).astype(np.float32), 'resolution': np.array(1.0).astype(np.float32)}
    value = {'structure_module': {'final_affines': random_affines_vector((n_res,))}}
    params = compare_utils.fetch_alphafold_module_weights('alphafold/alphafold_iteration/predicted_aligned_error_head')
    out_gt = f.apply(params, None, representations, batch, value)
    out_gt = torch.tensor(np.array(out_gt.block_until_ready()))
    to_tensor = (lambda n: torch.tensor(n).cuda())
    representations = tree_map(to_tensor, representations, np.ndarray)
    batch = tree_map(to_tensor, batch, np.ndarray)
    value = tree_map(to_tensor, value, np.ndarray)
    # OpenFold consumes 4x4 rigid tensors rather than affine vectors.
    batch['backbone_rigid_tensor'] = affine_vector_to_4x4(batch['backbone_affine_tensor'])
    batch['backbone_rigid_mask'] = batch['backbone_affine_mask']
    model = compare_utils.get_global_pretrained_openfold()
    logits = model.aux_heads.tm(representations['pair'])
    out_repro = tm_loss(logits=logits, final_affine_tensor=value['structure_module']['final_affines'], **{**batch, **c_tm})
    out_repro = out_repro.cpu()
    self.assertTrue((torch.max(torch.abs((out_gt - out_repro))) < consts.eps))
class Data_Loader_toy():
    """Loader for the toy "tokens ||| states" dataset.

    Builds token/state vocabularies from the training split, indexes every
    sentence, and groups same-length sentences into minibatches of at most
    ``bsz``.

    NOTE(review): ``self.test`` starts as a bool flag and is later
    overwritten with the batched test data inside ``get_data`` when the
    flag is set -- works because ``get_data`` runs exactly once, but it is
    fragile; confirm no caller re-reads the flag afterwards.
    """
    def __init__(self, path, bsz, L, K, test=False):
        # path: directory containing {train,dev,test}-toy2.txt
        # bsz: maximum minibatch size
        # L: maximum segment length used by get_pr_mask
        # K: number of label states (last dimension of the pr mask)
        # test: when True, load the test split instead of dev
        self.data_path = path
        self.bsz = bsz
        self.test = test
        self.L = L
        self.K = K
        self.get_data()
    def get_data(self):
        """Read splits from disk, build vocabularies from train, and batch."""
        base_path = self.data_path
        path_train = os.path.join(base_path, 'train-toy2.txt')
        train_dataset = self.read_toy(path_train)
        if (not self.test):
            path_dev = os.path.join(base_path, 'dev-toy2.txt')
            validation_dataset = self.read_toy(path_dev)
        else:
            path_test = os.path.join(base_path, 'test-toy2.txt')
            test_dataset = self.read_toy(path_test)
        # The bare string below is a pre-existing no-op statement, kept as-is.
        'index the tokens'
        train_dict = self.pre_process(train_dataset, self.bsz)
        self.vocab2idx = train_dict['data_vocab']
        self.state_vocab = train_dict['state_vocab']
        self.train = train_dict['bsz_processed_data']
        self.train_idx = train_dict['mb2lineno']
        if (not self.test):
            # Dev/test reuse the vocabularies built from the training split.
            dev_dict = self.pre_process_dev(validation_dataset, train_dict, self.bsz)
            (self.valid, self.valid_idx) = (dev_dict['bsz_processed_data'], dev_dict['mb2lineno'])
        else:
            test_dict = self.pre_process_dev(test_dataset, train_dict, self.bsz)
            (self.test, self.test_idx) = (test_dict['bsz_processed_data'], test_dict['mb2lineno'])
        return
    def read_toy(self, path):
        """Parse one split file; each line is "tokens ||| states".

        Returns (list of token lists with <eos> appended, list of state lists).
        """
        data_lst = []
        state_lst = []
        with open(path, 'r') as f:
            for line in f:
                (sent, state) = line.split('|||')
                sent = (sent.split() + ['<eos>'])
                state = state.split()
                data_lst.append(sent)
                state_lst.append(state)
        return (data_lst, state_lst)
    def get_vocab(self, data_lst):
        """Build a token->index map in first-seen order, reserving
        <bos>=0 and <pad>=1."""
        vocab = []
        for sent in data_lst:
            for letter in sent:
                if (letter not in vocab):
                    vocab.append(letter)
        vocab = (['<bos>', '<pad>'] + vocab)
        vocab = {x: i for (i, x) in enumerate(vocab)}
        return vocab
    def pre_process(self, data, bsz=20):
        """Index the training split, build vocabularies, and batch it."""
        (data_lst, state_lst) = data
        data_vocab = self.get_vocab(data_lst)
        state_vocab = self.get_vocab(state_lst)
        # Side effect: populates self.vocab2field used by get_pr_mask.
        self.get_pr_dict(data_vocab)
        print(data_vocab)
        print(state_vocab)
        processed_data = []
        processed_state_lst = []
        for (sent, state) in zip(data_lst, state_lst):
            processed_data.append([data_vocab[x] for x in sent])
            processed_state_lst.append([state_vocab[x] for x in state])
        (bsz_processed_data, mb2lineno) = self.batchify(processed_data, processed_state_lst, data_lst, bsz)
        result = {'data_vocab': data_vocab, 'state_vocab': state_vocab, 'processed_data': processed_data, 'processed_state_lst': processed_state_lst, 'bsz_processed_data': bsz_processed_data, 'mb2lineno': mb2lineno}
        return result
    def get_pr_dict(self, vocab):
        """Map each token index to a field id.

        Tokens named "grp-num" share the field of their "grp" prefix;
        tokens without a dash each get their own field.
        """
        grp_dict = {}
        self.vocab2field = {}
        for (key, val) in vocab.items():
            if ('-' not in key):
                grp_dict[key] = len(grp_dict)
                self.vocab2field[val] = grp_dict[key]
                continue
            (grp, num) = key.split('-')
            if (grp not in grp_dict):
                grp_dict[grp] = len(grp_dict)
            self.vocab2field[val] = grp_dict[grp]
        return
    def get_pr_mask(self, sent):
        """Return an (L, len(sent), 1, K) 0/1 LongTensor marking, for each
        maximal same-field segment (capped at length L), its start index
        and length."""
        translate = [self.vocab2field[val] for val in sent]
        prev = 0
        curr = 0
        curr_lab = (- 1)
        temp = torch.LongTensor(self.L, len(translate), 1, self.K).fill_(0)
        seglen = 0
        while (curr < len(translate)):
            # Close the running segment when the field changes or the
            # segment would exceed the maximum length L.
            if ((translate[curr] != curr_lab) or (seglen >= (self.L - 1))):
                if (curr_lab != (- 1)):
                    temp[(seglen, prev, 0, curr_lab)] = 1
                seglen = 0
                curr_lab = translate[curr]
                prev = curr
            else:
                seglen += 1
            curr += 1
        # Flush the final open segment.
        temp[(seglen, prev, 0, curr_lab)] = 1
        return temp
    def pre_process_dev(self, data, result_train, bsz=20):
        """Index a dev/test split with the training vocabularies and batch."""
        (data_lst, state_lst) = data
        data_vocab = result_train['data_vocab']
        state_vocab = result_train['state_vocab']
        print(data_vocab)
        print(state_vocab)
        processed_data = []
        processed_state_lst = []
        for (sent, state) in zip(data_lst, state_lst):
            processed_data.append([data_vocab[x] for x in sent])
            processed_state_lst.append([state_vocab[x] for x in state])
        (bsz_processed_data, mb2lineno) = self.batchify(processed_data, processed_state_lst, data_lst, bsz)
        result = {'data_vocab': data_vocab, 'state_vocab': state_vocab, 'processed_data': processed_data, 'processed_state_lst': processed_state_lst, 'bsz_processed_data': bsz_processed_data, 'mb2lineno': mb2lineno}
        return result
    def batchify(self, token_lst, processed_state_lst, data_lst, bsz):
        """Sort sentences by length and emit minibatches of equal-length
        sentences, each at most ``bsz`` large.

        Returns (minibatches, mb2linenos) where each minibatch is
        (LongTensor batch, states, pr masks, None, raw strings) and
        mb2linenos holds the original line numbers per minibatch.
        """
        (sents, sorted_idxs) = zip(*sorted(zip(token_lst, range(len(token_lst))), key=(lambda x: len(x[0]))))
        (minibatches, mb2linenos) = ([], [])
        curr_batch = []
        curr_linenos = []
        curr_strbsz = []
        curr_statez = []
        mask_pr = []
        curr_len = len(sents[0])
        for i in range(len(sents)):
            # Start a new minibatch on a length change or when full.
            if ((len(sents[i]) != curr_len) or (len(curr_batch) == bsz)):
                minibatches.append((torch.LongTensor(curr_batch), curr_statez, mask_pr, None, curr_strbsz))
                mb2linenos.append(curr_linenos)
                curr_batch = [sents[i]]
                curr_len = len(sents[i])
                curr_strbsz = [data_lst[sorted_idxs[i]]]
                curr_statez = [processed_state_lst[sorted_idxs[i]]]
                curr_linenos = [sorted_idxs[i]]
                mask_pr = [self.get_pr_mask(sents[i])]
            else:
                curr_batch.append(sents[i])
                curr_strbsz.append(data_lst[sorted_idxs[i]])
                curr_statez.append(processed_state_lst[sorted_idxs[i]])
                curr_linenos.append(sorted_idxs[i])
                mask_pr.append(self.get_pr_mask(sents[i]))
        if (len(curr_batch) > 0):
            # Flush the trailing partial minibatch.
            minibatches.append((torch.LongTensor(curr_batch), curr_statez, mask_pr, None, curr_strbsz))
            mb2linenos.append(curr_linenos)
        return (minibatches, mb2linenos)
def normalize_probs(m):
    """Row-normalize a 2-D TensorFlow tensor so each row sums to 1."""
    row_totals = tf.reduce_sum(m, axis=1)
    denom = tf.reshape(row_totals, [(- 1), 1])
    return tf.math.divide(m, denom)
def copy_BraTS_segmentation_and_convert_labels(in_file, out_file):
    """Copy a BraTS segmentation image, remapping labels 4->3, 2->1, 1->2.

    Args:
        in_file: path to the input segmentation readable by SimpleITK.
        out_file: path where the relabelled image is written.

    Raises:
        RuntimeError: if the input contains any label outside {0, 1, 2, 4}.
    """
    img = sitk.ReadImage(in_file)
    img_npy = sitk.GetArrayFromImage(img)
    uniques = np.unique(img_npy)
    for u in uniques:
        if (u not in [0, 1, 2, 4]):
            # Fix: include the offending label value so failures are
            # diagnosable (the original message gave no detail).
            raise RuntimeError('unexpected label: {}'.format(u))
    # Remap into a fresh array so already-remapped voxels are not remapped
    # again (e.g. 2->1 must not then be treated as label 1).
    seg_new = np.zeros_like(img_npy)
    seg_new[(img_npy == 4)] = 3
    seg_new[(img_npy == 2)] = 1
    seg_new[(img_npy == 1)] = 2
    img_corr = sitk.GetImageFromArray(seg_new)
    # Preserve spacing/origin/direction metadata from the source image.
    img_corr.CopyInformation(img)
    sitk.WriteImage(img_corr, out_file)
# NOTE(review): the two lines below look like truncated pytest decorators
# (presumably "@pytest.mark.parametrize(...)") -- confirm against the
# original source before running.
.parametrize('debugging', [False, True])
.parametrize('ofolder', [str(Path(__tmp_dir__, 'test')), str(Path(__tmp_dir__, 'mixup_test'))])
def test_mixup(debugging, ofolder):
    """Smoke-test imed_mixup.mixup on a 1x1x40x40 input whose target has a
    10x10 foreground patch; exercises both debugging modes and output dirs."""
    # Zero-filled nested lists; converted to float tensors below.
    inp = [[[[0 for i in range(40)] for i in range(40)]]]
    targ = [[[[0 for i in range(40)] for i in range(40)]]]
    # Mark the top-left 10x10 block of the target as foreground.
    for i in range(10):
        for j in range(10):
            targ[0][0][i][j] = 1
    inp = torch.tensor(inp).float()
    targ = torch.tensor(targ).float()
    imed_mixup.mixup(inp, targ, alpha=0.5, debugging=debugging, ofolder=ofolder)
_materialize('tensorflow')  # NOTE(review): looks like a truncated decorator (presumably an "@mark_materialize"-style registration) -- confirm.
class Reverse(UnaryOpBase):
    """Operator model for reversing a tensor along a random subset of axes.

    Shape and dtype are preserved; only element order changes.
    """
    in_dtypes = [(i,) for i in DTYPE_GEN_ALL]
    out_dtypes = [(i,) for i in DTYPE_GEN_ALL]
    def __init__(self):
        super().__init__()
        # Reversal needs at least a rank-1 input; output rank matches input.
        self.inp_ranks = [rank_from(1)]
        self.out_ranks = [rank_from(1)]
    def _init_axis(self, input_shape: List[Union[(int, z3.ExprRef)]]):
        """Pick (once, cached in extra_attrs) a random subset of axes to
        reverse, then assert it is valid for the given shape."""
        if ('axis' not in self.extra_attrs):
            axis = []
            for i in range(len(input_shape)):
                # Each axis is independently included with probability 0.5.
                if (random.random() < 0.5):
                    axis.append(i)
            self.extra_attrs['axis'] = axis
        ConstraintCheck.le(len(self.extra_attrs['axis']), len(input_shape))
        if self.extra_attrs['axis']:
            ConstraintCheck.lt(max(self.extra_attrs['axis']), len(input_shape))
        return self.extra_attrs['axis']
    def type_transfer(self, input_shapes: List[AbsTensor]) -> List[AbsTensor]:
        """Output tensors are identical in shape/dtype to the inputs."""
        _ = self._init_axis(input_shapes[0].shape)
        return input_shapes
    def requires(self, input_shapes):
        # Ensure the axis choice is initialized/validated before solving.
        _ = self._init_axis(input_shapes[0].shape)
        return super().requires(input_shapes)
    def deduct_inp_ranks_and_dtype(self, out_abs_tensor: List[AbsTensor]) -> List[Tuple[(int, DType)]]:
        """Input rank/dtype equal the output's (reverse is shape-preserving)."""
        return [(out_abs_tensor[0].ndims, out_abs_tensor[0].dtype)]
def predToSegmentation(pred):
    """One-hot segmentation: mark, per spatial position, the channel(s)
    whose score equals the channel-wise maximum (dim=1).

    Keeps the original divide-then-compare formulation, so a position
    whose maximum is 0 yields NaN ratios and an all-zero row.
    """
    channel_max = torch.amax(pred, dim=1, keepdim=True)
    normalized = torch.div(pred, channel_max)
    return torch.eq(normalized, 1).float()
class ConvBertTokenizerFast(BertTokenizerFast):
    """Fast ConvBERT tokenizer.

    Identical in behavior to BertTokenizerFast; only the pretrained
    resource tables and the associated slow tokenizer class differ.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = ConvBertTokenizer
class TrainTransform():
    """Point-cloud training augmentation pipeline selected by ``aug_mode``.

    Mode 1 applies jitter, random point removal, translation and block
    removal; mode 2 additionally inserts a random rotation about the z axis.
    """
    def __init__(self, aug_mode):
        self.aug_mode = aug_mode
        if (self.aug_mode == 1):
            stages = [JitterPoints(sigma=0.1, clip=0.2), RemoveRandomPoints(r=(0.0, 0.1)), RandomTranslation(max_delta=0.3), RemoveRandomBlock(p=0.4)]
        elif (self.aug_mode == 2):
            # Same pipeline as mode 1 with a z-axis rotation before the
            # block removal.
            stages = [JitterPoints(sigma=0.1, clip=0.2), RemoveRandomPoints(r=(0.0, 0.1)), RandomTranslation(max_delta=0.3), RandomRotation(max_theta=180, axis=np.array([0, 0, 1])), RemoveRandomBlock(p=0.4)]
        else:
            raise NotImplementedError('Unknown aug_mode: {}'.format(self.aug_mode))
        self.transform = transforms.Compose(stages)
    def __call__(self, e):
        """Apply the configured augmentation chain to ``e``."""
        if (self.transform is None):
            return e
        return self.transform(e)
def build_post_process(args):
    """Windows-only post-build step: copy runtime DLL dependencies
    (LAPACK/BLAS, Qt, CUDA runtime, CGAL/GMP) into the install tree's
    ``lib`` directory so the installed binaries can find them.

    No-op on non-Windows platforms.
    """
    if PLATFORM_IS_WINDOWS:
        lapack_paths = glob.glob(os.path.join(args.install_path, 'lib64/lapack_blas_windows/*.dll'))
        if lapack_paths:
            for lapack_path in lapack_paths:
                copy_file_if_not_exists(lapack_path, os.path.join(args.install_path, 'lib', os.path.basename(lapack_path)))
        if args.qt_path:
            # Core Qt DLLs plus the "windows" platform plugin, which Qt
            # loads from a platforms/ subdirectory at runtime.
            copy_file_if_not_exists(os.path.join(args.qt_path, 'bin/Qt5Core.dll'), os.path.join(args.install_path, 'lib/Qt5Core.dll'))
            copy_file_if_not_exists(os.path.join(args.qt_path, 'bin/Qt5Gui.dll'), os.path.join(args.install_path, 'lib/Qt5Gui.dll'))
            copy_file_if_not_exists(os.path.join(args.qt_path, 'bin/Qt5Widgets.dll'), os.path.join(args.install_path, 'lib/Qt5Widgets.dll'))
            mkdir_if_not_exists(os.path.join(args.install_path, 'lib/platforms'))
            copy_file_if_not_exists(os.path.join(args.qt_path, 'plugins/platforms/qwindows.dll'), os.path.join(args.install_path, 'lib/platforms/qwindows.dll'))
        if (args.with_cuda and args.cuda_path):
            # NOTE(review): raises IndexError if no cudart64_*.dll matches --
            # presumably intended as a hard failure; confirm.
            cudart_lib_path = glob.glob(os.path.join(args.cuda_path, 'bin/cudart64_*.dll'))[0]
            copy_file_if_not_exists(cudart_lib_path, os.path.join(args.install_path, 'lib', os.path.basename(cudart_lib_path)))
        if args.cgal_path:
            gmp_lib_path = os.path.join(args.cgal_path, 'auxiliary/gmp/lib/libgmp-10.dll')
            if os.path.exists(gmp_lib_path):
                copy_file_if_not_exists(gmp_lib_path, os.path.join(args.install_path, 'lib/libgmp-10.dll'))
            cgal_lib_path = glob.glob(os.path.join(args.cgal_path, 'bin/CGAL-vc*-mt-*.dll'))
            copy_file_if_not_exists(cgal_lib_path[0], os.path.join(args.install_path, 'lib', os.path.basename(cgal_lib_path[0])))
def block(inp, nbfilters, dropout, weight_decay, channel_axis, subsample=(1, 1), batchnorm_training=True, use_bias=True):
    """Pre-activation residual block (BN -> ReLU -> 3x3 conv, twice).

    The first conv applies ``subsample`` strides when given; dropout (if
    enabled) sits before the second conv. The shortcut is the identity when
    shape and channel count match, otherwise a strided 1x1 projection.
    """
    def _pre_activation(t):
        # BN with center/scale toggled together, followed by ReLU.
        t = BatchNormalization(axis=channel_axis, center=batchnorm_training, scale=batchnorm_training)(t)
        return Activation('relu')(t)
    # First conv path: optional strided convolution.
    out = _pre_activation(inp)
    out = ZeroPadding2D((1, 1))(out)
    if (subsample is not None):
        out = Conv2D(nbfilters, (3, 3), strides=subsample, kernel_regularizer=l2(weight_decay), use_bias=use_bias)(out)
    else:
        out = Conv2D(nbfilters, (3, 3), kernel_regularizer=l2(weight_decay), use_bias=use_bias)(out)
    # Second conv path: dropout (when configured) before the convolution.
    out = _pre_activation(out)
    if (dropout > 0.0):
        out = Dropout(dropout)(out)
    out = ZeroPadding2D((1, 1))(out)
    out = Conv2D(nbfilters, (3, 3), kernel_regularizer=l2(weight_decay), use_bias=use_bias)(out)
    # Residual connection: identity if shapes line up, else 1x1 projection.
    identity_ok = ((subsample == (1, 1)) and (inp._keras_shape[channel_axis] == nbfilters))
    if identity_ok:
        return add([out, inp])
    shortcut = Conv2D(nbfilters, (1, 1), strides=subsample, kernel_regularizer=l2(weight_decay), use_bias=use_bias)(inp)
    return add([out, shortcut])
def test_remove_last_from_packed_seq():
    """remove_last_from_packed_seq drops the final element of each packed
    sequence and shortens every length by one."""
    batch = torch.tensor([[1, 2, 3], [4, 3, 0], [12, 18, 0]])
    seq_lengths = torch.tensor([3, 2, 2])
    packed = rnn.pack_padded_sequence(batch, seq_lengths, batch_first=True)
    shortened = torch_utils.remove_last_from_packed_seq(packed)
    (padded_out, out_lengths) = rnn.pad_packed_sequence(shortened, batch_first=True)
    want = np.array([[1, 2], [4, 0], [12, 0]])
    want_lengths = (seq_lengths - 1)
    np.testing.assert_array_equal(want, padded_out.numpy())
    np.testing.assert_array_equal(out_lengths, want_lengths.numpy())
# NOTE(review): the line below appears to be a truncated decorator
# (presumably "@add_start_docstrings(...)") -- confirm against the original.
_start_docstrings('CamemBERT Model with a `language modeling` head on top. ', CAMEMBERT_START_DOCSTRING)
class TFCamembertForMaskedLM(TFRobertaForMaskedLM):
    """CamemBERT masked-LM model: reuses the RoBERTa implementation and only
    swaps in the CamemBERT configuration class."""
    config_class = CamembertConfig
class TFRoFormerForMaskedLM(metaclass=DummyObject):
    """Placeholder emitted when TensorFlow is unavailable.

    Instantiating it raises immediately with a message naming the missing
    'tf' backend.
    """
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        # Raises unconditionally: this dummy exists only to give a clear error.
        requires_backends(self, ['tf'])
class BatchEnv(object):
    """Combine multiple gym-style environments into one batched environment.

    All wrapped environments must share observation and action spaces.
    Unknown attribute lookups fall through to the first environment.
    """
    def __init__(self, envs, blocking):
        """Store the environments and validate their spaces.

        Args:
            envs: list of environments with identical spaces.
            blocking: if True, call environments synchronously; otherwise
                each call returns a promise that is resolved afterwards.

        Raises:
            ValueError: if the environments disagree on a space.
        """
        self._envs = envs
        self._blocking = blocking
        observ_space = self._envs[0].observation_space
        if (not all(((env.observation_space == observ_space) for env in self._envs))):
            raise ValueError('All environments must use the same observation space.')
        action_space = self._envs[0].action_space
        if (not all(((env.action_space == action_space) for env in self._envs))):
            # Fixed copy-paste bug: this message previously (and wrongly)
            # said "observation space".
            raise ValueError('All environments must use the same action space.')
    def __len__(self):
        """Number of combined environments."""
        return len(self._envs)
    def __getitem__(self, index):
        """Access an individual environment by index."""
        return self._envs[index]
    def __getattr__(self, name):
        """Forward unknown attribute lookups to the first environment."""
        return getattr(self._envs[0], name)
    def step(self, actions):
        """Step all environments with one action each.

        Args:
            actions: one action per environment.

        Returns:
            Tuple of stacked observations, rewards, done flags, and a tuple
            of per-environment info dicts.

        Raises:
            ValueError: if an action is invalid for its environment.
        """
        for (index, (env, action)) in enumerate(zip(self._envs, actions)):
            if (not env.action_space.contains(action)):
                message = 'Invalid action at index {}: {}'
                raise ValueError(message.format(index, action))
        if self._blocking:
            transitions = [env.step(action) for (env, action) in zip(self._envs, actions)]
        else:
            # Non-blocking: collect promises first, then resolve them.
            transitions = [env.step(action, blocking=False) for (env, action) in zip(self._envs, actions)]
            transitions = [transition() for transition in transitions]
        (observs, rewards, dones, infos) = zip(*transitions)
        observ = np.stack(observs)
        reward = np.stack(rewards)
        done = np.stack(dones)
        info = tuple(infos)
        return (observ, reward, done, info)
    def reset(self, indices=None):
        """Reset selected environments (all by default).

        Returns:
            Stacked initial observations of the reset environments.
        """
        if (indices is None):
            indices = np.arange(len(self._envs))
        if self._blocking:
            observs = [self._envs[index].reset() for index in indices]
        else:
            observs = [self._envs[index].reset(blocking=False) for index in indices]
            observs = [observ() for observ in observs]
        observ = np.stack(observs)
        return observ
    def close(self):
        """Close all environments that expose a close() method."""
        for env in self._envs:
            if hasattr(env, 'close'):
                env.close()
# NOTE(review): the bare name below looks like a truncated decorator
# (presumably "@require_sentencepiece") -- confirm against the original.
_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    """Integration tests for the multilingual Speech2Text tokenizer
    checkpoint, covering language codes, vocab size, and special tokens."""
    checkpoint_name = 'valhalla/s2t_mustc_multilinguial_medium'
    french_text = "C'est trop cool"
    spanish_text = 'Esto es genial'
    @classmethod
    def setUpClass(cls):
        """Load the shared tokenizer once for the whole test class.

        Fix: ``setUpClass`` must be a classmethod -- unittest invokes it as
        ``cls.setUpClass()``, so the undecorated version raised a TypeError
        (missing ``cls`` argument) before any test ran.
        """
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def check_language_codes(self):
        # Helper (no "test_" prefix, so not auto-discovered): spot-check
        # a few language-code ids.
        self.assertEqual(self.tokenizer.lang_code_to_id['pt'], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id['ru'], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id['it'], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id['de'], 11)
    def test_vocab_size(self):
        """The checkpoint's vocabulary has exactly 10000 entries."""
        self.assertEqual(self.tokenizer.vocab_size, 10000)
    def test_tokenizer_decode_ignores_language_codes(self):
        """Decoding with skip_special_tokens drops the leading language code."""
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_adds_special_tokens(self):
        """Encoding prefixes the target-language code and appends EOS."""
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[(- 1)], self.tokenizer.eos_token_id)
    def test_tgt_lang_setter(self):
        """Setting tgt_lang updates the prefix tokens accordingly."""
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = 'es'
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
def training_params(is_gcloud=False, output_dir=None):
    """Build progressive-GAN TrainingParams for the CelebA-HQ experiment.

    Args:
        is_gcloud: whether training runs on Google Cloud (affects dataset
            paths inside celeba_hq_dataset).
        output_dir: experiment output directory; derived from this file's
            location when falsy.

    Returns:
        A fully populated train.TrainingParams instance.
    """
    if (not output_dir):
        output_dir = util.construct_experiment_output_dir(__file__)
    num_gpus = 1
    stop_after = 7  # last growth phase to train
    # Batch size shrinks as resolution grows (phase -> batch size).
    dynamic_batch_size = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3, 9: 2}
    imgs_per_phase = 384000
    # Steps per phase: enough to show imgs_per_phase images, but at least
    # 6000 steps. Note this produces float step counts via true division.
    dynamic_steps_per_phase = {phase: max((imgs_per_phase / batch_size), 6000) for (phase, batch_size) in dynamic_batch_size.items()}
    dynamic_steps_per_phase[7] *= 2  # train the final phase twice as long
    return train.TrainingParams(description=DESCRIPTION, is_gcloud=is_gcloud, num_gpus=num_gpus, dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True), checkpoint_every_n_steps=None, checkpoint_every_n_secs=(30 * 60), dynamic_steps_per_phase=dynamic_steps_per_phase, dynamic_batch_size=dynamic_batch_size, stop_after=stop_after, eval_every_n_secs=((48 * 60) * 60), write_summaries_every_n_steps=700, infogan_summary_reps=0, output_dir=output_dir, allow_initial_partial_restore=True, noise_size=64, noise_stddev=1.0, summary_grid_size=3, infogan_cont_weight=10.0, infogan_cont_depth_to_num_vars={2: 16, 3: 16, 4: 16, 5: 16, 6: 16, 7: 0, 8: 0, 9: 0}, generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, consistency_loss=300.0, consistency_temporal=False, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False, infogan_input_method='custom03'), discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True), use_gpu_tower_scope=True)
def missing_explanation(json):
    """Validate that every recommendation carries an 'explanation' key.

    Args:
        json: mapping whose values are lists of recommendation dicts.

    Returns:
        ``(message, 400)`` for the first list containing a recommendation
        without an explanation, or ``False`` when all are valid.
    """
    for recommendations in json.values():
        if any(('explanation' not in rec) for rec in recommendations):
            return ('Recommendations must include explanation.', 400)
    return False
def find_dense(path, data):
    """Compare greedy vs. optimal dense-tile counts for SDDMM blocking.

    Loads edges from ``path + '.npz'``, scans destination columns in row
    chunks of ``dense_tile_H``, greedily packs the distinct column indices
    into windows of width ``dense_tile_W``, and compares the greedy tile
    count against the packing lower bound. Results are printed and appended
    to '3_cnt_TC_blk_SDDMM.csv'.

    Args:
        path: graph file path without the '.npz' extension.
        data: dataset name used in the printed/CSV output.
    """
    nodes = set()
    graph = defaultdict(list)
    graph_obj = np.load((path + '.npz'), allow_pickle=True)
    src_li = graph_obj['src_li']
    dst_li = graph_obj['dst_li']
    num_nodes = graph_obj['num_nodes']
    for (src, dst) in zip(src_li, dst_li):
        nodes.add(src)
        nodes.add(dst)
        # Adjacency keyed by destination node.
        graph[dst].append(src)
    tile_cnt = 0
    opt_cnt = 0
    chunk_edges = []
    for src_iter in range(0, num_nodes, dense_tile_H):
        # Gather all neighbor columns for this chunk of dense_tile_H rows.
        dst_list = []
        for src in range(src_iter, (src_iter + dense_tile_H)):
            dst_list += graph[src]
        actual_cnt = len(dst_list)
        chunk_edges.append(len(dst_list))
        range_set = sorted(set(dst_list))
        # Lower bound: distinct columns packed contiguously, ceil-divided.
        tmp_opt_cnt = (((len(range_set) + dense_tile_W) - 1) // dense_tile_W)
        opt_cnt += tmp_opt_cnt
        exp_opt_cnt = ((dense_tile_H * dense_tile_W) * tmp_opt_cnt)
        tmp = 0
        i = j = 0
        # Greedy tiling: each tile spans a window of dense_tile_W columns
        # starting at the first not-yet-covered column.
        while ((i < len(range_set)) and (j < len(range_set))):
            end = (range_set[i] + dense_tile_W)
            while ((j < len(range_set)) and (range_set[j] < end)):
                j += 1
            i = j
            tile_cnt += 1
            tmp += 1
        exp_tile_cnt = ((dense_tile_H * dense_tile_W) * tile_cnt)
        if (tmp < tmp_opt_cnt):
            # Greedy can never beat the lower bound; if it appears to, the
            # input must contain duplicate edges.
            print(range_set)
            print(tmp, tmp_opt_cnt)
            print('tmp < tmp_opt_cnt Error Encounter, Duplicate Edges')
            sys.exit(0)
    print('{},{},{},{:.2f}'.format(data, tile_cnt, opt_cnt, ((100 * (tile_cnt - opt_cnt)) / tile_cnt)))
    # Fix: context manager guarantees the CSV handle is flushed and closed
    # (the original leaked the open file object).
    with open('3_cnt_TC_blk_SDDMM.csv', 'a') as fout:
        fout.write('{},{},{},{:.2f}\n'.format(data, tile_cnt, opt_cnt, ((100 * (tile_cnt - opt_cnt)) / tile_cnt)))
class GitConfig(PretrainedConfig):
    """Configuration for the GIT (GenerativeImage2Text) model.

    Holds the text-decoder hyperparameters plus a nested GitVisionConfig
    for the vision encoder.
    """
    model_type = 'git'
    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs):
        """Store all hyperparameters; ``vision_config`` may be a plain dict
        and defaults to an empty GitVisionConfig when omitted."""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if (vision_config is None):
            vision_config = {}
            logger.info('vision_config is None. initializing the GitVisionConfig with default values.')
        # The nested config is always materialized as a GitVisionConfig.
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        """Serialize to a plain dict, expanding the nested vision config and
        recording the model type."""
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
def _build_expression(inputs, output=None, size_dict=None, optimize='auto', implementation=None, prefer_einsum=False, autojit=False, via=None, sort_contraction_indices=False):
    """Build a callable performing the contraction described by ``inputs``
    and ``output`` index specifications.

    Single-input cases are special-cased into an identity, a transpose, or
    an einsum; multi-input cases build a contraction tree. ``via``, when
    given, wraps the callable with input/output conversion functions.
    """
    if (len(inputs) == 1):
        # Single term: no true contraction between arrays is needed.
        term = tuple(inputs[0])
        output = tuple(output)
        if (term == output):
            # Identity, possibly converting to the requested backend.
            def fn(*arrays, backend=None):
                if (backend is None):
                    return arrays[0]
                return ar.do('array', arrays[0], like=backend)
        elif (len(term) == len(output)):
            # Same indices, different order: a pure axis permutation.
            perm = tuple(map(term.index, output))
            def fn(*arrays, backend=None):
                return ar.do('transpose', arrays[0], perm, like=backend)
        else:
            # Repeated/summed indices: fall back to a single-term einsum.
            eq = inputs_output_to_eq(inputs, output)
            def fn(*arrays, backend=None):
                return ar.do('einsum', eq, arrays[0], like=backend)
    else:
        tree = array_contract_tree(inputs, output, size_dict, optimize=optimize, sort_contraction_indices=sort_contraction_indices)
        if (not tree.sliced_inds):
            fn = tree.get_contractor(autojit=autojit, prefer_einsum=prefer_einsum, implementation=implementation)
        else:
            # Sliced contractions need the variadic wrapper around
            # tree.contract.
            fn = Variadic(tree.contract, autojit=autojit, prefer_einsum=prefer_einsum, implementation=implementation)
    if (via is not None):
        fn = Via(fn, *via)
    return fn
def debug(msg, *args):
    """Print ``msg`` %-formatted with ``args`` when DEBUG logging is enabled
    by the module-level MIN_LEVEL threshold."""
    if (MIN_LEVEL <= DEBUG):
        print('DEBUG: %s' % (msg % args))
_REGISTRY.register()  # NOTE(review): looks like a truncated decorator (presumably "@DATASET_REGISTRY.register()") -- confirm against the original.
class STL10(DatasetBase):
    """STL-10 semi-supervised dataset: a labeled train fold, an unlabeled
    pool, and a labeled test split."""
    dataset_dir = 'stl10'
    def __init__(self, cfg):
        root = osp.abspath(osp.expanduser(cfg.DATASET.ROOT))
        self.dataset_dir = osp.join(root, self.dataset_dir)
        train_dir = osp.join(self.dataset_dir, 'train')
        test_dir = osp.join(self.dataset_dir, 'test')
        unlabeled_dir = osp.join(self.dataset_dir, 'unlabeled')
        fold_file = osp.join(self.dataset_dir, 'stl10_binary', 'fold_indices.txt')
        assert (0 <= cfg.DATASET.STL10_FOLD <= 4)
        train_x = self._read_data_train(train_dir, cfg.DATASET.STL10_FOLD, fold_file)
        train_u = self._read_data_all(unlabeled_dir)
        test = self._read_data_all(test_dir)
        if cfg.DATASET.ALL_AS_UNLABELED:
            # Optionally let the labeled images also serve as unlabeled data.
            train_u = (train_u + train_x)
        super().__init__(train_x=train_x, train_u=train_u, test=test)
    def _read_data_train(self, data_dir, fold, fold_file):
        """Read labeled training images, optionally restricted to one of the
        predefined folds (fold >= 0)."""
        imnames = listdir_nohidden(data_dir)
        imnames.sort()
        items = []
        list_idx = list(range(len(imnames)))
        if (fold >= 0):
            with open(fold_file, 'r') as f:
                str_idx = f.read().splitlines()[fold]
            # Fix: the original used np.fromstring(..., dtype=np.uint8),
            # which is deprecated and -- critically -- wrapped any fold
            # index above 255 modulo 256 (STL-10 fold indices go up to
            # 4999), silently selecting the wrong images.
            list_idx = np.array(str_idx.split(), dtype=np.int64)
        for i in list_idx:
            imname = imnames[i]
            impath = osp.join(data_dir, imname)
            # Filenames look like "<index>_<label>.<ext>".
            label = osp.splitext(imname)[0].split('_')[1]
            label = int(label)
            item = Datum(impath=impath, label=label)
            items.append(item)
        return items
    def _read_data_all(self, data_dir):
        """Read every image in a directory; a 'none' label maps to -1."""
        imnames = listdir_nohidden(data_dir)
        items = []
        for imname in imnames:
            impath = osp.join(data_dir, imname)
            label = osp.splitext(imname)[0].split('_')[1]
            if (label == 'none'):
                label = (- 1)
            else:
                label = int(label)
            item = Datum(impath=impath, label=label)
            items.append(item)
        return items
class TFMPNetForMaskedLM():
    """Placeholder class: every use reports that the TensorFlow backend is required."""

    def __init__(self, *args, **kwargs):
        # Instantiation is not possible without TF installed.
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        # Loading pretrained weights likewise requires TF.
        requires_tf(self)
class TSPDecoder(DecoderBase):
    """Decoder context builder for the TSP environment."""

    def get_context(self, observation: Observation, embeddings: Array) -> Array:
        # Context = [graph summary | current node | start node], with a leading batch axis.
        graph_summary = embeddings.mean(0)
        current_node = embeddings[observation.position]
        start_node = embeddings[observation.start_position]
        context = jnp.concatenate([graph_summary, current_node, start_node], axis=0)
        return context[None]

    def get_transformed_attention_mask(self, attention_mask: Array) -> Array:
        # The mask is used unchanged for TSP.
        return attention_mask
def test_running_stat():
    """RunningStat must track numpy's mean and sample variance for several shapes."""
    for shape in ((), (3,), (3, 4)):
        samples = []
        stat = RunningStat(shape)
        for _ in range(5):
            sample = np.random.randn(*shape)
            stat.push(sample)
            samples.append(sample)
            expected_mean = np.mean(samples, axis=0)
            assert np.allclose(stat.mean, expected_mean)
            # With a single sample the reference variance is mean**2
            # (matching RunningStat's seed value); afterwards use ddof=1.
            if len(samples) == 1:
                expected_var = np.square(expected_mean)
            else:
                expected_var = np.var(samples, ddof=1, axis=0)
            assert np.allclose(stat.var, expected_var)
_module()  # NOTE(review): bare call — presumably a stripped registration decorator; confirm
class AutoAugment(object):
    """Apply one randomly chosen sub-policy (a Compose of augmentations) per call."""

    def __init__(self, policies, hparams=_HPARAMS_DEFAULT):
        # Validate the nested policy structure before copying it.
        assert (isinstance(policies, list) and (len(policies) > 0)), 'Policies must be a non-empty list.'
        for policy in policies:
            assert (isinstance(policy, list) and (len(policy) > 0)), 'Each policy in policies must be a non-empty list.'
            for augment in policy:
                assert (isinstance(augment, dict) and ('type' in augment)), 'Each specific augmentation must be a dict with key "type".'
        self.hparams = hparams
        policies = copy.deepcopy(policies)
        # Merge the shared hparams into every augmentation of every sub-policy.
        self.policies = [[merge_hparams(policy, hparams) for policy in sub] for sub in policies]
        self.sub_policy = [Compose(policy) for policy in self.policies]

    def __call__(self, results):
        # Pick one sub-policy uniformly at random and run it on the results dict.
        chosen = random.choice(self.sub_policy)
        return chosen(results)

    def __repr__(self):
        return f'{self.__class__.__name__}(policies={self.policies})'
def time_adapter(func):
    """Decorator: append start/end time embeddings to the (h, r, t) embeddings.

    The wrapped function must be called as ``func(model, ..., data=...)`` where
    ``kwargs['data'][3]`` / ``kwargs['data'][4]`` hold the start / end time
    index tensors. Two ``nn.Embedding`` transfer tables (dim 10) are created
    lazily on the model at first use.
    """
    from functools import wraps

    @wraps(func)  # FIX: preserve the wrapped function's name/docstring
    def inner(*args, **kwargs):
        (h_embedding, r_embedding, t_embedding) = func(*args, **kwargs)
        model = args[0]
        start = kwargs['data'][3]
        end = kwargs['data'][4]
        if (not model.init_time_adapter):
            # Lazy one-time construction of the time-embedding tables.
            model.start_time_transfer = nn.Embedding(num_embeddings=model.time_dict_len, embedding_dim=10).to(model.model_device)
            model.end_time_transfer = nn.Embedding(num_embeddings=model.time_dict_len, embedding_dim=10).to(model.model_device)
            model.init_time_adapter = True
        # FIX: local variable typo 'strat_embedding' -> 'start_embedding'.
        start_embedding = model.start_time_transfer(start)
        end_embedding = model.end_time_transfer(end)
        # Concatenate the two 10-dim time embeddings onto each of h / r / t.
        h_embedding = torch.cat((h_embedding, start_embedding, end_embedding), dim=1)
        r_embedding = torch.cat((r_embedding, start_embedding, end_embedding), dim=1)
        t_embedding = torch.cat((t_embedding, start_embedding, end_embedding), dim=1)
        return (h_embedding, r_embedding, t_embedding)
    return inner
def reshape_hidden_states_to_3d(hidden_states):
    """Collapse hidden states to a 3-D tensor ``(first_dim, -1, last_dim)``.

    A tuple of per-layer tensors is stacked into one tensor first; the middle
    axes are then flattened.
    """
    stacked = torch.stack(hidden_states) if isinstance(hidden_states, tuple) else hidden_states
    first, last = stacked.shape[0], stacked.shape[-1]
    return stacked.reshape((first, -1, last))
class LossScaler():
    """Loss (gradient) scaler for mixed-precision training.

    In 'dynamic' mode the scale is cut by ``scale_factor`` on overflow and
    grown by the same factor after ``scale_window`` consecutive overflow-free
    iterations; in 'static' mode the scale never changes.
    """

    def __init__(self, init_scale=(2 ** 32), mode='dynamic', scale_factor=2.0, scale_window=1000):
        self.cur_scale = init_scale
        self.cur_iter = 0
        assert (mode in ('dynamic', 'static')), 'mode can only be dynamic or static'
        self.mode = mode
        self.last_overflow_iter = (-1)  # iteration index of the most recent overflow
        self.scale_factor = scale_factor
        self.scale_window = scale_window

    def has_overflow(self, params):
        """Return True if any parameter gradient contains inf/NaN (dynamic mode only)."""
        if (self.mode != 'dynamic'):
            return False
        for p in params:
            if ((p.grad is not None) and LossScaler._has_inf_or_nan(p.grad.data)):
                return True
        return False

    @staticmethod  # FIX: takes no self and is called via the class — mark it static
    def _has_inf_or_nan(x):
        """Return True if tensor ``x`` contains inf or NaN values."""
        try:
            # float16 overflow can make sum() raise in some builds; treat that
            # conversion error as an overflow too.
            cpu_sum = float(x.float().sum())
        except RuntimeError as instance:
            if ('value cannot be converted' not in instance.args[0]):
                raise
            return True
        else:
            # inf - inf produces NaN; (cpu_sum != cpu_sum) catches NaN.
            if ((cpu_sum == float('inf')) or (cpu_sum == (-float('inf'))) or (cpu_sum != cpu_sum)):
                return True
            return False

    def update_scale(self, overflow):
        """Advance one iteration, shrinking or growing the scale in dynamic mode."""
        if (self.mode != 'dynamic'):
            return
        if overflow:
            # Shrink the scale (never below 1) and remember when it happened.
            self.cur_scale = max((self.cur_scale / self.scale_factor), 1)
            self.last_overflow_iter = self.cur_iter
        elif (((self.cur_iter - self.last_overflow_iter) % self.scale_window) == 0):
            # A full overflow-free window has elapsed: grow the scale.
            self.cur_scale *= self.scale_factor
        self.cur_iter += 1

    def state_dict(self):
        """Return a serializable snapshot of the scaler state."""
        return dict(cur_scale=self.cur_scale, cur_iter=self.cur_iter, mode=self.mode, last_overflow_iter=self.last_overflow_iter, scale_factor=self.scale_factor, scale_window=self.scale_window)

    def load_state_dict(self, state_dict):
        """Restore scaler state from :meth:`state_dict` output."""
        self.cur_scale = state_dict['cur_scale']
        self.cur_iter = state_dict['cur_iter']
        self.mode = state_dict['mode']
        self.last_overflow_iter = state_dict['last_overflow_iter']
        self.scale_factor = state_dict['scale_factor']
        self.scale_window = state_dict['scale_window']

    def loss_scale(self):
        """Return the current loss scale."""
        return self.cur_scale
class BaseMaskHead(BaseModule, metaclass=ABCMeta):
    """Base class for instance-mask heads: shared train/test plumbing only."""

    def __init__(self, init_cfg):
        super(BaseMaskHead, self).__init__(init_cfg)

    def loss(self, **kwargs):
        # Subclasses implement the actual loss computation.
        pass

    def get_results(self, **kwargs):
        # Subclasses implement result post-processing.
        pass

    def forward_train(self, x, gt_labels, gt_masks, img_metas, gt_bboxes=None, gt_bboxes_ignore=None, positive_infos=None, **kwargs):
        """Forward the features and compute the training losses."""
        outs = self(x) if positive_infos is None else self(x, positive_infos)
        assert isinstance(outs, tuple), 'Forward results should be a tuple, even if only one item is returned'
        return self.loss(*outs, gt_labels=gt_labels, gt_masks=gt_masks, img_metas=img_metas, gt_bboxes=gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, positive_infos=positive_infos, **kwargs)

    def simple_test(self, feats, img_metas, rescale=False, instances_list=None, **kwargs):
        """Forward the features and post-process results for inference."""
        outs = self(feats) if instances_list is None else self(feats, instances_list=instances_list)
        mask_inputs = outs + (img_metas,)
        return self.get_results(*mask_inputs, rescale=rescale, instances_list=instances_list, **kwargs)

    def onnx_export(self, img, img_metas):
        raise NotImplementedError(f'{self.__class__.__name__} does not support ONNX EXPORT')
def _getlogger():
    """Create the 'tensorpack' logger: INFO level, stdout handler, no propagation."""
    lg = logging.getLogger('tensorpack')
    lg.propagate = False  # keep messages out of the root logger
    lg.setLevel(logging.INFO)
    hdl = logging.StreamHandler(sys.stdout)
    hdl.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
    lg.addHandler(hdl)
    return lg
def create_matrices_for_rliable(data_dictionary: Dict[(str, Dict[(str, Any)])], environment_name: str, metrics_to_normalize: List[str]) -> Tuple[(Dict[(str, Dict[(str, Any)])], Dict[(str, Dict[(str, Any)])])]:
    """Reshape raw experiment-json data into the matrix/tensor form rliable expects.

    Returns a pair:
      * per-metric (runs x tasks) matrices built from the absolute-metric step;
      * per-metric (runs x tasks x steps) tensors built from the regular steps,
        plus an 'extra' entry with bookkeeping info.
    On any format error a diagnostic message is printed and two empty dicts
    are returned.
    """
    (environment_name, metrics_to_normalize) = lower_case_inputs(environment_name, metrics_to_normalize)
    try:
        env_name = environment_name
        data_env = data_dictionary[env_name]
        # 'extra' holds bookkeeping (e.g. evaluation intervals), not run data;
        # it is popped here and re-attached to both outputs at the end.
        extra = data_dictionary.pop('extra')
        # Infer the task / algorithm / run / step structure from the first entries
        # (assumes the json is rectangular — every task/algorithm/run has the
        # same nested keys; TODO confirm upstream validation guarantees this).
        tasks = list(data_env.keys())
        algorithms = list(data_env[tasks[0]].keys())
        runs = list(data_env[tasks[0]][algorithms[0]].keys())
        steps = list(data_env[tasks[0]][algorithms[0]][runs[0]].keys())
        absolute_metric_key = check_absolute_metric(steps)
        if (absolute_metric_key is None):
            raise Exception('The final logging step for a given run should contain the absolute_metrics values in a step called absolute_metrics.')
        absolute_metrics = list(data_env[tasks[0]][algorithms[0]][runs[0]][absolute_metric_key].keys())
        def _select_metrics_for_plotting(absolute_metrics: list) -> list:
            # Keep only 'mean_*' metrics; for metrics slated for normalization,
            # keep the 'mean_norm_*' variant instead of the raw one.
            metrics_to_plot = []
            for metric in absolute_metrics:
                metric_split = metric.split('_')
                metric_in_absolute = len(set(metric_split).intersection(set(metrics_to_normalize)))
                if (metric.split('_')[0].lower() == 'mean'):
                    if ((metric_in_absolute > 0) and (metric_split[1].lower() == 'norm')):
                        metrics_to_plot.append(metric)
                    elif (metric_in_absolute == 0):
                        metrics_to_plot.append(metric)
            return metrics_to_plot
        mean_absolute_metrics = _select_metrics_for_plotting(absolute_metrics)
        # --- Absolute-metric matrices: one (runs x tasks) array per metric/algorithm.
        metric_dictionary: Dict[(str, Any)] = {}
        for metric in mean_absolute_metrics:
            metric_dictionary[metric] = {}
            for algorithm in algorithms:
                metric_dictionary[metric][algorithm] = np.zeros(shape=(len(runs), len(tasks)))
        for metric in mean_absolute_metrics:
            for algorithm in algorithms:
                for (i, run) in enumerate(runs):
                    for (j, task) in enumerate(tasks):
                        metric_data = data_env[task][algorithm][run][absolute_metric_key][metric]
                        # List-valued entries (per-episode values) are averaged.
                        data = (np.mean(metric_data) if isinstance(metric_data, list) else metric_data)
                        metric_dictionary[metric][algorithm][i][j] = data
        metric_dictionary_return = metric_dictionary
        # --- Per-step tensors: collect one (runs x tasks) matrix per step,
        # then stack them along a third axis.
        master_metric_dictionary: Dict[(str, Any)] = {}
        for metric in mean_absolute_metrics:
            master_metric_dictionary[metric] = {}
            for algorithm in algorithms:
                master_metric_dictionary[metric][algorithm] = []
        # The absolute-metric step is excluded from the per-step tensors.
        steps.remove(absolute_metric_key)
        for step in steps:
            metric_dictionary = {}
            for metric in mean_absolute_metrics:
                metric_dictionary[metric] = {}
                for algorithm in algorithms:
                    metric_dictionary[metric][algorithm] = np.zeros(shape=(len(runs), len(tasks)))
            for metric in mean_absolute_metrics:
                for algorithm in algorithms:
                    for (i, run) in enumerate(runs):
                        for (j, task) in enumerate(tasks):
                            metric_data = data_env[task][algorithm][run][step][metric]
                            data = (np.mean(metric_data) if isinstance(metric_data, list) else metric_data)
                            metric_dictionary[metric][algorithm][i][j] = data
            for metric in mean_absolute_metrics:
                for algorithm in algorithms:
                    master_metric_dictionary[metric][algorithm].append(metric_dictionary[metric][algorithm])
        final_metric_tensor_dictionary: Dict[(str, Any)] = {}
        for metric in mean_absolute_metrics:
            final_metric_tensor_dictionary[metric] = {}
            for algorithm in algorithms:
                # Stack the per-step matrices: result is (runs, tasks, steps).
                final_metric_tensor_dictionary[metric][algorithm] = np.stack(master_metric_dictionary[metric][algorithm], axis=2)
        # Narrow the evaluation interval to this environment, then re-attach
        # 'extra' both to the tensor output and back onto the input dict.
        extra['evaluation_interval'] = extra['evaluation_interval'][env_name]
        final_metric_tensor_dictionary['extra'] = extra
        data_dictionary['extra'] = extra
        return (metric_dictionary_return, final_metric_tensor_dictionary)
    except Exception as e:
        # Best-effort diagnostics: malformed json should not crash the caller.
        print(e, ': There is an issue related to the format of the json file!')
        print(('We recommend using the DiagnoseData class from ' + 'marl_eval/utils/diagnose_data_errors.py to determine the error.'))
        return ({}, {})
def validate_keras_model(platform, device_type, model_file, input_file, mace_out_file, input_names, input_shapes, input_data_formats, output_names, output_shapes, output_data_formats, validation_threshold, input_data_types, output_data_types, log_file):
    """Run the Keras model on the saved inputs and compare against MACE outputs."""
    from tensorflow import keras
    import tensorflow_model_optimization as tfmot
    if not os.path.isfile(model_file):
        common.MaceLogger.error(VALIDATION_MODULE, (("Input model file '" + model_file) + "' does not exist!"))
    # Load inside the quantize scope so quantization-aware layers deserialize.
    with tfmot.quantization.keras.quantize_scope():
        keras_model = keras.models.load_model(model_file, compile=False)
    model_inputs = []  # renamed from 'input' to avoid shadowing the builtin
    for idx in range(len(input_names)):
        value = load_data(common.formatted_file_name(input_file, input_names[idx]), input_data_types[idx])
        value = value.reshape(input_shapes[idx])
        # Keras expects channels-last; convert 4-D NCHW / OIHW layouts.
        if (input_data_formats[idx] == common.DataFormat.NCHW) and (len(input_shapes[idx]) == 4):
            value = value.transpose((0, 2, 3, 1))
        elif (input_data_formats[idx] == common.DataFormat.OIHW) and (len(input_shapes[idx]) == 4):
            value = value.transpose((2, 3, 1, 0))
        model_inputs.append(value)
    output_values = keras_model.predict(model_inputs)
    for idx in range(len(output_names)):
        output_file_name = common.formatted_file_name(mace_out_file, output_names[idx])
        mace_out_value = load_data(output_file_name, output_data_types[idx])
        (mace_out_value, real_output_shape, real_output_data_format) = get_real_out_value_shape_df(platform, mace_out_value, output_shapes[idx], output_data_formats[idx])
        compare_output(platform, device_type, output_names[idx], mace_out_value, output_values[idx], validation_threshold, log_file, real_output_shape, real_output_data_format)
def extract_labelled_aerial_imagery(df_fullmerge2insee):
    """Extract aerial image patches for each INSEE tile, in parallel when allowed.

    Each row of ``df_fullmerge2insee`` is (aerial_fname, idINSPIRE, insee_geom).
    Returns the list of ``subextract_from_aerial_tile`` results, one per row.
    """
    nb_tiles = df_fullmerge2insee.shape[0]
    n_jbs = min(nb_tiles, MAX_NB_JOBS)
    # Build (aerial file name, single-row GeoDataFrame) pairs for each tile.
    prepare_input = [(aerial_fname, gpd.GeoDataFrame(pd.DataFrame([idINSPIRE, insee_geom]).transpose().rename(columns={0: 'idINSPIRE', 1: 'geometry'}), crs={'init': 'epsg:3035'})) for (aerial_fname, idINSPIRE, insee_geom) in tqdm(df_fullmerge2insee.values)]
    if (MAX_NB_JOBS > 1):
        full_data = Parallel(n_jobs=n_jbs)((delayed(subextract_from_aerial_tile)(aerial_fname, gdf_to_extract) for (aerial_fname, gdf_to_extract) in tqdm(prepare_input)))
    else:
        full_data = [subextract_from_aerial_tile(aerial_fname, gdf_to_extract) for (aerial_fname, gdf_to_extract) in tqdm(prepare_input)]
    # FIX: the computed result was previously discarded (the function implicitly
    # returned None) — return it to the caller.
    return full_data
class Database():
    """Read-only LMDB-backed key/value store with pickled keys and values.

    The LMDB environment is opened lazily on first access, and (unless
    ``pre_open`` is set) closed again after each access until the first item
    has actually been fetched — keeping the object cheap to fork/pickle.
    """

    _database = None   # lazily opened lmdb.Environment
    _protocol = None   # pickle protocol, read from the database itself
    _length = None     # cached len(self.keys)

    def __init__(self, path: PathLike, readahead: bool=True, pre_open: bool=False):
        self.path = str(path)
        self.readahead = readahead
        self.pre_open = pre_open
        self._has_fetched_an_item = False

    # FIX: `database` was defined twice as plain methods (the second def
    # silently clobbered the first), and call sites such as
    # `self.database.begin()`, `self.keys[index]` and `del self.database`
    # show these were meant to be properties — restore the property /
    # deleter protocol.
    @property
    def database(self):
        """Open (once) and return the read-only LMDB environment."""
        if (self._database is None):
            self._database = lmdb.open(path=self.path, readonly=True, readahead=self.readahead, max_spare_txns=256, lock=False)
        return self._database

    @database.deleter
    def database(self):
        """Close the LMDB environment, if open."""
        if (self._database is not None):
            self._database.close()
            self._database = None

    @property
    def protocol(self):
        """Pickle protocol for keys, read once from the ascii 'protocol' entry."""
        if (self._protocol is None):
            self._protocol = self._get(item='protocol', convert_key=(lambda key: key.encode('ascii')), convert_value=(lambda value: pickle.loads(value)))
        return self._protocol

    @property
    def keys(self):
        """All database keys, stored under the pickled 'keys' entry."""
        protocol = self.protocol
        keys = self._get(item='keys', convert_key=(lambda key: pickle.dumps(key, protocol=protocol)), convert_value=(lambda value: pickle.loads(value)))
        return keys

    def __len__(self):
        if (self._length is None):
            self._length = len(self.keys)
        return self._length

    def __getitem__(self, item):
        # A list of keys triggers a batched getmulti() fetch.
        self._has_fetched_an_item = True
        if (not isinstance(item, list)):
            item = self._get(item, self._convert_key, self._convert_value)
        else:
            item = self._gets(item, self._convert_keys, self._convert_values)
        return item

    def __contains__(self, item):
        return (item in self.keys)

    def index(self, index):
        """Return (key, value) for the index-th key."""
        key = self.keys[index]
        return (key, self[key])

    def _get(self, item, convert_key, convert_value):
        """Fetch a single converted value inside a fresh transaction."""
        with self.database.begin() as txn:
            with txn.cursor() as cursor:
                item = self._fetch(cursor, item, convert_key, convert_value)
        self._keep_database()
        return item

    def _gets(self, items, convert_keys, convert_values):
        """Fetch several converted values inside one transaction."""
        with self.database.begin() as txn:
            with txn.cursor() as cursor:
                items = self._fetchs(cursor, items, convert_keys, convert_values)
        self._keep_database()
        return items

    def _fetch(self, cursor, key, convert_key, convert_value):
        key = convert_key(key=key)
        value = cursor.get(key=key)
        value = convert_value(value=value)
        return value

    def _fetchs(self, cursor, keys, convert_keys, convert_values):
        keys = convert_keys(keys=keys)
        (_, values) = list(zip(*cursor.getmulti(keys)))
        values = convert_values(values=values)
        return values

    def _convert_key(self, key):
        return pickle.dumps(key, protocol=self.protocol)

    def _convert_keys(self, keys):
        return [self._convert_key(key=key) for key in keys]

    def _convert_value(self, value):
        return pickle.loads(value)

    def _convert_values(self, values):
        return [self._convert_value(value=value) for value in values]

    def _keep_database(self):
        # Until an item has been fetched, close the environment again after
        # each metadata access so the object stays fork/pickle friendly.
        if ((not self.pre_open) and (not self._has_fetched_an_item)):
            del self.database

    def __iter__(self):
        return iter(self.keys)

    def __del__(self):
        del self.database
class SCM(nn.Module):
    """Multi-scale convolution module over a texture feature map.

    Three parallel convolutions (kernel sizes 1/3/5, no padding) are applied
    to the input; the outputs therefore have different spatial sizes.
    """

    def __init__(self, shape, in_dim, out_dim):
        # FIX: the constructor was misspelled `__int__`, so it never ran and
        # `SCM(shape, in_dim, out_dim)` raised TypeError.
        super(SCM, self).__init__()
        self.dim = in_dim
        self.shape = shape
        self.conv1 = nn.Conv2d(in_dim, out_dim, 1)
        # NOTE(review): both pools are constructed but never used in forward —
        # confirm whether pooling was intended.
        self.pool1 = nn.MaxPool2d(kernel_size=2)
        self.conv2 = nn.Conv2d(in_dim, out_dim, 3)
        self.pool2 = nn.MaxPool2d(kernel_size=2)
        self.conv3 = nn.Conv2d(in_dim, out_dim, 5)

    def forward(self, texture_feature):
        feature1 = self.conv1(texture_feature)
        feature2 = self.conv2(texture_feature)
        feature3 = self.conv3(texture_feature)
        # FIX: forward previously computed the features and returned None;
        # return all three scales. NOTE(review): they differ in spatial size —
        # TODO confirm the intended combination (e.g. pooling + concat).
        return (feature1, feature2, feature3)
class InfoMixin(object):
    """Mixin exposing plugin metadata parsed from the class docstring."""

    # FIX: both methods take `cls` but were not declared as classmethods, so
    # class-level calls (InfoMixin-style plugins) received no class object.
    @classmethod
    def _get_doc(cls):
        return cls.__doc__

    @classmethod
    def get_info(cls):
        """Return a metadata dict built from the parsed class docstring."""
        doc = parse_docstring(cls._get_doc())
        return {'name': cls.get_name(), 'platform': cls.get_platform(), 'module': cls.__module__, 'title': doc['short_description'], 'description': doc['long_description'], 'parameters': doc['params'], 'schema': getattr(cls, 'CONFIG_SCHEMA', None), 'return': doc['return']}
class DatasetEvaluators(DatasetEvaluator):
    """Composite evaluator: fans every call out to a list of child evaluators."""

    def __init__(self, evaluators):
        super().__init__()
        self._evaluators = evaluators

    def reset(self):
        for ev in self._evaluators:
            ev.reset()

    def process(self, inputs, outputs):
        for ev in self._evaluators:
            ev.process(inputs, outputs)

    def evaluate(self):
        """Merge child results; duplicate result keys across evaluators are an error."""
        merged = OrderedDict()
        for ev in self._evaluators:
            result = ev.evaluate()
            # Only the main process aggregates, and only non-None results count.
            if (not is_main_process()) or (result is None):
                continue
            for key, value in result.items():
                assert key not in merged, 'Different evaluators produce results with the same key {}'.format(key)
                merged[key] = value
        return merged
class Swin2SRForImageSuperResolution(metaclass=DummyObject):
    """Placeholder that raises a helpful error unless the torch backend is installed."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Report the missing backend on any instantiation attempt.
        requires_backends(self, ['torch'])
class wide_basic(nn.Module):
    """Pre-activation basic block for Wide ResNets (BN -> ReLU -> conv, twice)."""

    def __init__(self, in_planes, planes, dropout_rate, stride=1):
        super(wide_basic, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
        # Projection shortcut only when the shape changes; identity otherwise.
        if (stride != 1) or (in_planes != planes):
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True))
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        h = self.dropout(self.conv1(F.relu(self.bn1(x))))
        h = self.conv2(F.relu(self.bn2(h)))
        return h + self.shortcut(x)
class FlaxElectraModelTester(unittest.TestCase):
    """Generates small random Electra configs and inputs for Flax model tests."""

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=24, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        # Batch geometry and feature switches.
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        # Model hyper-parameters (deliberately tiny for fast tests).
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Build a config plus random input tensors respecting the use_* switches."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = random_attention_mask([self.batch_size, self.seq_length]) if self.use_attention_mask else None
        token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) if self.use_token_type_ids else None
        config = ElectraConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, embedding_size=self.embedding_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range)
        return (config, input_ids, token_type_ids, attention_mask)

    def prepare_config_and_inputs_for_common(self):
        """Repackage the prepared inputs as the dict shape shared test code expects."""
        (config, input_ids, token_type_ids, attention_mask) = self.prepare_config_and_inputs()
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return (config, inputs_dict)
def _point_in_triangle(p, a, b, c):
    """Return True if point ``p`` lies inside (or on) triangle ``abc``.

    Area method: the three sub-triangle areas sum to the whole triangle's
    area exactly when the point is inside; a tiny threshold absorbs
    floating-point error.
    """
    whole_area = abs(calc_area([a, b, c]))
    pieces = abs(calc_area([a, b, p])) + abs(calc_area([b, c, p])) + abs(calc_area([c, a, p]))
    return pieces < (whole_area + 1e-07)
def sMAPE(y_true: 'ndarray', y_pred: 'ndarray', multioutput: str='raw_values') -> Union[(float64, 'ndarray')]:
    """Symmetric mean absolute percentage error (in percent).

    With ``multioutput='raw_values'`` a per-output array (in the original
    shape) is returned; otherwise the per-output errors are averaged.
    """
    (y_true, y_pred, original_shape) = _standardize_input(y_true, y_pred, multioutput)
    # EPSILON in the denominator avoids division by zero.
    denominator = (np.abs(y_true) + np.abs(y_pred)) + EPSILON
    output_errors = np.mean((100 * np.abs((y_true - y_pred))) / denominator, axis=0)
    if (multioutput != 'raw_values'):
        return np.mean(output_errors)
    return output_errors.reshape(original_shape)
def training_2nd_user_task_fbne(model, sess):
    """Second-stage user-task training loop for FBNE: train, periodically
    evaluate (cosine/pearson) on the validation set, checkpoint, and resample
    training batches each epoch.

    NOTE(review): ``best_loss`` starts at 0 and the checkpoint is saved when
    ``cosine < best_loss`` — if cosine similarity stays non-negative no
    checkpoint is ever written; confirm whether the comparison should be ``>``.
    """
    best_loss = 0
    saver = tf.train.Saver()
    # Initial training/validation batch preparation (epoch 0).
    data_train = fbne_data.Dataset(setting.oracle_training_file_user_task)
    train_batches = data_train.get_positive_instances_user_task(0, 'train')
    num_batch_train = ((data_train.oracle_num_users // setting.batch_size_user) + 1)
    train_batch_index = range(num_batch_train)
    data_valid = fbne_data.Dataset(setting.oracle_valid_file_user_task)
    valid_batches = data_valid.get_positive_instances_user_task(0, 'valid')
    num_batch_valid = ((data_valid.oracle_num_users // setting.batch_size_user) + 1)
    valid_batch_index = range(num_batch_valid)
    for epoch_count in range(setting.second_user_epoch):
        train_begin = time()
        training_batch_2nd_user_task_fbne(data_train, train_batch_index, model, sess, train_batches, True)
        train_time = (time() - train_begin)
        # Every `verbose` epochs: compute training loss and validation metrics.
        if ((epoch_count % setting.verbose) == 0):
            loss_begin = time()
            train_loss = training_loss_2nd_user_task_fbne(data_train, train_batch_index, model, sess, train_batches, True)
            loss_time = (time() - loss_begin)
            eval_begin = time()
            (cosine, pearson) = evaluate_2nd_user_task_fbne(data_valid, valid_batch_index, model, sess, valid_batches, False)
            eval_time = (time() - eval_begin)
            print(('epoch %d, train time is %.4f, loss time is %.4f, eval_time is %.4f, train_loss is %.4f, test cosine value is %.4f, test pearson value is %.4f' % (epoch_count, train_time, loss_time, eval_time, train_loss, cosine, pearson)))
            # Checkpoint on improvement (see NOTE above about the comparison).
            if (cosine < best_loss):
                best_loss = cosine
                saver.save(sess, setting.checkpoint_path_user_task, global_step=epoch_count)
        # Resample a fresh set of positive training instances for the next epoch.
        data_train = fbne_data.Dataset(setting.oracle_training_file_user_task)
        train_batches = data_train.get_positive_instances_user_task((epoch_count + 1), 'train')
        num_batch_train = ((data_train.oracle_num_users // setting.batch_size_user) + 1)
        train_batch_index = range(num_batch_train)
def get_loader(dataset_name, root, batch_size, split='train', num_workers=2, shuffle=True):
    """Build a DataLoader for mnist / cifar10 / tinyimagenet.

    Training splits get per-dataset augmentation; shuffling only happens on
    the training split (and only when ``shuffle`` is True).

    Raises:
        Exception: if ``dataset_name`` is not registered in DATASETS.
    """
    if (dataset_name not in DATASETS):
        raise Exception('[!] No data loader found for the dataset: {}.'.format(dataset_name))
    transform_list = []
    if (split == 'train'):
        # Per-dataset training augmentation.
        if (dataset_name == 'cifar10'):
            transform_list.append(transforms.RandomHorizontalFlip())
            transform_list.append(transforms.RandomCrop(32, 4))
        if (dataset_name == 'mnist'):
            transform_list.append(transforms.RandomCrop(28, 4))
        if (dataset_name == 'tinyimagenet'):
            transform_list.append(transforms.RandomRotation(20))
            transform_list.append(transforms.RandomHorizontalFlip())
    transform_list.append(transforms.ToTensor())
    transform_chain = transforms.Compose(transform_list)
    if (dataset_name == 'mnist'):
        dataset = datasets.MNIST(root=root, train=(split == 'train'), transform=transform_chain, download=True)
    elif (dataset_name == 'cifar10'):
        dataset = datasets.CIFAR10(root=root, train=(split == 'train'), transform=transform_chain, download=True)
    elif (dataset_name == 'tinyimagenet'):
        dataset = datasets.ImageFolder(os.path.join(root, ('train' if (split == 'train') else 'val')), transform=transform_chain)
    print(dataset_name, split, dataset.__len__(), batch_size)
    # FIX: the `shuffle` argument was accepted but silently ignored; honor it
    # while still never shuffling evaluation splits.
    data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=(shuffle and (split == 'train')), num_workers=num_workers)
    return data_loader
class TestOptions(BaseOptions):
    """Command-line options used at test time (extends BaseOptions)."""

    def initialize(self):
        BaseOptions.initialize(self)
        add = self.parser.add_argument
        add('--results_dir', default='', type=str, help='the results dir, default is expr_dir/results ')
        add('--n_samples', type=int, default=5, help='#samples for multimodal')
        add('--how_many', type=int, default=50, help='how many test images to run')
        add('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        self.isTrain = False  # mark this configuration as inference-only
def _create_wr_from_rx_rm_depth_ahat(filename, rx, user):
    """Build a depth-AHAT writer mirroring the receiver's configuration."""
    return wr_rm_depth_ahat(
        filename,
        rx.port,
        rx.mode,
        rx.divisor,
        rx.profile_z,
        rx.profile_ab,
        rx.level,
        rx.bitrate,
        rx.options,
        user,
    )
class Ui_MAIAN(object):
def setupUi(self, MAIAN):
MAIAN.setObjectName('MAIAN')
MAIAN.resize(950, 821)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MAIAN.sizePolicy().hasHeightForWidth())
MAIAN.setSizePolicy(sizePolicy)
MAIAN.setMinimumSize(QtCore.QSize(950, 815))
MAIAN.setMaximumSize(QtCore.QSize(950, 815))
font = QtGui.QFont()
font.setFamily('Liberation Sans')
font.setPointSize(10)
MAIAN.setFont(font)
self.groupBox = QtWidgets.QGroupBox(MAIAN)
self.groupBox.setGeometry(QtCore.QRect(10, 640, 431, 111))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.groupBox.setFont(font)
self.groupBox.setObjectName('groupBox')
self.lineMaxFuncInv = QtWidgets.QLineEdit(self.groupBox)
self.lineMaxFuncInv.setGeometry(QtCore.QRect(330, 30, 91, 27))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.lineMaxFuncInv.setFont(font)
self.lineMaxFuncInv.setFrame(False)
self.lineMaxFuncInv.setAlignment(((QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing) | QtCore.Qt.AlignVCenter))
self.lineMaxFuncInv.setObjectName('lineMaxFuncInv')
self.label_4 = QtWidgets.QLabel(self.groupBox)
self.label_4.setGeometry(QtCore.QRect(20, 40, 221, 20))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.label_4.setFont(font)
self.label_4.setObjectName('label_4')
self.label_5 = QtWidgets.QLabel(self.groupBox)
self.label_5.setGeometry(QtCore.QRect(20, 80, 171, 17))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.label_5.setFont(font)
self.label_5.setObjectName('label_5')
self.lineSolverTimeout = QtWidgets.QLineEdit(self.groupBox)
self.lineSolverTimeout.setGeometry(QtCore.QRect(330, 70, 91, 27))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.lineSolverTimeout.setFont(font)
self.lineSolverTimeout.setFrame(False)
self.lineSolverTimeout.setAlignment(((QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing) | QtCore.Qt.AlignVCenter))
self.lineSolverTimeout.setObjectName('lineSolverTimeout')
self.lineMaxFuncInv.raise_()
self.label_4.raise_()
self.label_5.raise_()
self.lineSolverTimeout.raise_()
self.groupBox_2 = QtWidgets.QGroupBox(MAIAN)
self.groupBox_2.setGeometry(QtCore.QRect(10, 10, 431, 621))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.groupBox_2.setFont(font)
self.groupBox_2.setObjectName('groupBox_2')
self.txtSolidity = QtWidgets.QTextEdit(self.groupBox_2)
self.txtSolidity.setGeometry(QtCore.QRect(20, 130, 391, 471))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.txtSolidity.setFont(font)
self.txtSolidity.setFrameShape(QtWidgets.QFrame.NoFrame)
self.txtSolidity.setFrameShadow(QtWidgets.QFrame.Plain)
self.txtSolidity.setAcceptRichText(False)
self.txtSolidity.setObjectName('txtSolidity')
self.radioSolidity = QtWidgets.QRadioButton(self.groupBox_2)
self.radioSolidity.setGeometry(QtCore.QRect(20, 30, 191, 22))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioSolidity.setFont(font)
self.radioSolidity.setChecked(True)
self.radioSolidity.setObjectName('radioSolidity')
self.radioBytecodecompiled = QtWidgets.QRadioButton(self.groupBox_2)
self.radioBytecodecompiled.setGeometry(QtCore.QRect(20, 90, 171, 22))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioBytecodecompiled.setFont(font)
self.radioBytecodecompiled.setObjectName('radioBytecodecompiled')
self.radioBytecode = QtWidgets.QRadioButton(self.groupBox_2)
self.radioBytecode.setGeometry(QtCore.QRect(20, 60, 311, 22))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioBytecode.setFont(font)
self.radioBytecode.setObjectName('radioBytecode')
self.lineSolidityName = QtWidgets.QLineEdit(self.groupBox_2)
self.lineSolidityName.setGeometry(QtCore.QRect(300, 30, 113, 21))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.lineSolidityName.setFont(font)
self.lineSolidityName.setFrame(False)
self.lineSolidityName.setObjectName('lineSolidityName')
self.label_6 = QtWidgets.QLabel(self.groupBox_2)
self.label_6.setGeometry(QtCore.QRect(210, 30, 91, 20))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_6.setFont(font)
self.label_6.setObjectName('label_6')
self.groupBox_3 = QtWidgets.QGroupBox(MAIAN)
self.groupBox_3.setGeometry(QtCore.QRect(450, 10, 491, 741))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.groupBox_3.setFont(font)
self.groupBox_3.setObjectName('groupBox_3')
self.txtLog = QtWidgets.QTextEdit(self.groupBox_3)
self.txtLog.setGeometry(QtCore.QRect(20, 420, 451, 301))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.txtLog.setFont(font)
self.txtLog.setFrameShape(QtWidgets.QFrame.NoFrame)
self.txtLog.setFrameShadow(QtWidgets.QFrame.Plain)
self.txtLog.setAcceptRichText(False)
self.txtLog.setTextInteractionFlags((QtCore.Qt.TextSelectableByKeyboard | QtCore.Qt.TextSelectableByMouse))
self.txtLog.setObjectName('txtLog')
self.pushStart = QtWidgets.QPushButton(self.groupBox_3)
self.pushStart.setGeometry(QtCore.QRect(240, 30, 231, 91))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.pushStart.setFont(font)
self.pushStart.setObjectName('pushStart')
self.checkGreedy = QtWidgets.QCheckBox(self.groupBox_3)
self.checkGreedy.setGeometry(QtCore.QRect(20, 90, 151, 22))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.checkGreedy.setFont(font)
self.checkGreedy.setChecked(True)
self.checkGreedy.setObjectName('checkGreedy')
self.checkSuicidal = QtWidgets.QCheckBox(self.groupBox_3)
self.checkSuicidal.setGeometry(QtCore.QRect(20, 60, 151, 22))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.checkSuicidal.setFont(font)
self.checkSuicidal.setChecked(True)
self.checkSuicidal.setObjectName('checkSuicidal')
self.checkProdigal = QtWidgets.QCheckBox(self.groupBox_3)
self.checkProdigal.setGeometry(QtCore.QRect(20, 30, 171, 22))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.checkProdigal.setFont(font)
self.checkProdigal.setChecked(True)
self.checkProdigal.setObjectName('checkProdigal')
self.txtResults = QtWidgets.QTextEdit(self.groupBox_3)
self.txtResults.setGeometry(QtCore.QRect(20, 130, 451, 271))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(15)
font.setBold(False)
font.setWeight(50)
self.txtResults.setFont(font)
self.txtResults.setFrameShape(QtWidgets.QFrame.NoFrame)
self.txtResults.setFrameShadow(QtWidgets.QFrame.Plain)
self.txtResults.setAcceptRichText(False)
self.txtResults.setTextInteractionFlags((QtCore.Qt.TextSelectableByKeyboard | QtCore.Qt.TextSelectableByMouse))
self.txtResults.setObjectName('txtResults')
self.groupBox_4 = QtWidgets.QGroupBox(MAIAN)
self.groupBox_4.setGeometry(QtCore.QRect(10, 760, 931, 51))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.groupBox_4.setFont(font)
self.groupBox_4.setTitle('')
self.groupBox_4.setObjectName('groupBox_4')
self.lineSolidityName_2 = QtWidgets.QLineEdit(self.groupBox_4)
self.lineSolidityName_2.setGeometry(QtCore.QRect(10, 10, 951, 21))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.lineSolidityName_2.setFont(font)
self.lineSolidityName_2.setAutoFillBackground(False)
self.lineSolidityName_2.setStyleSheet('background-color:transparent;\n')
self.lineSolidityName_2.setFrame(False)
self.lineSolidityName_2.setReadOnly(True)
self.lineSolidityName_2.setObjectName('lineSolidityName_2')
self.retranslateUi(MAIAN)
QtCore.QMetaObject.connectSlotsByName(MAIAN)
self.pushStart.clicked.connect(self.start_thread)
self.txtLog.textChanged.connect(self.changed_log)
self.txtSolidity.textChanged.connect(self.changed_source)
self.last_pos = 0
self.locked_text = False
(str)
    def changed_source(self):
        """Auto-detect the kind of contract code pasted into the source box.

        Text that is not purely hex-like and mentions 'contract' is treated
        as Solidity (and the main contract name is pre-filled when there is
        exactly one); otherwise one of the bytecode radio buttons is picked.
        """
        tx = self.txtSolidity.toPlainText()
        # Hex digits, 'x' and spaces only -> looks like bytecode.
        pt = '^[0-9A-Fa-fx ]+$'
        remat = re.match(pt, tx)
        if ((remat is None) and (tx.find('contract') >= 0)):
            self.radioSolidity.setChecked(True)
            # Collect 'contract <Name>' occurrences to pre-fill the name field.
            ml = re.findall('contract[ |\t|\n]*[a-zA-Z0-9_]*', tx)
            if (len(ml) == 0):
                pass
            elif (len(ml) == 1):
                # Exactly one contract: strip the keyword to get its name.
                cnam = re.sub('contract[ |\t|\n]*', '', ml[0])
                self.lineSolidityName.setText(cnam)
            else:
                # Several contracts: leave the choice to the user.
                pass
        elif (remat is not None):
            # NOTE(review): the empty pattern matches at every position, so
            # this condition is true for any non-empty text -- the intended
            # pattern (perhaps ' ' or '\n') appears to have been lost here;
            # confirm which separator was meant before changing it.
            if (len(re.findall('', tx)) > 1):
                self.radioBytecode.setChecked(True)
            else:
                self.radioBytecodecompiled.setChecked(True)
(str)
def changed_log(self):
if self.locked_text:
return
self.locked_text = True
tx = self.txtLog.toPlainText()
trl = ['0', '1', '91', '92', '93', '94']
for tz in trl:
tx = tx.replace((('\x1b[' + tz) + 'm'), '')
tx = tx.replace((('[' + tz) + 'm'), '')
self.txtLog.setText(tx)
cursor = self.txtLog.textCursor()
cursor.movePosition(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
self.txtLog.setTextCursor(cursor)
self.locked_text = False
t = ''
vp = vs = vg = False
if (self.txtLog.toPlainText().find('Check if contract is PRODIGAL') >= 0):
t += '<strong>Check on PRODIGAL </strong> <br />'
if (self.txtLog.toPlainText().find('The code does not have CALL/SUICIDE,') >= 0):
t += '<font color="green">Not vulnerable</font><br />'
if (self.txtLog.toPlainText().find('Leak vulnerability found') >= 0):
t += '<font color="red">Vulnerability found</font><br />'
vp = True
if (self.txtLog.toPlainText().find('Confirmed ! The contract is prodigal') >= 0):
t += '<font color="red">Vulnerability confirmed</font><br />'
if (self.txtLog.toPlainText().find('Cannot confirm the leak vulnerability') >= 0):
t += '<font color="blue">Cannot confirm the vulnerability</font><br />'
if (self.txtLog.toPlainText().find('Cannot confirm the bug because the contract is not deployed on the blockchain') >= 0):
t += '<font color="blue">Cannot confirm because there is no source code</font><br />'
if (self.txtLog.toPlainText().find('No prodigal vulnerability found') >= 0):
t += '<font color="green">Not vulnerable</font>'
if vp:
t += '(see the log below)<br />'
if (len(t) > 0):
t += '<br />'
if (self.txtLog.toPlainText().find('Check if contract is SUICIDAL') >= 0):
t += '<strong>Check on SUICIDAL </strong><br /> '
if (self.txtLog.toPlainText().find('Suicidal vulnerability found') >= 0):
t += '<font color="red">Vulnerability found</font><br />'
vs = True
if (self.txtLog.toPlainText().find('Confirmed ! The contract is suicidal') >= 0):
t += '<font color="red">Vulnerability confirmed</font><br />'
if (self.txtLog.toPlainText().find('Cannot confirm the suicide vulnerability') >= 0):
t += '<font color="blue">Cannot confirm the vulnerability</font><br />'
if (self.txtLog.toPlainText().find('The code does not contain SUICIDE instructions, hence it is not vulnerable') >= 0):
t += '<font color="green">Not vulnerable</font>'
if (self.txtLog.toPlainText().find('No suicidal vulnerability found') >= 0):
t += '<font color="green">Not vulnerable</font>'
if vs:
t += '(see the log below)<br />'
if (len(t) > 0):
t += '<br />'
if (self.txtLog.toPlainText().find('Check if contract is GREEDY') >= 0):
t += '<strong>Check on GREEDY </strong><br /> '
if (self.txtLog.toPlainText().find('No lock vulnerability found because the contract cannot receive Ether') >= 0):
t += '<font color="green">Not vulnerable</font>'
if (self.txtLog.toPlainText().find('No locking vulnerability found') >= 0):
t += '<font color="green">Not vulnerable</font>'
if (self.txtLog.toPlainText().find('The code does not have CALL/SUICIDE/DELEGATECALL/CALLCODE') >= 0):
t += '<font color="red">Vulnerability found</font><br />'
vg = True
if (self.txtLog.toPlainText().find('Locking vulnerability found') >= 0):
t += '<font color="red">Vulnerability found</font><br />'
vg = True
self.txtResults.setHtml(t)
(str)
def append_text(self, text):
self.txtLog.insertPlainText(text)
()
def start_thread(self):
self.txtLog.clear()
self.txtResults.clear()
contract = self.txtSolidity.toPlainText()
with open('out/lastcontract', 'w') as f:
f.write(contract.encode('utf-8'))
f.close()
if self.radioBytecode.isChecked():
type_of_contract = ['--bytecode_source', 'out/lastcontract']
pt = '^[0-9A-Fa-fx ]+$'
result = re.match(pt, self.txtSolidity.toPlainText())
if (result is None):
msg = QtWidgets.QMessageBox()
msg.setWindowTitle('Something went wrong')
msg.setText('The provided code is not bytecode.')
msg.exec_()
return
elif self.radioBytecodecompiled.isChecked():
type_of_contract = ['--bytecode', 'out/lastcontract']
pt = '^[0-9A-Fa-fx ]+$'
result = re.match(pt, self.txtSolidity.toPlainText())
if (result is None):
msg = QtWidgets.QMessageBox()
msg.setWindowTitle('Something went wrong')
msg.setText('The provided code is not bytecode.')
msg.exec_()
return
elif self.radioSolidity.isChecked():
conname = self.lineSolidityName.text()
if (len(conname) == 0):
msg = QtWidgets.QMessageBox()
msg.setWindowTitle('Something went wrong')
msg.setText('If the type of source code is Solidity, then you need to specify the main contract name.')
msg.exec_()
return
if (self.txtSolidity.toPlainText().find(conname) < 0):
msg = QtWidgets.QMessageBox()
msg.setWindowTitle('Something went wrong')
msg.setText((("Contract '" + conname) + "' does not exist in the Solidity code."))
msg.exec_()
return
type_of_contract = ['--soliditycode', 'out/lastcontract', conname]
max_inv = ['--max_inv', self.lineMaxFuncInv.text()]
stimout = ['--solve_timeout', self.lineSolverTimeout.text()]
perform_checks = []
if self.checkProdigal.isChecked():
perform_checks.append((((type_of_contract + max_inv) + stimout) + ['--check', '1']))
if self.checkSuicidal.isChecked():
perform_checks.append((((type_of_contract + max_inv) + stimout) + ['--check', '0']))
if self.checkGreedy.isChecked():
perform_checks.append((((type_of_contract + max_inv) + stimout) + ['--check', '2']))
self.thread = QtCore.QThread()
self.long_running_thing = LongRunningThing(perform_checks)
self.long_running_thing.moveToThread(self.thread)
self.thread.started.connect(self.long_running_thing.run)
self.thread.start()
    def retranslateUi(self, MAIAN):
        """Set all user-visible (translatable) strings on the MAIAN window."""
        _translate = QtCore.QCoreApplication.translate
        MAIAN.setWindowTitle(_translate('MAIAN', 'MAIAN v1.0'))
        # Settings group: invocation / solver limits.
        self.groupBox.setTitle(_translate('MAIAN', 'Settings'))
        self.lineMaxFuncInv.setText(_translate('MAIAN', '3'))
        self.label_4.setText(_translate('MAIAN', 'Max function invocations'))
        self.label_5.setText(_translate('MAIAN', 'Solver timeout (msec)'))
        self.lineSolverTimeout.setText(_translate('MAIAN', '10000'))
        # Contract-type group: source box, radio buttons, contract name.
        self.groupBox_2.setToolTip(_translate('MAIAN', 'The name of the main contract'))
        self.groupBox_2.setTitle(_translate('MAIAN', 'Type of contract code'))
        self.txtSolidity.setProperty('placeholderText', _translate('MAIAN', 'Put your code here. Usually, the type is recognized automatically.'))
        self.radioSolidity.setText(_translate('MAIAN', 'Solidity source code'))
        self.radioBytecodecompiled.setText(_translate('MAIAN', 'Bytecode compiled'))
        self.radioBytecode.setText(_translate('MAIAN', 'Bytecode source'))
        self.lineSolidityName.setPlaceholderText(_translate('MAIAN', 'Main contract name'))
        self.label_6.setText(_translate('MAIAN', 'Contract name'))
        # Run group: log, start button, check toggles, results pane.
        self.groupBox_3.setTitle(_translate('MAIAN', 'Run'))
        self.txtLog.setProperty('placeholderText', _translate('MAIAN', 'Log will appear here'))
        self.pushStart.setText(_translate('MAIAN', 'START'))
        self.checkGreedy.setText(_translate('MAIAN', 'Check on Greedy'))
        self.checkSuicidal.setText(_translate('MAIAN', 'Check on Suicidal'))
        self.checkProdigal.setText(_translate('MAIAN', 'Check on Prodigal'))
        self.txtResults.setHtml(_translate('MAIAN', '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" " name="qrichtext" content="1" /><style type="text/css">\np, li { white-space: pre-wrap; }\n</style></head><body style=" font-family:\'Linux Biolinum O\'; font-size:15pt; font-weight:400; font-style:normal;">\n<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Geneva\'; font-size:10pt;"><br /></p></body></html>'))
        self.txtResults.setProperty('placeholderText', _translate('MAIAN', 'Main results will appear here'))
        self.lineSolidityName_2.setText(_translate('MAIAN', 'To keep MAIAN free and up to date, consider donating Ether to our account: 0xfd03b29b5c20f878836a3badf24f4a06'))
def plot_shelf_freqs(treble):
    """Plot the magnitude response of the analog treble-shelf network.

    ``treble`` sets the wiper position of a 10k potentiometer shared by the
    two frequency-dependent branches.  Prints the root of the denominator
    (the filter pole) and draws gain in dB against frequency in Hz on a
    semilog axis.
    """
    pot = 10000.0
    cap = 3.9e-09
    # Branch conductances of the network (1/R, in siemens).
    g1 = 1.0 / 100000.0
    g2 = 1.0 / (1800.0 + ((1 - treble) * pot))
    g3 = 1.0 / (4700.0 + (treble * pot))
    g4 = 1.0 / 100000.0
    # First-order analog transfer function H(s) = (b0*s + b1) / (a0*s + a1).
    numerator = [cap * (g1 + g2), g1 * (g2 + g3)]
    denominator = [cap * (g3 - g4), (- g4) * (g2 + g3)]
    # Evaluate from 1 Hz to ~20 kHz (worN is in rad/s).
    eval_freqs = np.logspace(0, 4.3, 1000) * (2 * np.pi)
    (w, h) = signal.freqs(numerator, denominator, worN=eval_freqs)
    print(np.roots(denominator))
    plt.semilogx(w / (2 * np.pi), 20 * np.log10(abs(h)))
_model
def repvgg_b1(pretrained=False, **kwargs):
    """Build the 'repvgg_b1' model via the ByobNet factory.

    Extra keyword arguments are forwarded to ``_create_byobnet``.
    """
    return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs)
def visualizeHiddenMain(args):
    """Visualise a model's hidden-space transforms by decoding random codes.

    Loads the model named in ``args``, then for 100 iterations: draws a
    random hidden code, decodes it, applies 200 random option transforms in
    hidden space, and decodes the result -- showing four panels per
    iteration (code mean, decoded image, transformed code mean, decoded
    transformed image).

    Raises:
        RuntimeError: if ``args`` does not name a model to load.
    """
    np.random.seed(0)
    ConfigureGPU(args)
    (data, dataset) = GetDataset(args)
    if (('model' in args) and (args['model'] is not None)):
        model = MakeModel(taskdef=None, **args)
        model.validate = True
        model.load(world=None, **data)
        prev_option = model.null_option
        for i in range(100):
            I = np.random.random((1, 64, 64, 3))
            plt.figure()
            # NOTE(review): the encoded value is immediately overwritten by a
            # random code on the next line -- the encode() call looks like a
            # debugging leftover; confirm which behaviour is intended.
            h = model.encode(I)
            h = np.random.random((1, 8, 8, 8))
            plt.subplot(1, 4, 1)
            Show(np.mean(h[0], axis=(- 1)))
            Id = model.decode(h)
            plt.subplot(1, 4, 2)
            Show(Id[0])
            # Chain 200 random option transforms in hidden space.
            for j in range(200):
                h = model.transform(h, h, np.array([np.random.randint(model.num_options)]))
            h2 = h
            plt.subplot(1, 4, 3)
            Show(np.mean(h2[0], axis=(- 1)))
            Id = model.decode(h2)
            plt.subplot(1, 4, 4)
            Show(Id[0])
            plt.show()
    else:
        raise RuntimeError('Must provide a model to load')
def convert_color_factory(src, dst):
    """Build a converter that maps *src* colorspace images to *dst*.

    Looks up the matching OpenCV conversion constant once, then returns a
    closure that applies it with ``cv2.cvtColor``.
    """
    conversion_code = getattr(cv2, 'COLOR_{}2{}'.format(src.upper(), dst.upper()))

    def convert_color(img):
        return cv2.cvtColor(img, conversion_code)

    # Give the closure a docstring specialised to this conversion pair.
    convert_color.__doc__ = 'Convert a {0} image to {1} image.\n\n    Args:\n        img (ndarray or str): The input image.\n\n    Returns:\n        ndarray: The converted {1} image.\n    '.format(src.upper(), dst.upper())
    return convert_color
class ResidualGenerator(nn.Module):
    """Wraps a network so its output is combined with the batch image as a residual."""

    def __init__(self, network):
        """Store the wrapped ``network`` module."""
        super().__init__()
        self.network = network

    def forward(self, epe_batch):
        """Run the wrapped network on *epe_batch* and apply its output to ``epe_batch.img``."""
        prediction = self.network(epe_batch)
        return make_residual(epe_batch.img, prediction)
def set_default_general_args(args):
    """Fill in general defaults on *args* for any attribute not already set.

    Existing attribute values are kept; missing ones get their fallback.
    """
    defaults = (
        ('checkpoint_activations', False),
        ('offload_activations', False),
        ('min_params_to_wrap', 0),
        ('max_positions', 3000),
    )
    for name, fallback in defaults:
        setattr(args, name, getattr(args, name, fallback))
class RetriBertTokenizer(BertTokenizer):
    """Tokenizer for RetriBERT models; behaviour is inherited from BertTokenizer.

    Only the pretrained-resource tables and the model input names are
    specialised here.
    """
    # Resource maps consumed by the from_pretrained machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    # Tensor names this tokenizer produces for the model forward pass.
    model_input_names = ['input_ids', 'attention_mask']
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
    """Check #include rules on one line and update the running include state.

    Reports: quoted includes with no directory component, duplicate
    includes, includes of .cc files from other packages, include-order
    violations, and includes that break alphabetical order.

    Args:
        filename: Name of the file being linted.
        clean_lines: Object whose ``.lines`` holds the cleansed file lines.
        linenum: Index of the line to check.
        include_state: Tracker for headers seen so far; mutated here.
        error: Callback invoked as error(filename, linenum, category, confidence, message).
    """
    fileinfo = FileInfo(filename)
    line = clean_lines.lines[linenum]
    # Quoted include without a directory component; third-party headers are
    # exempt from this rule.
    match = Match('#include\\s*"([^/]+\\.h)"', line)
    if (match and (not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)))):
        error(filename, linenum, 'build/include', 4, 'Include the directory when naming .h files')
    match = _RE_PATTERN_INCLUDE.search(line)
    if match:
        include = match.group(2)
        # '<' delimiter marks a system header, '"' a project header.
        is_system = (match.group(1) == '<')
        duplicate_line = include_state.FindHeader(include)
        if (duplicate_line >= 0):
            error(filename, linenum, 'build/include', 4, ('"%s" already included at %s:%s' % (include, filename, duplicate_line)))
        elif (include.endswith('.cc') and (os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include))):
            error(filename, linenum, 'build/include', 4, 'Do not include .cc files from other packages')
        elif (not _THIRD_PARTY_HEADERS_PATTERN.match(include)):
            include_state.include_list[(- 1)].append((include, linenum))
            # Enforce the canonical section order: own header, C system,
            # C++ system, then everything else.
            error_message = include_state.CheckNextIncludeOrder(_ClassifyInclude(fileinfo, include, is_system))
            if error_message:
                error(filename, linenum, 'build/include_order', 4, ('%s. Should be: %s.h, c system, c++ system, other.' % (error_message, fileinfo.BaseName())))
            canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
            if (not include_state.IsInAlphabeticalOrder(clean_lines, linenum, canonical_include)):
                error(filename, linenum, 'build/include_alpha', 4, ('Include "%s" not in alphabetical order' % include))
            include_state.SetLastHeader(canonical_include)
class AverageValueListMeter(MultipleAverageValueMeter):
    """Maintains one average-value meter per position of an incoming list."""

    def _add(self, list_value: List[float], **kwargs):
        """Feed each element of *list_value* into the meter keyed by its index (as a string)."""
        for index, value in enumerate(list_value):
            self._meter_dicts[str(index)].add(value)
class CaseConfigParser(ConfigParser.ConfigParser):
    """ConfigParser variant that keeps option names case-sensitive."""

    def optionxform(self, optionstr):
        """Return *optionstr* unchanged instead of lower-casing it."""
        return optionstr
def visualise_ik(solver, env):
    """Solve IK for each target pose and animate the robot in *env*.

    For every pose the solver result is printed, the robot is moved to the
    solved configuration, the goal and end-effector frames are updated, and
    the scene pauses so the solution is visible.

    NOTE(review): relies on module-level globals (``Tep``, ``ets``, ``q0``,
    ``panda``, ``goal_axes``, ``ee_axes``, ``time``) -- confirm they are
    defined before calling.
    """
    for pose in Tep:
        (q, success, iterations, searches, residual) = solver.solve(ets, pose, q0)
        print(f'Successful: {success}, iterations: {iterations}, searches: {searches}, residual: {residual}')
        # Show the solved configuration and the goal / end-effector frames.
        panda.q = q
        goal_axes.T = pose
        ee_axes.T = panda.fkine(q)
        env.step(0)
        # Pause so each solution is visible before moving on.
        time.sleep(2)
class CLIPImageProcessor(BaseImageProcessor):
    """Image processor for CLIP-style models.

    Produces ``pixel_values`` from input images through an optional pipeline
    of RGB conversion, shortest-edge resize, center crop, rescale and
    normalization.  Constructor arguments set the defaults; each step can be
    overridden per call in :meth:`preprocess`.
    """
    model_input_names = ['pixel_values']

    def __init__(self, do_resize: bool=True, size: Dict[(str, int)]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Dict[(str, int)]=None, do_rescale: bool=True, rescale_factor: Union[(int, float)]=(1 / 255), do_normalize: bool=True, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None, do_convert_rgb: bool=True, **kwargs) -> None:
        """Store preprocessing defaults; see the class docstring for the pipeline."""
        super().__init__(**kwargs)
        # Defaults: resize shortest edge to 224, then center-crop to 224x224.
        size = (size if (size is not None) else {'shortest_edge': 224})
        size = get_size_dict(size, default_to_square=False)
        crop_size = (crop_size if (crop_size is not None) else {'height': 224, 'width': 224})
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        # Fall back to the OpenAI CLIP normalization statistics.
        self.image_mean = (image_mean if (image_mean is not None) else OPENAI_CLIP_MEAN)
        self.image_std = (image_std if (image_std is not None) else OPENAI_CLIP_STD)
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[(str, int)], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Resize *image* so its shortest edge matches ``size['shortest_edge']``, keeping aspect ratio.

        Raises:
            ValueError: if ``size`` lacks the ``shortest_edge`` key.
        """
        size = get_size_dict(size, default_to_square=False)
        if ('shortest_edge' not in size):
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[(str, int)], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Center-crop *image* to ``(size['height'], size['width'])``.

        Raises:
            ValueError: if ``size`` lacks the ``height``/``width`` keys.
        """
        size = get_size_dict(size)
        if (('height' not in size) or ('width' not in size)):
            raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[(int, float)], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs):
        """Multiply *image* values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[(float, List[float])], std: Union[(float, List[float])], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Normalize *image* as ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool=None, size: Dict[(str, int)]=None, resample: PILImageResampling=None, do_center_crop: bool=None, crop_size: int=None, do_rescale: bool=None, rescale_factor: float=None, do_normalize: bool=None, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None, do_convert_rgb: bool=None, return_tensors: Optional[Union[(str, TensorType)]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        """Run the full pipeline over one image or a batch and return a BatchFeature.

        Any argument left as None falls back to the instance default set in
        ``__init__``.  Raises ValueError for invalid image types or for an
        enabled step whose required parameters are missing.
        """
        # Resolve per-call overrides against the stored defaults.
        do_resize = (do_resize if (do_resize is not None) else self.do_resize)
        size = (size if (size is not None) else self.size)
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = (resample if (resample is not None) else self.resample)
        do_center_crop = (do_center_crop if (do_center_crop is not None) else self.do_center_crop)
        crop_size = (crop_size if (crop_size is not None) else self.crop_size)
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = (do_rescale if (do_rescale is not None) else self.do_rescale)
        rescale_factor = (rescale_factor if (rescale_factor is not None) else self.rescale_factor)
        do_normalize = (do_normalize if (do_normalize is not None) else self.do_normalize)
        image_mean = (image_mean if (image_mean is not None) else self.image_mean)
        image_std = (image_std if (image_std is not None) else self.image_std)
        do_convert_rgb = (do_convert_rgb if (do_convert_rgb is not None) else self.do_convert_rgb)
        images = make_list_of_images(images)
        if (not valid_images(images)):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        # Validate each enabled step has the parameters it needs.
        if (do_resize and (size is None)):
            raise ValueError('Size must be specified if do_resize is True.')
        if (do_center_crop and (crop_size is None)):
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if (do_rescale and (rescale_factor is None)):
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if (do_normalize and ((image_mean is None) or (image_std is None))):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transforms below operate on numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
def add_choropleth_traces(fig, df, plotting_cols, counties_json=None, visible=False, show_hovertext=False, colorbar_title='Deaths', color_scl=[[0.0, '#FFFFFF'], [0.2, '#B96D67'], [0.4, '#A83C3B'], [0.6, '#8B2222'], [0.8, '#5B0D0D'], [1.0, '#5A2318']], value_labels=['Deaths: ']):
    """Add one county-level Choropleth trace to *fig* per column in *plotting_cols*.

    Negative values are clipped to zero for display.  ``value_labels`` must
    hold either a single label reused for every column or one label per
    column.  When *counties_json* is None the bundled GeoJSON is loaded.

    NOTE(review): ``color_scl`` and ``value_labels`` are mutable default
    arguments; they are only read here, but keep it that way.
    """
    if (counties_json is None):
        counties_json = json.load(open(oj(parentdir, 'data', 'geojson-counties-fips.json'), 'r'))

    def make_choropleth_trace(values, fips, text=None):
        # Hover is driven by the per-county text array (hoverinfo='skip').
        choropleth_trace = go.Choropleth(visible=visible, colorscale=color_scl, z=values, text=text, geojson=counties_json, locations=fips, hoverinfo='skip', colorbar_title=colorbar_title)
        return choropleth_trace
    assert ((len(value_labels) == 1) or (len(value_labels) == len(plotting_cols)))
    for (i, col) in enumerate(plotting_cols):
        # BUG FIX: clip negatives on a copy -- the original assigned through
        # the Series view (values[values < 0] = 0), silently writing zeros
        # back into the caller's DataFrame column.
        values = df[col].clip(lower=0)
        fips = df['countyFIPS']
        if show_hovertext:
            if (len(value_labels) == 1):
                label = value_labels[0]
            else:
                label = value_labels[i]
            text = (((label + values.round().astype(str)) + '<br>') + df['text'].tolist())
        else:
            text = None
        choropleth_trace = make_choropleth_trace(values, fips, text)
        fig.add_trace(choropleth_trace)
    return None
def load_histopathologyGray(args, **kwargs):
    """Load the grayscale Histopathology dataset and build its DataLoaders.

    Reads the pickled splits, flattens each image to 784 values, clips them
    away from 0/1, and attaches dummy zero labels.  Also configures the
    pseudo-input initialisation parameters on ``args``.

    Returns:
        (train_loader, val_loader, test_loader, args)
    """
    # Dataset geometry consumed elsewhere via args.
    args.input_size = [1, 28, 28]
    args.input_type = 'gray'
    args.dynamic_binarization = False
    # NOTE(review): pickle.load on a local repo file -- fine for a bundled
    # dataset, but never point this at untrusted input.
    with open('datasets/HistopathologyGray/histopathology.pkl', 'rb') as f:
        data = pickle.load(f)
    x_train = np.asarray(data['training']).reshape((- 1), (28 * 28))
    x_val = np.asarray(data['validation']).reshape((- 1), (28 * 28))
    x_test = np.asarray(data['test']).reshape((- 1), (28 * 28))
    # Keep intensities strictly inside (0, 1) by a half-step of 1/256.
    x_train = np.clip(x_train, (1.0 / 512.0), (1.0 - (1.0 / 512.0)))
    x_val = np.clip(x_val, (1.0 / 512.0), (1.0 - (1.0 / 512.0)))
    x_test = np.clip(x_test, (1.0 / 512.0), (1.0 - (1.0 / 512.0)))
    # Unlabelled data: use all-zero dummy labels.
    y_train = np.zeros((x_train.shape[0], 1))
    y_val = np.zeros((x_val.shape[0], 1))
    y_test = np.zeros((x_test.shape[0], 1))
    train = data_utils.TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train))
    train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
    validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
    val_loader = data_utils.DataLoader(validation, batch_size=args.test_batch_size, shuffle=False, **kwargs)
    test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
    # NOTE(review): shuffle=True on the test loader is unusual (validation
    # uses shuffle=False) -- confirm this is intentional.
    test_loader = data_utils.DataLoader(test, batch_size=args.test_batch_size, shuffle=True, **kwargs)
    if (args.use_training_data_init == 1):
        # Initialise pseudo-inputs from real training examples plus noise.
        args.pseudoinputs_std = 0.01
        init = x_train[0:args.number_components].T
        args.pseudoinputs_mean = torch.from_numpy((init + (args.pseudoinputs_std * np.random.randn(np.prod(args.input_size), args.number_components)))).float()
    else:
        args.pseudoinputs_mean = 0.4
        args.pseudoinputs_std = 0.05
    return (train_loader, val_loader, test_loader, args)
class NetworkCIFAR(nn.Module):
    """CIFAR classification network assembled from a DARTS-style genotype.

    Stacks ``layers`` cells; the cells at 1/3 and 2/3 depth are reduction
    cells that double the channel count.  When ``auxiliary`` is set, an
    auxiliary classification head is attached at the 2/3 point and used
    during training.
    """

    def __init__(self, C, num_classes, layers, auxiliary, genotype):
        super(NetworkCIFAR, self).__init__()
        self._layers = layers
        self._auxiliary = auxiliary
        # NOTE(review): hard-coded drop-path probability; this attribute is
        # often overwritten by the training loop -- confirm 0.5 is intended
        # as the standalone default.
        self.drop_path_prob = 0.5
        # Stem widens the 3-channel input to stem_multiplier * C channels.
        stem_multiplier = 3
        C_curr = (stem_multiplier * C)
        self.stem = nn.Sequential(nn.Conv2d(3, C_curr, 3, padding=1, bias=False), nn.BatchNorm2d(C_curr))
        (C_prev_prev, C_prev, C_curr) = (C_curr, C_curr, C)
        self.cells = nn.ModuleList()
        reduction_prev = False
        for i in range(layers):
            # Reduction cells at 1/3 and 2/3 depth double the channels.
            if (i in [(layers // 3), ((2 * layers) // 3)]):
                C_curr *= 2
                reduction = True
            else:
                reduction = False
            cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            # Each cell outputs multiplier * C_curr channels.
            (C_prev_prev, C_prev) = (C_prev, (cell.multiplier * C_curr))
            if (i == ((2 * layers) // 3)):
                # Remember the width where the auxiliary head attaches.
                C_to_auxiliary = C_prev
        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadCIFAR(C_to_auxiliary, num_classes)
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)

    def forward(self, input):
        """Return (logits, logits_aux); logits_aux is None unless the
        auxiliary head fires (training mode, auxiliary enabled)."""
        logits_aux = None
        # Both cell inputs start from the stem output.
        s0 = s1 = self.stem(input)
        for (i, cell) in enumerate(self.cells):
            (s0, s1) = (s1, cell(s0, s1, self.drop_path_prob))
            if (i == ((2 * self._layers) // 3)):
                if (self._auxiliary and self.training):
                    logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), (- 1)))
        return (logits, logits_aux)
def find_version(version_file='dassl/__init__.py'):
    """Extract ``__version__`` from *version_file* without importing the package.

    BUG FIX: the original relied on ``exec`` mutating ``locals()`` inside a
    function, which is implementation-defined and breaks under PEP 667
    (Python 3.13).  Execute the file into an explicit namespace dict
    instead.  The path parameter is new but defaulted, so existing
    zero-argument callers are unaffected.

    Args:
        version_file: Path to the module that defines ``__version__``.

    Returns:
        The version string.

    Raises:
        KeyError: if the file does not define ``__version__``.
    """
    namespace = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
class ValidActionsMultiAgentEnv(MultiAgentEnv):
    """Multi-agent environment base that tracks observation-length bookkeeping.

    NOTE(review): only the field initialisation is visible here; subclasses
    or callers are expected to populate both lengths -- presumably the
    augmented vs. original observation sizes.  Confirm against users.
    """

    def __init__(self):
        super(ValidActionsMultiAgentEnv, self).__init__()
        # Both lengths start unknown; filled in later.
        self.observation_length = None
        self.orig_observation_length = None
class Generator(object):
    """DCGAN-style TF1 generator mapping 100-d noise to 64x64x1 images."""

    def __init__(self):
        # Latent dimensionality and output image shape.
        self.z_dim = 100
        self.x_dim = [64, 64, 1]
        # Variable scope under which all generator weights live.
        self.name = 'face_test/dcgan/g_net'

    def __call__(self, z):
        """Build the graph: z -> fc(1024) -> fc(16*16*128) -> two transposed convs -> sigmoid image."""
        with tf.variable_scope(self.name) as vs:
            bs = tf.shape(z)[0]
            fc1 = tc.layers.fully_connected(z, 1024, weights_initializer=tf.random_normal_initializer(stddev=0.02), weights_regularizer=tc.layers.l2_regularizer(2.5e-05), activation_fn=tf.identity)
            fc1 = tc.layers.batch_norm(fc1)
            fc1 = tf.nn.relu(fc1)
            fc2 = tc.layers.fully_connected(fc1, ((16 * 16) * 128), weights_initializer=tf.random_normal_initializer(stddev=0.02), weights_regularizer=tc.layers.l2_regularizer(2.5e-05), activation_fn=tf.identity)
            # Reshape the flat features into a 16x16x128 spatial map.
            fc2 = tf.reshape(fc2, tf.stack([bs, 16, 16, 128]))
            fc2 = tc.layers.batch_norm(fc2)
            fc2 = tf.nn.relu(fc2)
            # Two stride-2 transposed convolutions upsample 16x16 -> 64x64.
            conv1 = tc.layers.convolution2d_transpose(fc2, 64, [4, 4], [2, 2], weights_initializer=tf.random_normal_initializer(stddev=0.02), weights_regularizer=tc.layers.l2_regularizer(2.5e-05), activation_fn=tf.identity)
            conv1 = tc.layers.batch_norm(conv1)
            conv1 = tf.nn.relu(conv1)
            conv2 = tc.layers.convolution2d_transpose(conv1, 1, [4, 4], [2, 2], weights_initializer=tf.random_normal_initializer(stddev=0.02), weights_regularizer=tc.layers.l2_regularizer(2.5e-05), activation_fn=tf.sigmoid)
            conv2 = tf.reshape(conv2, tf.stack([bs, 64, 64, 1]))
            return conv2

    def vars(self):
        """Return all global variables created under this generator's scope."""
        return [var for var in tf.global_variables() if (self.name in var.name)]
def add_robust_features(df):
    """Append robust per-row summary statistics of the ``X`` arrays to *df*.

    Adds two columns -- the 95th percentile and the median absolute
    deviation of each row's ``X`` -- and returns the mutated DataFrame.
    """
    quantiles_95 = [np.quantile(df.iloc[row].X, 0.95) for row in range(len(df))]
    mads = [robust.mad(df.iloc[row].X) for row in range(len(df))]
    df['X_95_quantile'] = np.array(quantiles_95)
    df['X_mad'] = np.array(mads)
    return df
def main():
    """Walk through Halide scheduling primitives (tutorial-style).

    Each ``if True:`` section defines the same ``gradient`` function, applies
    one schedule (default, reorder, split, fuse, tile, vectorize, unroll,
    split-with-tail, parallel tiles), prints the equivalent C loop nest, and
    finally verifies a combined fast schedule against a reference
    computation.

    CONSISTENCY FIX: the two ``fused / 4`` and ``tile_index / 2`` index
    computations used float division while the parallel code below (and the
    check section) uses ``//``; the printed output happened to match only
    because ``%d`` truncates positive floats.  Both now use integer
    division.

    Returns:
        0 on success, -1 if the final verification fails.
    """
    (x, y) = (Var('x'), Var('y'))
    # --- Default (row-major) schedule -----------------------------------
    if True:
        gradient = Func('gradient')
        gradient[(x, y)] = (x + y)
        gradient.trace_stores()
        print('Evaluating gradient row-major')
        output = gradient.realize(4, 4)
        print('Equivalent C:')
        for yy in range(4):
            for xx in range(4):
                print(('Evaluating at x = %d, y = %d: %d' % (xx, yy, (xx + yy))))
        print('\n')
        print('Pseudo-code for the schedule:')
        gradient.print_loop_nest()
        print()
    # --- reorder: column-major traversal --------------------------------
    if True:
        gradient = Func('gradient_col_major')
        print('x, y', x, y)
        gradient[(x, y)] = (x + y)
        gradient.trace_stores()
        gradient.reorder(y, x)
        print('Evaluating gradient column-major')
        output = gradient.realize(4, 4)
        print('Equivalent C:')
        for yy in range(4):
            for xx in range(4):
                print(('Evaluating at x = %d, y = %d: %d' % (xx, yy, (xx + yy))))
        print()
        print('Pseudo-code for the schedule:')
        gradient.print_loop_nest()
        print()
    # --- split: break x into outer/inner loops --------------------------
    if True:
        gradient = Func('gradient_split')
        gradient[(x, y)] = (x + y)
        gradient.trace_stores()
        (x_outer, x_inner) = (Var('x_outer'), Var('x_inner'))
        gradient.split(x, x_outer, x_inner, 2)
        print('Evaluating gradient with x split into x_outer and x_inner ')
        output = gradient.realize(4, 4)
        print('Equivalent C:')
        for yy in range(4):
            for x_outer in range(2):
                for x_inner in range(2):
                    xx = ((x_outer * 2) + x_inner)
                    print(('Evaluating at x = %d, y = %d: %d' % (xx, yy, (xx + yy))))
        print()
        print('Pseudo-code for the schedule:')
        gradient.print_loop_nest()
        print()
    # --- fuse: collapse x and y into a single loop ----------------------
    if True:
        gradient = Func('gradient_fused')
        gradient[(x, y)] = (x + y)
        fused = Var('fused')
        gradient.fuse(x, y, fused)
        print('Evaluating gradient with x and y fused')
        output = gradient.realize(4, 4)
        print('Equivalent C:')
        for fused in range((4 * 4)):
            # FIX: integer division (was float '/'; %d merely truncated it).
            yy = (fused // 4)
            xx = (fused % 4)
            print(('Evaluating at x = %d, y = %d: %d' % (xx, yy, (xx + yy))))
        print()
        print('Pseudo-code for the schedule:')
        gradient.print_loop_nest()
        print()
    # --- tile: split both dims and reorder into 2x2 tiles ---------------
    if True:
        gradient = Func('gradient_tiled')
        gradient[(x, y)] = (x + y)
        gradient.trace_stores()
        (x_outer, x_inner, y_outer, y_inner) = (Var(), Var(), Var(), Var())
        gradient.split(x, x_outer, x_inner, 2)
        gradient.split(y, y_outer, y_inner, 2)
        gradient.reorder(x_inner, y_inner, x_outer, y_outer)
        print('Evaluating gradient in 2x2 tiles')
        output = gradient.realize(4, 4)
        print('Equivalent C:')
        for y_outer in range(2):
            for x_outer in range(2):
                for y_inner in range(2):
                    for x_inner in range(2):
                        xx = ((x_outer * 2) + x_inner)
                        yy = ((y_outer * 2) + y_inner)
                        print(('Evaluating at x = %d, y = %d: %d' % (xx, yy, (xx + yy))))
        print()
        print('Pseudo-code for the schedule:')
        gradient.print_loop_nest()
        print()
    # --- vectorize: compute 4-wide x vectors ----------------------------
    if True:
        gradient = Func('gradient_in_vectors')
        gradient[(x, y)] = (x + y)
        gradient.trace_stores()
        (x_outer, x_inner) = (Var('x_outer'), Var('x_inner'))
        gradient.split(x, x_outer, x_inner, 4)
        gradient.vectorize(x_inner)
        print('Evaluating gradient with x_inner vectorized ')
        output = gradient.realize(8, 4)
        print('Equivalent C:')
        for yy in range(4):
            for x_outer in range(2):
                x_vec = [((x_outer * 4) + 0), ((x_outer * 4) + 1), ((x_outer * 4) + 2), ((x_outer * 4) + 3)]
                val = [(x_vec[0] + yy), (x_vec[1] + yy), (x_vec[2] + yy), (x_vec[3] + yy)]
                print(('Evaluating at <%d, %d, %d, %d>, <%d, %d, %d, %d>: <%d, %d, %d, %d>' % (x_vec[0], x_vec[1], x_vec[2], x_vec[3], yy, yy, yy, yy, val[0], val[1], val[2], val[3])))
        print()
        print('Pseudo-code for the schedule:')
        gradient.print_loop_nest()
        print()
    # --- unroll: duplicate the inner body instead of looping ------------
    if True:
        gradient = Func('gradient_in_vectors')
        gradient[(x, y)] = (x + y)
        gradient.trace_stores()
        (x_outer, x_inner) = (Var('x_outer'), Var('x_inner'))
        gradient.split(x, x_outer, x_inner, 2)
        gradient.unroll(x_inner)
        print('Evaluating gradient unrolled by a factor of two')
        result = gradient.realize(4, 4)
        print('Equivalent C:')
        for yy in range(4):
            for x_outer in range(2):
                if True:
                    x_inner = 0
                    xx = ((x_outer * 2) + x_inner)
                    print(('Evaluating at x = %d, y = %d: %d' % (xx, yy, (xx + yy))))
                if True:
                    x_inner = 1
                    xx = ((x_outer * 2) + x_inner)
                    print(('Evaluating at x = %d, y = %d: %d' % (xx, yy, (xx + yy))))
        print()
        print('Pseudo-code for the schedule:')
        gradient.print_loop_nest()
        print()
    # --- split with tail: width 5 is not divisible by the factor --------
    if True:
        gradient = Func('gradient_split_5x4')
        gradient[(x, y)] = (x + y)
        gradient.trace_stores()
        (x_outer, x_inner) = (Var('x_outer'), Var('x_inner'))
        gradient.split(x, x_outer, x_inner, 2)
        print('Evaluating gradient over a 5x4 box with x split by two ')
        output = gradient.realize(5, 4)
        print('Equivalent C:')
        for yy in range(4):
            for x_outer in range(3):
                for x_inner in range(2):
                    # The last tile is shifted back so it stays in bounds,
                    # recomputing some sites rather than reading past the edge.
                    xx = (x_outer * 2)
                    if (xx > 3):
                        xx = 3
                    xx += x_inner
                    print(('Evaluating at x = %d, y = %d: %d' % (xx, yy, (xx + yy))))
        print()
        print('Pseudo-code for the schedule:')
        gradient.print_loop_nest()
        print()
    # --- parallel: fuse tile loops and run tiles in parallel ------------
    if True:
        gradient = Func('gradient_fused_tiles')
        gradient[(x, y)] = (x + y)
        gradient.trace_stores()
        (x_outer, y_outer) = (Var('x_outer'), Var('y_outer'))
        (x_inner, y_inner) = (Var('x_inner'), Var('y_inner'))
        tile_index = Var('tile_index')
        gradient.tile(x, y, x_outer, y_outer, x_inner, y_inner, 2, 2)
        gradient.fuse(x_outer, y_outer, tile_index)
        gradient.parallel(tile_index)
        print('Evaluating gradient tiles in parallel')
        output = gradient.realize(4, 4)
        print('Equivalent (serial) C:')
        for tile_index in range(4):
            # FIX: integer division (was float '/'; %d merely truncated it).
            y_outer = (tile_index // 2)
            x_outer = (tile_index % 2)
            for y_inner in range(2):
                for x_inner in range(2):
                    yy = ((y_outer * 2) + y_inner)
                    xx = ((x_outer * 2) + x_inner)
                    print(('Evaluating at x = %d, y = %d: %d' % (xx, yy, (xx + yy))))
        print()
        print('Pseudo-code for the schedule:')
        gradient.print_loop_nest()
        print()
    # --- combined fast schedule, verified against reference -------------
    if True:
        gradient_fast = Func('gradient_fast')
        gradient_fast[(x, y)] = (x + y)
        (x_outer, y_outer) = (Var('x_outer'), Var('y_outer'))
        (x_inner, y_inner) = (Var('x_inner'), Var('y_inner'))
        tile_index = Var('tile_index')
        gradient_fast.tile(x, y, x_outer, y_outer, x_inner, y_inner, 256, 256).fuse(x_outer, y_outer, tile_index).parallel(tile_index)
        (x_inner_outer, y_inner_outer) = (Var('x_inner_outer'), Var('y_inner_outer'))
        (x_vectors, y_pairs) = (Var('x_vectors'), Var('y_pairs'))
        gradient_fast.tile(x_inner, y_inner, x_inner_outer, y_inner_outer, x_vectors, y_pairs, 4, 2).vectorize(x_vectors).unroll(y_pairs)
        result = gradient_fast.realize(800, 600)
        print('Checking Halide result against equivalent C...')
        for tile_index in range((4 * 3)):
            y_outer = (tile_index // 4)
            x_outer = (tile_index % 4)
            for y_inner_outer in range((256 // 2)):
                for x_inner_outer in range((256 // 4)):
                    # min_ clamps the last tile so it stays inside the image.
                    xx = (min_((x_outer * 256), (800 - 256)) + (x_inner_outer * 4))
                    x_vec = [(xx + 0), (xx + 1), (xx + 2), (xx + 3)]
                    y_base = (min_((y_outer * 256), (600 - 256)) + (y_inner_outer * 2))
                    if True:
                        yy = (y_base + 0)
                        y_vec = [yy, yy, yy, yy]
                        val = [(x_vec[0] + y_vec[0]), (x_vec[1] + y_vec[1]), (x_vec[2] + y_vec[2]), (x_vec[3] + y_vec[3])]
                        for i in range(4):
                            if (result(x_vec[i], y_vec[i]) != val[i]):
                                print(('There was an error at %d %d!' % (x_vec[i], y_vec[i])))
                                return (- 1)
                    if True:
                        yy = (y_base + 1)
                        y_vec = [yy, yy, yy, yy]
                        val = [(x_vec[0] + y_vec[0]), (x_vec[1] + y_vec[1]), (x_vec[2] + y_vec[2]), (x_vec[3] + y_vec[3])]
                        for i in range(4):
                            if (result(x_vec[i], y_vec[i]) != val[i]):
                                print(('There was an error at %d %d!' % (x_vec[i], y_vec[i])))
        print()
        print('Pseudo-code for the schedule:')
        gradient_fast.print_loop_nest()
        print()
    print('Success!')
    return 0
def update_model(old_model):
    """Upgrade a checkpoint whose WN modules keep separate residual and skip
    1x1 convolutions into the newer fused ``res_skip_layers`` layout.

    For every WN sub-module, ``res_layers[i]`` and ``skip_layers[i]`` are
    concatenated along the output-channel dimension into one fused
    ``res_skip_layers[i]`` convolution, then the old lists are deleted.
    The model is deep-copied first, so ``old_model`` is never mutated.
    (Looks like a WaveGlow-style flow model -- confirm at the call site.)

    Returns the converted copy, or ``old_model`` unchanged when
    ``_check_model_old_version`` says it is already in the new format.
    """
    # Already-converted checkpoints pass through untouched.
    if (not _check_model_old_version(old_model)):
        return old_model
    new_model = copy.deepcopy(old_model)
    for idx in range(0, len(new_model.WN)):
        wavenet = new_model.WN[idx]
        wavenet.res_skip_layers = torch.nn.ModuleList()
        n_channels = wavenet.n_channels
        n_layers = wavenet.n_layers
        for i in range(0, n_layers):
            # Every layer but the last feeds both a residual and a skip
            # branch, so the fused conv needs twice the channels; the final
            # layer only produces skip output.
            if (i < (n_layers - 1)):
                res_skip_channels = (2 * n_channels)
            else:
                res_skip_channels = n_channels
            res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
            # Strip weight normalization first so the raw .weight/.bias
            # tensors can be concatenated directly.
            skip_layer = torch.nn.utils.remove_weight_norm(wavenet.skip_layers[i])
            if (i < (n_layers - 1)):
                res_layer = torch.nn.utils.remove_weight_norm(wavenet.res_layers[i])
                # Channel order in the fused conv: residual first, then skip.
                res_skip_layer.weight = torch.nn.Parameter(torch.cat([res_layer.weight, skip_layer.weight]))
                res_skip_layer.bias = torch.nn.Parameter(torch.cat([res_layer.bias, skip_layer.bias]))
            else:
                res_skip_layer.weight = torch.nn.Parameter(skip_layer.weight)
                res_skip_layer.bias = torch.nn.Parameter(skip_layer.bias)
            # Re-apply weight normalization on the fused convolution.
            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
            wavenet.res_skip_layers.append(res_skip_layer)
        # Remove the old separate layer lists from the converted module.
        del wavenet.res_layers
        del wavenet.skip_layers
    return new_model
# NOTE(review): this bare call looks like a decorator that lost its '@'
# (i.e. '@_elapsed_time(customized_msg=...)' on eval_func) -- confirm against
# the original source.  As written it executes once at import time and its
# return value is discarded.
_elapsed_time(customized_msg='Customized eval_func')
def eval_func(infer_graph):
    """Run the translation graph and report performance and/or BLEU accuracy.

    In pure-performance mode (``FLAGS.benchmark`` and ``FLAGS.mode ==
    'performance'``) only latency/throughput are logged and nothing is
    returned.  Otherwise the translations are decoded, written to
    ``FLAGS.file_out`` and scored with BLEU; the cased or uncased score is
    returned according to ``FLAGS.bleu_variant``.
    """
    dataset = Dataset(FLAGS.input_file, FLAGS.vocab_file)
    # Keys that restore the original sentence order after sorted batching.
    sorted_keys = dataset.sorted_keys
    dataloader = DataLoader(framework='tensorflow', dataset=dataset, batch_size=FLAGS.batch_size, collate_fn=collate_fn)
    input_tensors = list(map(infer_graph.get_tensor_by_name, INPUT_TENSOR_NAMES))
    output_tensors = list(map(infer_graph.get_tensor_by_name, OUTPUT_TENSOR_NAMES))
    session_config = tf.compat.v1.ConfigProto(inter_op_parallelism_threads=FLAGS.num_inter, intra_op_parallelism_threads=FLAGS.num_intra)
    with tf.compat.v1.Session(config=session_config, graph=infer_graph) as sess:
        time_list = []
        translations = []
        warmup = (FLAGS.warmup_steps if (FLAGS.warmup_steps > 0) else 0)
        # -1 means "no iteration cap": consume the whole dataloader.
        iteration = (- 1)
        if (FLAGS.benchmark and (FLAGS.mode == 'performance')):
            iteration = FLAGS.iters
            assert (iteration >= warmup), "'iteration' must be greater than or equal to warmup."
            logger.info(' Start to get performance of the model ')
        else:
            logger.info(' Start to get accuracy and performance of the model ')
        assert (iteration != 0), "'iteration' cannot be zero."
        assert (iteration <= len(dataloader)), "'iteration' must be less than or equal to len(dataloader)."
        if (warmup > 0):
            logger.info('Start to do warm-up with {}/{} (steps/total_iterations) before getting performance.'.format(warmup, iteration))
        else:
            logger.info('Start to get performance with {} iterations.'.format(iteration))
        for (idx, (input_data, _)) in enumerate(dataloader):
            if ((idx == warmup) and (warmup > 0)):
                logger.info('The warm-up is over.')
                logger.info('Start to get performance with {}/{} (steps/total_iterations).'.format((iteration - warmup), iteration))
            feed_dict = {input_tensors[0]: input_data}
            # Time each sess.run; warm-up steps are discarded below.
            time_start = time.time()
            dec_tensor = sess.run(output_tensors, feed_dict)
            duration = (time.time() - time_start)
            time_list.append(duration)
            translations.append(dec_tensor)
            # Stop once the requested iteration count is reached (never true
            # when iteration == -1).
            if (iteration == (idx + 1)):
                break
        # Per-item latency: mean batch time after warm-up, divided by batch size.
        latency = (np.array(time_list[warmup:]).mean() / FLAGS.batch_size)
        if (FLAGS.benchmark and (FLAGS.mode == 'performance')):
            logger.info('Batch-size = {}'.format(FLAGS.batch_size))
            logger.info('Latency: {:.3f} ms'.format((latency * 1000)))
            logger.info('Throughput: {:.3f} items/sec'.format((1.0 / latency)))
    # Accuracy path: decode, write out in original order, then score BLEU.
    if (FLAGS.mode != 'performance'):
        translation_count = 0
        decoded_translations = []
        subtokenizer = Subtokenizer(FLAGS.vocab_file)
        # translations is a list (per step) of per-output-tensor batches of
        # token sequences; flatten all three levels.
        for (i, tr) in enumerate(translations):
            for (j, itr) in enumerate(tr):
                for (k, otr) in enumerate(itr):
                    translation_count += 1
                    decoded_translations.append(_trim_and_decode(otr, subtokenizer))
        logger.info(('Total number of sentences translated:%d' % translation_count))
        tf.io.gfile.makedirs(os.path.dirname(FLAGS.file_out))
        with tf.io.gfile.GFile(FLAGS.file_out, 'w') as f:
            # sorted_keys maps back to the dataset's original sentence order.
            for i in sorted_keys:
                f.write(('%s\n' % decoded_translations[i]))
        # bleu_wrapper relies on a module-level UnicodeRegex instance.
        global uregex
        uregex = UnicodeRegex()
        score_uncased = bleu_wrapper(FLAGS.reference_file, FLAGS.file_out, False)
        logger.info('Case-insensitive results: {:.8f}'.format(score_uncased))
        score_cased = bleu_wrapper(FLAGS.reference_file, FLAGS.file_out, True)
        logger.info('Case-sensitive results: {:.8f}'.format(score_cased))
        assert (FLAGS.bleu_variant in ['uncased', 'cased']), "'bleu_variant' must be one of two options: 'uncased'/'cased'."
        if (FLAGS.bleu_variant == 'uncased'):
            logger.info('Accuracy: {:.8f}'.format(score_uncased))
            return score_uncased
        else:
            logger.info('Accuracy: {:.8f}'.format(score_cased))
            return score_cased
class AsTypeTransformer(AutotabularPreprocessingAlgorithm):
    """Preprocessing step that casts the input data to a fixed dtype.

    Stateless: ``fit`` learns nothing and ``transform`` simply delegates to
    the input's ``astype`` method (numpy arrays, pandas frames and scipy
    sparse matrices all provide one).
    """

    def __init__(self, dtype, random_state: Optional[np.random.RandomState] = None):
        # A target dtype is required for the cast to be meaningful.
        assert (dtype is not None)
        self.dtype = dtype
        # Fix: keep the constructor argument (sklearn convention is to store
        # all __init__ params); it was previously accepted and silently
        # dropped.  The cast itself is deterministic and never uses it.
        self.random_state = random_state
        super(AsTypeTransformer, self).__init__()

    def fit(self, X, y=None):
        """No statistics to learn; return ``self`` for chaining."""
        return self

    def transform(self, X):
        """Return ``X`` cast to the configured dtype."""
        return X.astype(self.dtype)

    # Fix: the two methods below take no ``self`` but were not declared
    # @staticmethod, so calling them on an instance with an explicit argument
    # would have bound ``self`` to ``dataset_properties``.
    @staticmethod
    def get_properties(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> Dict[(str, Optional[Union[(str, int, bool, Tuple)]])]:
        return {'shortname': 'AsTypeTransformer', 'name': 'AsTypeTransformer', 'handles_regression': True, 'handles_classification': True, 'handles_multiclass': True, 'handles_multilabel': True, 'handles_multioutput': True, 'handles_sparse': True, 'handles_dense': True, 'input': (DENSE, SPARSE, UNSIGNED_DATA), 'output': (INPUT,)}

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> ConfigurationSpace:
        # Casting exposes no tunable hyperparameters.
        return ConfigurationSpace()
class MaxPool(PlainNetBasicBlockClass):
    """Parameter-free max-pooling block for the plain-net string format.

    Serialized form: ``MaxPool(block_name|out_channels,kernel_size,stride)``
    (the ``block_name|`` prefix is optional).  Pooling never changes the
    channel count and is counted as contributing zero FLOPs and zero
    parameters to the model statistics.
    """

    def __init__(self, out_channels, kernel_size, stride, no_create=False, **kwargs):
        super(MaxPool, self).__init__(**kwargs)
        # Pooling cannot change the channel count, so in == out.
        self.in_channels = out_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        # "Same"-style padding for odd kernel sizes.
        self.padding = ((kernel_size - 1) // 2)
        self.no_create = no_create
        if (not no_create):
            self.netblock = nn.MaxPool2d(kernel_size=self.kernel_size, stride=self.stride, padding=self.padding)

    def forward(self, x):
        return self.netblock(x)

    def __str__(self):
        return 'MaxPool({},{},{})'.format(self.out_channels, self.kernel_size, self.stride)

    def __repr__(self):
        return 'MaxPool({}|{},{},{})'.format(self.block_name, self.out_channels, self.kernel_size, self.stride)

    def get_output_resolution(self, input_resolution):
        # Floor division; exact only when the resolution divides the stride.
        return (input_resolution // self.stride)

    def get_FLOPs(self, input_resolution):
        # Pooling is treated as free in the FLOPs budget.
        return 0

    def get_model_size(self):
        # No learnable parameters.
        return 0

    def set_in_channels(self, c):
        self.in_channels = c
        self.out_channels = c
        # Recreate the (channel-agnostic) pooling op to mirror the constructor.
        if (not self.no_create):
            self.netblock = nn.MaxPool2d(kernel_size=self.kernel_size, stride=self.stride, padding=self.padding)

    # Fix: this alternate constructor takes ``cls`` but was not declared a
    # classmethod, so ``MaxPool.create_from_str(s)`` bound the string to
    # ``cls`` and failed with a missing-argument error.
    @classmethod
    def create_from_str(cls, s, no_create=False, **kwargs):
        """Parse one serialized MaxPool block from the head of ``s``.

        Returns ``(block, remainder)``, where ``remainder`` is the text after
        this block's closing parenthesis.
        """
        assert MaxPool.is_instance_from_str(s)
        idx = _get_right_parentheses_index_(s)
        assert (idx is not None)
        param_str = s[len('MaxPool('):idx]
        tmp_idx = param_str.find('|')
        if (tmp_idx < 0):
            # No explicit name given: generate a unique one.
            tmp_block_name = 'uuid{}'.format(uuid.uuid4().hex)
        else:
            tmp_block_name = param_str[0:tmp_idx]
            param_str = param_str[(tmp_idx + 1):]
        param_str_split = param_str.split(',')
        out_channels = int(param_str_split[0])
        kernel_size = int(param_str_split[1])
        stride = int(param_str_split[2])
        return (MaxPool(out_channels=out_channels, kernel_size=kernel_size, stride=stride, no_create=no_create, block_name=tmp_block_name), s[(idx + 1):])
class LSGANLoss(nn.Module):
    """Least-squares GAN loss.

    Computes the mean squared difference between discriminator output(s) and
    the target label ``y``; when ``x`` is a list of outputs (e.g. from a
    multi-scale discriminator) the per-output losses are averaged.
    """

    def __init__(self):
        super(LSGANLoss, self).__init__()

    def forward(self, x, y):
        # Normalize the single-output case to a one-element list.
        outputs = x if isinstance(x, list) else [x]
        # Average of the per-output mean squared errors against the target.
        total = sum(torch.mean((out - y) ** 2) for out in outputs)
        return total / len(outputs)
class State():
    # Environment state container holding only type annotations -- presumably
    # this class is decorated as a (chex/flax) dataclass at the definition
    # site; the decorator is not visible in this chunk, so confirm there.
    # Field meanings below are inferred from the names of the project types
    # and should be verified against Node/TimeWindow/PenalityCoeff/etc.
    nodes: Node  # per-node data (coordinates/demands?) -- TODO confirm
    windows: TimeWindow  # per-node time-window constraints -- TODO confirm
    coeffs: PenalityCoeff  # penalty coefficients for violations -- TODO confirm
    vehicles: StateVehicle  # per-vehicle state (positions, loads?) -- TODO confirm
    order: chex.Array  # visit order / route encoding -- TODO confirm
    step_count: chex.Array  # number of environment steps taken so far
    action_mask: chex.Array  # mask of currently valid actions -- assumed boolean
    key: chex.PRNGKey  # JAX PRNG key carried through the episode
def update_cache(cache_dir, names: List[str], bytes_io: List[io.BytesIO]):
    """Persist in-memory buffers to files under ``cache_dir``.

    ``names`` and ``bytes_io`` are parallel lists: the i-th buffer is written
    (binary, overwriting) to ``cache_dir/names[i]``.  ``cache_dir`` may use
    ``~`` and is expanded; it must already exist.
    """
    base = pathlib.Path(cache_dir).expanduser()
    # Pair each filename with its buffer instead of indexing by position.
    for name, buffer in zip(names, bytes_io):
        (base / name).write_bytes(buffer.getvalue())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.