code stringlengths 101 5.91M |
|---|
def main():
    """Read sentences from args.data_path, process them (optionally fanned out
    over several submitit jobs), and write the results to args.out_path.

    Relies on helpers defined elsewhere in this file/module: parse(),
    process_sents(), logger, time, itertools.
    """
    args = parse()
    out_sents = []
    with open(args.data_path, 'r') as fp:
        sent_list = [x.strip() for x in fp.readlines()]
    # BUG FIX: bind submitit up front. The original only assigned it inside
    # the `parallel_process_num > 1` branch, so the `submitit is None` check
    # below raised NameError in the single-process case.
    submitit = None
    if args.parallel_process_num > 1:
        try:
            import submitit
        except ImportError:
            # logger.warn is deprecated in favor of logger.warning.
            logger.warning('submitit is not found and only one job is used to process the data')
            submitit = None
    if args.parallel_process_num == 1 or submitit is None:
        out_sents = process_sents(sent_list, args)
    else:
        # Split the sentences into roughly equal shards, one per job.
        lsize = len(sent_list) // args.parallel_process_num + 1
        executor = submitit.AutoExecutor(folder=args.logdir)
        executor.update_parameters(timeout_min=1000, cpus_per_task=4)
        jobs = [
            executor.submit(process_sents, sent_list[lsize * i:lsize * (i + 1)], args)
            for i in range(args.parallel_process_num)
        ]
        # Poll until every job reports done, then gather results in shard order.
        is_running = True
        while is_running:
            time.sleep(5)
            is_running = sum(job.done() for job in jobs) < len(jobs)
        out_sents = list(itertools.chain.from_iterable(job.result() for job in jobs))
    with open(args.out_path, 'w') as fp:
        fp.write('\n'.join(out_sents) + '\n')
.ignore
def _get_minimal_slice_set(start: Sequence[int], end: Sequence[int], dims: Sequence[int], start_edges: Optional[Sequence[bool]]=None, end_edges: Optional[Sequence[bool]]=None) -> Sequence[Tuple[slice, ...]]:
    """Return a minimal set of slice tuples that together cover exactly the
    flat range of coordinates from `start` to `end` (both inclusive) in a
    tensor of shape `dims`.

    `start_edges[i]` / `end_edges[i]` record whether the coordinate at dim i
    (and every dim after it) lies on the tensor boundary, which lets a whole
    sub-block be taken with a single fat slice.
    """
    # Suffix-product trick: an edge flag only survives if every *later* dim
    # is also on the boundary; multiplying from the right zeroes earlier
    # flags as soon as an inner dim is off the edge.
    def reduce_edge_list(l):
        tally = 1
        for i in range(len(l)):
            reversed_idx = ((- 1) * (i + 1))
            l[reversed_idx] *= tally
            tally = l[reversed_idx]
    if (start_edges is None):
        start_edges = [(s == 0) for s in start]
        reduce_edge_list(start_edges)
    if (end_edges is None):
        end_edges = [(e == (d - 1)) for (e, d) in zip(end, dims)]
        reduce_edge_list(end_edges)
    # Base cases: zero dims -> a single empty tuple; one dim -> one slice.
    if (len(start) == 0):
        return [tuple()]
    elif (len(start) == 1):
        return [(slice(start[0], (end[0] + 1)),)]
    slices = []
    path = []
    # Dims where start and end agree can be addressed exactly; stop at the
    # first dim where they diverge.
    for (s, e) in zip(start, end):
        if (s == e):
            path.append(slice(s, (s + 1)))
        else:
            break
    path = tuple(path)
    divergence_idx = len(path)
    # start == end: the path alone selects the single target element.
    if (divergence_idx == len(dims)):
        return [tuple(path)]
    # Sub-block containing `start`: everything from start to that block's end.
    def upper():
        sdi = start[divergence_idx]
        return [((path + (slice(sdi, (sdi + 1)),)) + s) for s in _get_minimal_slice_set(start[(divergence_idx + 1):], [(d - 1) for d in dims[(divergence_idx + 1):]], dims[(divergence_idx + 1):], start_edges=start_edges[(divergence_idx + 1):], end_edges=[1 for _ in end_edges[(divergence_idx + 1):]])]
    # Sub-block containing `end`: everything from that block's start to end.
    def lower():
        edi = end[divergence_idx]
        return [((path + (slice(edi, (edi + 1)),)) + s) for s in _get_minimal_slice_set([0 for _ in start[(divergence_idx + 1):]], end[(divergence_idx + 1):], dims[(divergence_idx + 1):], start_edges=[1 for _ in start_edges[(divergence_idx + 1):]], end_edges=end_edges[(divergence_idx + 1):])]
    # If both endpoints of the diverging dim sit on tensor edges, one slice
    # covers everything; otherwise recurse into the ragged end block(s) and
    # take the contiguous middle (if any) in one piece.
    if (start_edges[divergence_idx] and end_edges[divergence_idx]):
        slices.append((path + (slice(start[divergence_idx], (end[divergence_idx] + 1)),)))
    elif start_edges[divergence_idx]:
        slices.append((path + (slice(start[divergence_idx], end[divergence_idx]),)))
        slices.extend(lower())
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append((path + (slice((start[divergence_idx] + 1), (end[divergence_idx] + 1)),)))
    else:
        slices.extend(upper())
        middle_ground = (end[divergence_idx] - start[divergence_idx])
        if (middle_ground > 1):
            slices.append((path + (slice((start[divergence_idx] + 1), end[divergence_idx]),)))
        slices.extend(lower())
    return [tuple(s) for s in slices]
class LevelMapper(object):
    """Maps each box to an FPN pyramid level via
    floor(lvl0 + log2(sqrt(area) / s0 + eps)), clamped to [k_min, k_max];
    the result is returned 0-based relative to k_min.
    """
    def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-06):
        # Coarsest / finest usable pyramid levels.
        self.k_min = k_min
        self.k_max = k_max
        # Canonical box scale and the level it is assigned to.
        self.s0 = canonical_scale
        self.lvl0 = canonical_level
        # Keeps log2 finite for zero-area boxes.
        self.eps = eps
    def __call__(self, boxlists):
        # Per-box scale: sqrt of box area, concatenated across all boxlists.
        s = torch.sqrt(cat([boxlist.area() for boxlist in boxlists]))
        target_lvls = torch.floor((self.lvl0 + torch.log2(((s / self.s0) + self.eps))))
        target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max)
        # Shift so that level k_min maps to index 0.
        return (target_lvls.to(torch.int64) - self.k_min)
def get_random(self, level):
(xmin, ymin, xmax, ymax) = torch.tensor |
def rotate_and_omega_vec(vR, vT, vz, R, z, phi=0.0, t=0.0, rot=None, omega=None, omegadot=None, omegadotdot=None):
    """Transform cylindrical velocities (vR, vT, vz) at (R, phi, z) into a
    frame rotated by matrix `rot` that rotates at angular rate `omega`
    (optionally with first/second time derivatives omegadot/omegadotdot),
    then express the resulting velocity back in the original orientation.

    NOTE(review): `rot` and `omega` default to None but are used
    unconditionally below (numpy.dot(rot, ...), omega * t) — callers must
    always supply both; confirm whether the defaults should be required args.
    """
    # To rectangular coordinates and velocities.
    (x, y, z) = coords.cyl_to_rect(R, phi, z)
    (vx, vy, vz) = coords.cyl_to_rect_vec(vR, vT, vz, phi=phi)
    # Rotate position and velocity into the primed (rotated) frame.
    xyzp = numpy.dot(rot, numpy.array([x, y, z]))
    (Rp, phip, zp) = coords.rect_to_cyl(xyzp[0], xyzp[1], xyzp[2])
    vxyzp = numpy.dot(rot, numpy.array([vx, vy, vz]))
    (vRp, vTp, vzp) = coords.rect_to_cyl_vec(vxyzp[0], vxyzp[1], vxyzp[2], xyzp[0], xyzp[1], xyzp[2])
    # Constant rotation: advance azimuth by omega*t, add co-rotation speed.
    phip += (omega * t)
    vTp += (omega * Rp)
    if (not (omegadot is None)):
        # Linearly varying rate: phi += omegadot * t^2 / 2.
        phip += ((omegadot * (t ** 2.0)) / 2.0)
        vTp += ((omegadot * t) * Rp)
    if (not (omegadotdot is None)):
        # Quadratically varying rate: phi += omegadotdot * t^3 / 6.
        phip += ((omegadotdot * (t ** 3.0)) / 6.0)
        vTp += (((omegadotdot * (t ** 2.0)) / 2.0) * Rp)
    # Back to rectangular in the primed frame, undo the rotation with rot.T,
    # and re-express the velocity in cylindrical coordinates.
    (xp, yp, zp) = coords.cyl_to_rect(Rp, phip, zp)
    (vxp, vyp, vzp) = coords.cyl_to_rect_vec(vRp, vTp, vzp, phi=phip)
    xyz = numpy.dot(rot.T, numpy.array([xp, yp, zp]))
    vxyz = numpy.dot(rot.T, numpy.array([vxp, vyp, vzp]))
    (vR, vT, vz) = coords.rect_to_cyl_vec(vxyz[0], vxyz[1], vxyz[2], xyz[0], xyz[1], xyz[2])
    return (vR, vT, vz)
class TestTorchModel(unittest.TestCase):
    """Compares a traced GPT-J torchscript model against the int8 compiled graph."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: unittest invokes setUpClass/tearDownClass on the class, so
        # they must be classmethods (they were plain instance-style defs,
        # which raise TypeError when unittest calls them with no arguments).
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def test_1(self):
        # Test relies on POSIX paths and a pre-traced model; skip on Windows.
        if is_win():
            return
        # Write the pattern-switch config consumed by compile() below.
        with open('int8_pattern.conf', 'w') as f:
            data = {'pattern_switch': {'Int8BF16MixedPrecisionChecker': True, 'MultiHeadAttention': True}}
            json.dump(data, f)
        init_input_ids = torch.ones(32).long()
        init_input_ids[0] = 7454
        input_ids = init_input_ids.clone()
        attention_mask = torch.ones((len(input_ids) + 1))
        attention_mask[0] = 0
        position_ids = torch.arange(len(input_ids))
        # Dummy KV caches for 28 layers x (key, value); the torchscript model
        # and the compiled graph use transposed cache layouts.
        past_key_value_torch = tuple([(torch.zeros([1, 16, 32, 256]), torch.zeros([1, 16, 32, 256])) for i in range(28)])
        past_key_value = tuple([(torch.zeros([1, 32, 16, 256]), torch.zeros([1, 32, 16, 256])) for i in range(28)])
        input_ids = input_ids[0:1].unsqueeze(0)
        attention_mask = attention_mask.unsqueeze(0)
        position_ids = position_ids[0:1].unsqueeze(0)
        pt_file = '/tf_dataset2/models/nlp_toolkit/gpt-j/best_model_bk.pt'
        traced_model = torch.jit.load(pt_file)
        ref_out = traced_model(input_ids, past_key_value_torch, attention_mask, position_ids)
        graph = compile(pt_file, './int8_pattern.conf')
        # input_ids, then 56 identical dummy cache tensors (28 layers x k/v),
        # then the attention mask. Replaces a hand-written 56-term list.
        cache_inputs = [past_key_value[0][0].numpy() for _ in range(56)]
        out = graph.inference([input_ids.numpy()] + cache_inputs + [attention_mask.numpy()])
        diff = cmpData(ref_out[0].detach().numpy(), out['ret2995.1'])
        print(diff)
        os.remove('int8_pattern.conf')
class TestFileChunker(unittest.TestCase):
    """Tests for fairseq.file_chunker_utils over a temp file of identical lines."""
    _tmpdir: Optional[str] = None
    _tmpfile: Optional[str] = None
    _line_content = 'Hello, World\n'
    _num_bytes = None
    _num_lines = 200
    _num_splits = 20

    @classmethod
    def setUpClass(cls) -> None:
        # BUG FIX: unittest calls setUpClass on the class; without
        # @classmethod the plain function receives no argument and raises
        # TypeError before any test runs.
        cls._num_bytes = len(cls._line_content.encode('utf-8'))
        cls._tmpdir = tempfile.mkdtemp()
        with open(os.path.join(cls._tmpdir, 'test.txt'), 'w') as f:
            cls._tmpfile = f.name
            for _i in range(cls._num_lines):
                f.write(cls._line_content)
            f.flush()

    @classmethod
    def tearDownClass(cls) -> None:
        # Clean up the temp dir created in setUpClass.
        if cls._tmpdir is not None:
            shutil.rmtree(cls._tmpdir)

    def test_find_offsets(self):
        """Offsets are 0, evenly spaced line-aligned byte positions, then EOF."""
        from fairseq.file_chunker_utils import find_offsets
        offsets = find_offsets(self._tmpfile, self._num_splits)
        self.assertEqual(len(offsets), (self._num_splits + 1))
        (zero, *real_offsets, last) = offsets
        self.assertEqual(zero, 0)
        for (i, o) in enumerate(real_offsets):
            self.assertEqual(o, (self._num_bytes + ((((i + 1) * self._num_bytes) * self._num_lines) / self._num_splits)))
        self.assertEqual(last, (self._num_bytes * self._num_lines))

    def test_readchunks(self):
        """Every chunk yields ~num_lines/num_splits copies of the line."""
        from fairseq.file_chunker_utils import Chunker, find_offsets
        offsets = find_offsets(self._tmpfile, self._num_splits)
        for (start, end) in zip(offsets, offsets[1:]):
            with Chunker(self._tmpfile, start, end) as lines:
                all_lines = list(lines)
                num_lines = (self._num_lines / self._num_splits)
                self.assertAlmostEqual(len(all_lines), num_lines, delta=1)
                self.assertListEqual(all_lines, [self._line_content for _ in range(len(all_lines))])
def test_map():
    """dm.map(method='legacy') must equal sklearn's per-row AP averaged."""
    y_true = torch.tensor([[True, False, True, False, True], [False, False, False, True, True], [True, True, False, True, False], [False, True, True, False, True]])
    y_pred = torch.tensor([[0.2, 0.8, 0.5, 0.4, 0.3], [0.8, 0.2, 0.3, 0.9, 0.4], [0.2, 0.4, 0.5, 0.9, 0.8], [0.8, 0.2, 0.9, 0.3, 0.7]])
    # Reference value: mean of row-wise sklearn average precision.
    # (Renamed the accumulator from `map`, which shadowed the builtin.)
    per_row_ap = [average_precision_score(y_true[i], y_pred[i]) for i in range(y_true.shape[0])]
    expected_map = np.mean(per_row_ap)
    assert (dm.map(y_true, y_pred, method='legacy') == pytest.approx(expected_map))
class EvaluationResult():
    """Immutable-by-convention record of one hyperparameter evaluation:
    the configuration tried, the task it ran on, its losses, wall time,
    SNR, and where the trained model was saved.
    """

    def __init__(self, configuration: ParameterConfiguration, task_id, task, train_loss, val_loss, time_in_sec, snr, model_path):
        # Pure storage — no computation or validation happens here.
        self.task = task
        self.task_id = task_id
        self.configuration = configuration
        self.model_path = model_path
        self.train_loss = train_loss
        self.val_loss = val_loss
        self.snr = snr
        self.time_in_sec = time_in_sec
class SqlObserver(RunObserver):
    """Sacred run observer that persists runs to a SQL database via
    SQLAlchemy. Every lifecycle event updates self.run and commits through
    save(), which serializes commits under a lock.
    """

    @classmethod
    def create(cls, url, echo=False, priority=DEFAULT_SQL_PRIORITY):
        """Alternate constructor: build engine + session from a database URL.

        BUG FIX: this was missing @classmethod, so SqlObserver.create(url)
        bound the URL to `cls` and failed.
        """
        engine = sa.create_engine(url, echo=echo)
        return cls(engine, sessionmaker(bind=engine)(), priority)

    def __init__(self, engine, session, priority=DEFAULT_SQL_PRIORITY):
        self.engine = engine
        self.session = session
        self.priority = priority
        # The Run row for the currently observed run; set by started/queued.
        self.run = None
        # Guards session.commit() against concurrent event callbacks.
        self.lock = Lock()

    def started_event(self, ex_info, command, host_info, start_time, config, meta_info, _id):
        """Create a RUNNING Run row (allocating an id if _id is None)."""
        Base.metadata.create_all(self.engine)
        sql_exp = Experiment.get_or_create(ex_info, self.session)
        sql_host = Host.get_or_create(host_info, self.session)
        if (_id is None):
            # Next id = highest existing id + 1 (0 for an empty table).
            i = self.session.query(Run).order_by(Run.id.desc()).first()
            _id = (0 if (i is None) else (i.id + 1))
        self.run = Run(run_id=str(_id), start_time=start_time, config=json.dumps(flatten(config)), command=command, priority=meta_info.get('priority', 0), comment=meta_info.get('comment', ''), experiment=sql_exp, host=sql_host, status='RUNNING')
        self.session.add(self.run)
        self.save()
        # NOTE: when _id == 0 this falls through to self.run.run_id ('0').
        return (_id or self.run.run_id)

    def queued_event(self, ex_info, command, host_info, queue_time, config, meta_info, _id):
        """Create a QUEUED Run row (same id allocation as started_event)."""
        Base.metadata.create_all(self.engine)
        sql_exp = Experiment.get_or_create(ex_info, self.session)
        sql_host = Host.get_or_create(host_info, self.session)
        if (_id is None):
            i = self.session.query(Run).order_by(Run.id.desc()).first()
            _id = (0 if (i is None) else (i.id + 1))
        self.run = Run(run_id=str(_id), config=json.dumps(flatten(config)), command=command, priority=meta_info.get('priority', 0), comment=meta_info.get('comment', ''), experiment=sql_exp, host=sql_host, status='QUEUED')
        self.session.add(self.run)
        self.save()
        return (_id or self.run.run_id)

    def heartbeat_event(self, info, captured_out, beat_time, result):
        """Persist live info/stdout/heartbeat/intermediate result."""
        self.run.info = json.dumps(flatten(info))
        self.run.captured_out = captured_out
        self.run.heartbeat = beat_time
        self.run.result = result
        self.save()

    def completed_event(self, stop_time, result):
        self.run.stop_time = stop_time
        self.run.result = result
        self.run.status = 'COMPLETED'
        self.save()

    def interrupted_event(self, interrupt_time, status):
        self.run.stop_time = interrupt_time
        self.run.status = status
        self.save()

    def failed_event(self, fail_time, fail_trace):
        self.run.stop_time = fail_time
        # fail_trace arrives as a list of traceback lines.
        self.run.fail_trace = '\n'.join(fail_trace)
        self.run.status = 'FAILED'
        self.save()

    def resource_event(self, filename):
        # Resources are deduplicated per file via get_or_create.
        res = Resource.get_or_create(filename, self.session)
        self.run.resources.append(res)
        self.save()

    def artifact_event(self, name, filename):
        # Artifacts are always new rows (no dedup).
        a = Artifact.create(name, filename)
        self.run.artifacts.append(a)
        self.save()

    def save(self):
        """Commit the session; serialized so event callbacks don't race."""
        with self.lock:
            self.session.commit()

    def query(self, _id):
        """Return the run with primary key _id as JSON."""
        run = self.session.query(Run).filter_by(id=_id).first()
        return run.to_json()

    def __eq__(self, other):
        # Observers are equal iff they share engine and session.
        if isinstance(other, SqlObserver):
            return ((self.engine == other.engine) and (self.session == other.session))
        return False

    def __ne__(self, other):
        return (not self.__eq__(other))
class _NCEBatch(object):
def __init__(self, context_size):
self.context_ids = ([] if (context_size > 0) else None)
self.doc_ids = []
self.target_noise_ids = []
def __len__(self):
return len(self.doc_ids)
def torch_(self):
if (self.context_ids is not None):
self.context_ids = torch.LongTensor(self.context_ids)
self.doc_ids = torch.LongTensor(self.doc_ids)
self.target_noise_ids = torch.LongTensor(self.target_noise_ids)
def cuda_(self):
if (self.context_ids is not None):
self.context_ids = self.context_ids.cuda()
self.doc_ids = self.doc_ids.cuda()
self.target_noise_ids = self.target_noise_ids.cuda() |
class CondConvResidual(InvertedResidual):
    """Inverted-residual block whose convolutions are conditionally
    parameterized (CondConv): a routing head computes per-sample expert
    weights from globally pooled features, and every conv in the block
    consumes those weights.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, se_ratio=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, num_experts=0, drop_path_rate=0.0):
        self.num_experts = num_experts
        # Forwarded so the parent builds its convs with this many experts.
        conv_kwargs = dict(num_experts=self.num_experts)
        super(CondConvResidual, self).__init__(in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, pad_type=pad_type, act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size, pw_kernel_size=pw_kernel_size, se_ratio=se_ratio, se_kwargs=se_kwargs, norm_layer=norm_layer, norm_kwargs=norm_kwargs, conv_kwargs=conv_kwargs, drop_path_rate=drop_path_rate)
        # Routing head: pooled features -> one weight per expert (sigmoid'd).
        self.routing_fn = nn.Linear(in_chs, self.num_experts)
    def forward(self, x):
        residual = x
        # Global average pool -> per-sample routing features.
        pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1)
        routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs))
        # Pointwise expand -> depthwise -> (optional SE) -> pointwise linear,
        # each conditioned on the same routing weights.
        x = self.conv_pw(x, routing_weights)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.conv_dw(x, routing_weights)
        x = self.bn2(x)
        x = self.act2(x)
        if (self.se is not None):
            x = self.se(x)
        x = self.conv_pwl(x, routing_weights)
        x = self.bn3(x)
        if self.has_residual:
            # Stochastic depth on the residual branch during training.
            if (self.drop_path_rate > 0.0):
                x = drop_path(x, self.drop_path_rate, self.training)
            x += residual
        return x
def avg_then_mlp_gnn(make_mlp_fn, epsilon):
    """Build a node-block GNN whose aggregation is a segment mean followed
    by an MLP built with make_mlp_fn (epsilon is forwarded to the block).
    """
    return NodeBlockGNN(
        AggThenMLPBlock(tf.unsorted_segment_mean, make_mlp_fn, epsilon))
class Attribute(Param):
    """Reflection parameter serialized as an XML attribute on a node."""

    def __init__(self, xml_var, value_type, required=True, default=None, var=None):
        Param.__init__(self, xml_var, value_type, required, default, var)
        self.type = 'attribute'

    def set_from_string(self, obj, value):
        """Parse `value` with this attribute's value_type and set it on obj."""
        setattr(obj, self.var, self.value_type.from_string(value))

    def add_to_xml(self, obj, node):
        """Serialize obj's attribute onto the XML `node`.

        Raises if a required attribute is unset; an optional unset attribute
        falls back to the declared default, and is omitted entirely when the
        default is also None.
        """
        value = getattr(obj, self.var)
        if (value is None):
            if self.required:
                raise Exception('Required attribute not set in object: {}'.format(self.var))
            # BUG FIX: the original guarded this with `not skip_default`,
            # a name defined nowhere, so any optional unset attribute raised
            # NameError. Optional attributes now fall back to the default.
            value = self.default
        if (value is not None):
            node.set(self.xml_var, self.value_type.to_string(value))
class CLIPTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) CLIP tokenizer.

    Rejects tokenizers whose pre_tokenizer is not a Sequence (the pre-4.17
    format) and patches the backend decode to strip the end-of-word suffix.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = CLIPTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<|endoftext|>', bos_token='<|startoftext|>', eos_token='<|endoftext|>', pad_token='<|endoftext|>', **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        # Older (pre-4.17) serialized tokenizers use a different pre-tokenizer
        # layout and would decode incorrectly; refuse them outright.
        if (not isinstance(self.backend_tokenizer.pre_tokenizer, pre_tokenizers.Sequence)):
            raise ValueError('The `backend_tokenizer` provided does not match the expected format. The CLIP tokenizer has been heavily modified from transformers version 4.17.0. You need to convert the tokenizer you are using to be compatible with this version.The easiest way to do so is `CLIPTokenizerFast.from_pretrained("path_to_local_folder_or_hub_repo, from_slow=True)`. If you want to use your existing tokenizer, you will have to revert to a version prior to 4.17.0 of transformers.')
        self._wrap_decode_method_backend_tokenizer()
    def _wrap_decode_method_backend_tokenizer(self):
        """Monkey-patch backend decode to replace the end-of-word suffix with
        a space and strip the result (CLIP's BPE marks word ends with a
        suffix rather than word starts with a prefix)."""
        orig_decode_method = self.backend_tokenizer.decode
        def new_decode_method(*args, **kwargs):
            text = orig_decode_method(*args, **kwargs)
            text = text.replace(self.backend_tokenizer.model.end_of_word_suffix, ' ').strip()
            return text
        self.backend_tokenizer.decode = new_decode_method
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Wrap sequences as BOS seq0 EOS [EOS seq1 EOS] (pairs share a
        doubled EOS separator)."""
        bos_token = [self.bos_token_id]
        eos_token = [self.eos_token_id]
        if (token_ids_1 is None):
            return ((bos_token + token_ids_0) + eos_token)
        return (((((bos_token + token_ids_0) + eos_token) + eos_token) + token_ids_1) + eos_token)
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """CLIP has no token-type embeddings: return all zeros, sized like
        the special-token-wrapped input."""
        bos_token = [self.bos_token_id]
        eos_token = [self.eos_token_id]
        if (token_ids_1 is None):
            return (len(((bos_token + token_ids_0) + eos_token)) * [0])
        return (len((((((bos_token + token_ids_0) + eos_token) + eos_token) + token_ids_1) + eos_token)) * [0])
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Save the backend model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
def fourier(x, terms=10):
    """Fourier feature expansion of a torch tensor `x`.

    For each i in [0, terms) appends sin(2*pi*i*x) and cos(2*pi*i*x) along
    the last axis, so the output's last dimension is 2*terms times x's
    (the i=0 pair is the constant 0/1 pair).

    Args:
        x: torch tensor of any shape.
        terms: number of frequencies to expand.

    Returns:
        Tensor of the concatenated sin/cos features.
    """
    # BUG FIX: the original called x.get_shape(), a TensorFlow API, on a
    # tensor it otherwise treats as torch (torch.sin/torch.cos/torch.cat),
    # which raises AttributeError; torch exposes .dim() instead.
    axis = x.dim() - 1
    features = []
    for i in range(terms):
        features.append(torch.sin(2 * math.pi * i * x))
        features.append(torch.cos(2 * math.pi * i * x))
    return torch.cat(features, axis)
def load_bounding_boxes(object_detection_params, subject_path_list, slice_axis, constrast_lst):
    """Load (or generate) the bounding-box dictionary for object detection.

    Resolution order:
      1. No detection params or no model path -> empty dict.
      2. Existing bounding_boxes.json under PATH_OUTPUT -> load and return it.
      3. Detection model path exists -> generate the file from the subjects.
      4. Model path set but missing on disk -> RuntimeError.

    Note: `constrast_lst` (sic) keeps its original misspelled name to stay
    call-compatible with existing keyword callers.
    """
    bounding_box_dict = {}
    if ((object_detection_params is None) or (object_detection_params[ObjectDetectionParamsKW.OBJECT_DETECTION_PATH] is None)):
        return bounding_box_dict
    bounding_box_path = Path(object_detection_params.get(ObjectDetectionParamsKW.PATH_OUTPUT), 'bounding_boxes.json')
    if bounding_box_path.exists():
        # Cached result from a previous run.
        with bounding_box_path.open(mode='r') as fp:
            bounding_box_dict = json.load(fp)
    elif ((object_detection_params[ObjectDetectionParamsKW.OBJECT_DETECTION_PATH] is not None) and Path(object_detection_params.get(ObjectDetectionParamsKW.OBJECT_DETECTION_PATH)).exists()):
        # No cache: run the detection model over the subjects to build one.
        bounding_box_dict = generate_bounding_box_file(subject_path_list, object_detection_params[ObjectDetectionParamsKW.OBJECT_DETECTION_PATH], object_detection_params[ObjectDetectionParamsKW.PATH_OUTPUT], object_detection_params[ObjectDetectionParamsKW.GPU_IDS], slice_axis, constrast_lst, safety_factor=object_detection_params[ObjectDetectionParamsKW.SAFETY_FACTOR])
    elif (object_detection_params[ObjectDetectionParamsKW.OBJECT_DETECTION_PATH] is not None):
        raise RuntimeError("Path to object detection model doesn't exist")
    return bounding_box_dict
class DFTMRecovery(Recovery):
    """Recovery policy that forecasts per-host CPU with a local regression
    (loess) over a sliding utilization history and migrates containers away
    from hosts predicted to saturate.
    """
    def __init__(self, hosts, env, training=False):
        super().__init__()
        self.hosts = hosts
        # '' selects the simulator backend, anything else a real framework.
        self.env_name = ('simulator' if (env == '') else 'framework')
        self.training = training
        # utilHistory[t][i] = CPU utilization of host i at step t.
        self.utilHistory = []
        # Look-back window (in steps) for the loess fit.
        self.lr_bw = 10
    def updateUtilHistory(self):
        # Snapshot the current CPU of every host and append to the history.
        hostUtils = []
        for host in self.env.hostlist:
            hostUtils.append(host.getCPU())
        self.utilHistory.append(hostUtils)
    def predict_utilizations(self):
        """Select hosts expected to overload at the next step.

        NOTE(review): selectedHostIDs holds (predictedCPU, hostIndex) tuples
        but the return takes i[0] — the predicted CPU, not the host index.
        This looks like it should be i[1]; confirm against what
        MMTContainerSelection expects as a host selection.
        """
        # Not enough history yet: fall back to threshold-based selection.
        if (len(self.utilHistory) < self.lr_bw):
            return self.env.scheduler.ThresholdHostSelection()
        selectedHostIDs = []
        x = list(range(self.lr_bw))
        for (i, host) in enumerate(self.env.hostlist):
            hostL = [self.utilHistory[j][i] for j in range(len(self.utilHistory))]
            # Degree-1 loess on the last lr_bw samples, then linear
            # extrapolation one step past the window.
            (_, estimates) = loess(x, hostL[(- self.lr_bw):], poly_degree=1, alpha=0.6)
            weights = estimates['b'].values[(- 1)]
            predictedCPU = (weights[0] + (weights[1] * (self.lr_bw + 1)))
            # 20% safety margin on the forecast before flagging a host.
            if ((1.2 * predictedCPU) >= 100):
                selectedHostIDs.append((predictedCPU, i))
        selectedHostIDs = sorted(selectedHostIDs, reverse=True)
        # Cap the selection at 4% of the fleet, keeping the worst offenders.
        if (len(selectedHostIDs) > (0.04 * self.hosts)):
            selectedHostIDs = selectedHostIDs[:int((0.04 * self.hosts))]
        return [i[0] for i in selectedHostIDs]
    def recover_decision(self, original_decision):
        """Overlay migrations off predicted-hot hosts onto original_decision."""
        self.updateUtilHistory()
        host_selection = self.predict_utilizations()
        if (host_selection == []):
            return original_decision
        container_selection = self.env.scheduler.MMTContainerSelection(host_selection)
        target_selection = self.env.scheduler.FirstFitPlacement(container_selection)
        # Current container -> host allocation (-1 = unallocated).
        container_alloc = ([(- 1)] * len(self.env.hostlist))
        for c in self.env.containerlist:
            if (c and (c.getHostID() != (- 1))):
                container_alloc[c.id] = c.getHostID()
        # Override decisions only where the target differs from current host.
        decision_dict = dict(original_decision)
        for (cid, hid) in target_selection:
            if (container_alloc[cid] != hid):
                decision_dict[cid] = hid
        return list(decision_dict.items())
    def run_model(self, time_series, original_decision):
        # time_series is unused by this policy; kept for interface parity.
        return self.recover_decision(original_decision)
class LabeledVideoDataset(Dataset):
    """Map-style dataset over (video_path, info_dict) pairs: loads a video,
    samples clips with a ClipSampler, decodes them, and applies an optional
    transform. Failed loads are retried on random other indices.
    """
    # Give up (and raise) after this many consecutive failed video loads.
    _MAX_CONSECUTIVE_FAILURES = 10
    def __init__(self, labeled_video_paths: list[tuple[(str, (dict | None))]], clip_sampler: ClipSampler, transform: (Callable[([dict], Any)] | None)=None, decode_audio: bool=True, decoder: str='pyav', decoder_args: DictConfig={}) -> None:
        # NOTE(review): decoder_args uses a mutable default ({}); safe only if
        # it is never mutated — confirm, or switch callers to pass their own.
        self._decode_audio = decode_audio
        self._transform = transform
        self._clip_sampler = clip_sampler
        self._labeled_videos = labeled_video_paths
        self._decoder = decoder
        self._decoder_args = decoder_args
        self._database = None
        # (video, info_dict) for the video currently being decoded.
        self._loaded_video_label = None
        # Most recently decoded clip dict (or list-merged dict).
        self._loaded_clip = None
        # Where the clip sampler should resume within the current video.
        self._next_clip_start_time = 0.0
        self.video_path_handler = VideoPathHandler()
    def __len__(self):
        return len(self._labeled_videos)
    def __getitem__(self, idx: int) -> dict:
        """Load clips for index idx, retrying random indices on failure."""
        for i_try in range(self._MAX_CONSECUTIVE_FAILURES):
            try:
                (video_path, info_dict) = self._labeled_videos[idx]
                video = self.video_path_handler.video_from_path(video_path, decode_audio=self._decode_audio, decoder=self._decoder, num_frames=info_dict['num_frames'], **self._decoder_args)
                self._loaded_video_label = (video, info_dict)
            except Exception as e:
                # Unreadable video: warn and retry with a random other index.
                old_idx = idx
                idx = random.randint(0, (len(self._labeled_videos) - 1))
                warnings.warn('Failed to load video idx {} with error: {}; trial {}'.format(old_idx, e, i_try))
                continue
            sample_dicts = self._load_clips_recursively(video, info_dict, idx, i_try)
            # Always release the decoder and reset per-video sampler state.
            self._loaded_video_label[0].close()
            self._loaded_video_label = None
            self._next_clip_start_time = 0.0
            self._clip_sampler.reset()
            if (sample_dicts is None):
                # Clip decode failed: retry a different video.
                idx = random.randint(0, (len(self._labeled_videos) - 1))
                continue
            return sample_dicts
        else:
            # for/else: only reached when every retry failed.
            raise RuntimeError(f'Failed to load video after {self._MAX_CONSECUTIVE_FAILURES} retries.')
    def _load_clips_recursively(self, video: Any, info_dict: dict[(str, Any)], idx: int, i_try: int) -> ((Any | list[Any]) | None):
        """Drain the clip sampler over one video.

        Returns a single sample dict when the sampler yields exactly one
        clip, a list of dicts for multi-clip sampling, or None on any
        decode failure.
        """
        is_last_clip = False
        is_first_clip = True
        sample_dicts = []
        while (not is_last_clip):
            (clip_start, clip_end, clip_index, aug_index, is_last_clip) = self._clip_sampler(self._next_clip_start_time, video.duration, info_dict)
            sample_dict = self._load_clip(video, clip_start, clip_end, clip_index, aug_index, info_dict, idx, i_try)
            if (sample_dict is None):
                return None
            # Multi-clip samplers return a list of flags; the last one wins.
            is_last_clip = (is_last_clip[(- 1)] if isinstance(is_last_clip, list) else is_last_clip)
            if is_last_clip:
                if is_first_clip:
                    # Single-clip fast path: return the dict itself, unwrapped.
                    return sample_dict
                else:
                    if (type(sample_dict) is list):
                        sample_dicts.extend(sample_dict)
                    else:
                        sample_dicts.append(sample_dict)
                    return sample_dicts
            elif (type(sample_dict) is list):
                sample_dicts.extend(sample_dict)
            else:
                sample_dicts.append(sample_dict)
            is_first_clip = False
    def _load_clip(self, video: Any, clip_start: (float | list[float]), clip_end: (float | list[float]), clip_index: int, aug_index: int, info_dict: dict[(str, Any)], idx: int, i_try: int) -> (dict[(str, Any)] | None):
        """Decode one clip (or a batch of sub-clips) and build its sample dict.

        For list-valued clip_start, decodes each sub-clip and merges them
        key-wise into lists; decoding is skipped (clip reused) when aug_index
        indicates a repeated augmentation of the same clip.
        """
        if isinstance(clip_start, list):
            if (aug_index[0] == 0):
                self._loaded_clip = {}
                loaded_clip_list = []
                for i in range(len(clip_start)):
                    clip_dict = video.get_clip(clip_start[i], clip_end[i])
                    if ((clip_dict is None) or (clip_dict['video'] is None)):
                        self._loaded_clip = None
                        break
                    loaded_clip_list.append(clip_dict)
                if (self._loaded_clip is not None):
                    # Merge per-sub-clip dicts into key -> list-of-values.
                    for key in loaded_clip_list[0].keys():
                        self._loaded_clip[key] = [x[key] for x in loaded_clip_list]
        elif (aug_index == 0):
            self._loaded_clip = video.get_clip(clip_start, clip_end)
        self._next_clip_start_time = clip_end
        video_is_null = ((self._loaded_clip is None) or (self._loaded_clip['video'] is None))
        # NOTE(review): the nested duplicate check below is redundant — the
        # outer `if video_is_null:` already guards it.
        if video_is_null:
            if video_is_null:
                warnings.warn('Failed to load clip {} idx {}; trial {}'.format(video.name, idx, i_try))
            return None
        frames = self._loaded_clip['video']
        audio_samples = self._loaded_clip['audio']
        # Audio is included only when present; info_dict keys are merged in.
        sample_dict = {'input': frames, 'video_name': video.name, 'idx': idx, 'clip_index': clip_index, 'aug_index': aug_index, **info_dict, **({'audio': audio_samples} if (audio_samples is not None) else {})}
        if (self._transform is not None):
            sample_dict = self._transform(sample_dict)
        return sample_dict
def cus_sample(feat, **kwargs):
    """Bilinear resampling wrapper around F.interpolate.

    Exactly one keyword — either `size` or `scale_factor` — must be given;
    align_corners is fixed to False.
    """
    keys = list(kwargs.keys())
    assert len(keys) == 1 and keys[0] in ['size', 'scale_factor']
    return F.interpolate(feat, **kwargs, mode='bilinear', align_corners=False)
def add_metrics_to_dict(metrics, history, dot_str):
    """Append each metric value onto history under the key `name + dot_str`.

    `history` must already map every suffixed name to a list (e.g. be a
    defaultdict(list)); it is mutated in place.
    """
    for metric_name in metrics:
        history[metric_name + dot_str].append(metrics[metric_name])
class SVHN():
    """SVHN dataset wrapper: builds train/test transform pipelines and
    DataLoaders. Expects args to provide data_dir, data_fraction,
    batch_size and test_batch_size.
    """
    def __init__(self, args, normalize=False):
        self.args = args
        # Channel-wise normalization to [-1, 1]; appended only on request.
        self.norm_layer = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        # Train-time augmentation: random crop with padding + horizontal flip.
        self.tr_train = [transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor()]
        self.tr_test = [transforms.ToTensor()]
        if normalize:
            self.tr_train.append(self.norm_layer)
            self.tr_test.append(self.norm_layer)
        self.tr_train = transforms.Compose(self.tr_train)
        self.tr_test = transforms.Compose(self.tr_test)
    def data_loaders(self, **kwargs):
        """Return (train_loader, test_loader); the train loader samples a
        random args.data_fraction subset of the SVHN training split."""
        trainset = datasets.SVHN(root=os.path.join(self.args.data_dir, 'SVHN'), split='train', download=True, transform=self.tr_train)
        # Random subset of the training set sized by data_fraction.
        subset_indices = np.random.permutation(np.arange(len(trainset)))[:int((self.args.data_fraction * len(trainset)))]
        train_loader = DataLoader(trainset, batch_size=self.args.batch_size, sampler=SubsetRandomSampler(subset_indices), **kwargs)
        testset = datasets.SVHN(root=os.path.join(self.args.data_dir, 'SVHN'), split='test', download=True, transform=self.tr_test)
        test_loader = DataLoader(testset, batch_size=self.args.test_batch_size, shuffle=False, **kwargs)
        # ('Traing' typo lives in a runtime string; left unchanged here.)
        print(f'Traing loader: {len(train_loader.dataset)} images, Test loader: {len(test_loader.dataset)} images')
        return (train_loader, test_loader)
def print_row(row, colwidth=10, latex=False):
    """Print one table row with each cell left-justified and truncated to
    colwidth. With latex=True, cells are joined by ' & ' and a LaTeX row
    break is printed after the row (note: end_ is passed as a second
    positional argument to print, not as end=, so a newline still follows).
    """
    if latex:
        sep = ' & '
        end_ = '\\\\'
    else:
        sep = ' '
        end_ = ''

    def format_val(cell):
        # Fixed precision for floats so numeric columns stay aligned.
        if np.issubdtype(type(cell), np.floating):
            cell = '{:.10f}'.format(cell)
        return str(cell).ljust(colwidth)[:colwidth]

    print(sep.join(format_val(cell) for cell in row), end_)
def create_simplicial_complex_from_cliques(cliques):
    """Build the 1- and 2-skeleton of a simplicial complex from cliques.

    Every pair within a clique becomes a graph edge; every sorted 3-subset
    of a clique of size >= 3 becomes a triangle. Prints a warning when the
    resulting graph is not connected.

    Returns:
        (node_neighbors_dict, triangles_list): a dict mapping each node to
        its neighbor-keys view, and the triangles as a list of 3-lists.
    """
    graph = nx.Graph()
    triangle_set = set()
    for clique in cliques:
        size = len(clique)
        if size == 2:
            graph.add_edge(clique[0], clique[1])
            continue
        if size == 3:
            triangle_set.add(tuple(sorted(clique)))
        else:
            # Larger cliques contribute every 3-subset as a triangle.
            for tri in combinations(clique, 3):
                triangle_set.add(tuple(sorted(tri)))
        # All pairwise edges of the clique (add_edge is idempotent).
        for (u, v) in combinations(clique, 2):
            graph.add_edge(u, v)
    if not nx.is_connected(graph):
        print('not connected')
    node_neighbors_dict = {node: graph[node].keys() for node in graph.nodes()}
    return (node_neighbors_dict, [list(tri) for tri in triangle_set])
def test_vote_head():
    """End-to-end smoke test of the VoteNet head: forward pass shapes, loss
    positivity, single-sample NMS, and bbox decoding. Requires CUDA."""
    if (not torch.cuda.is_available()):
        pytest.skip('test requires GPU and torch+cuda')
    _setup_seed(0)
    vote_head_cfg = _get_vote_head_cfg('votenet/votenet_8x8_scannet-3d-18class.py')
    self = build_head(vote_head_cfg).cuda()
    # Forward on random backbone features: 2 samples, 256 points, 256 channels.
    fp_xyz = [torch.rand([2, 256, 3], dtype=torch.float32).cuda()]
    fp_features = [torch.rand([2, 256, 256], dtype=torch.float32).cuda()]
    fp_indices = [torch.randint(0, 128, [2, 256]).cuda()]
    input_dict = dict(fp_xyz=fp_xyz, fp_features=fp_features, fp_indices=fp_indices)
    ret_dict = self(input_dict, 'vote')
    assert (ret_dict['center'].shape == torch.Size([2, 256, 3]))
    assert (ret_dict['obj_scores'].shape == torch.Size([2, 256, 2]))
    assert (ret_dict['size_res'].shape == torch.Size([2, 256, 18, 3]))
    assert (ret_dict['dir_res'].shape == torch.Size([2, 256, 1]))
    # Loss computation on random ground truth (18 classes, 10 boxes each).
    points = [torch.rand([40000, 4], device='cuda') for i in range(2)]
    gt_bbox1 = LiDARInstance3DBoxes(torch.rand([10, 7], device='cuda'))
    gt_bbox2 = LiDARInstance3DBoxes(torch.rand([10, 7], device='cuda'))
    gt_bboxes = [gt_bbox1, gt_bbox2]
    gt_labels = [torch.randint(0, 18, [10], device='cuda') for i in range(2)]
    pts_semantic_mask = [torch.randint(0, 18, [40000], device='cuda') for i in range(2)]
    pts_instance_mask = [torch.randint(0, 10, [40000], device='cuda') for i in range(2)]
    losses = self.loss(ret_dict, points, gt_bboxes, gt_labels, pts_semantic_mask, pts_instance_mask)
    assert (losses['vote_loss'] >= 0)
    assert (losses['objectness_loss'] >= 0)
    assert (losses['semantic_loss'] >= 0)
    assert (losses['center_loss'] >= 0)
    assert (losses['dir_class_loss'] >= 0)
    assert (losses['dir_res_loss'] >= 0)
    assert (losses['size_class_loss'] >= 0)
    assert (losses['size_res_loss'] >= 0)
    # Single-sample multiclass NMS on random scores/boxes.
    obj_scores = torch.rand([256], device='cuda')
    sem_scores = torch.rand([256, 18], device='cuda')
    points = torch.rand([40000, 3], device='cuda')
    bbox = torch.rand([256, 7], device='cuda')
    input_meta = dict(box_type_3d=DepthInstance3DBoxes)
    (bbox_selected, score_selected, labels) = self.multiclass_nms_single(obj_scores, sem_scores, bbox, points, input_meta)
    assert (bbox_selected.shape[0] >= 0)
    assert (bbox_selected.shape[1] == 7)
    assert (score_selected.shape[0] >= 0)
    assert (labels.shape[0] >= 0)
    # Bbox decoding from a full prediction dict.
    points = torch.rand([1, 40000, 4], device='cuda')
    seed_points = torch.rand([1, 1024, 3], device='cuda')
    seed_indices = torch.randint(0, 40000, [1, 1024], device='cuda')
    vote_points = torch.rand([1, 1024, 3], device='cuda')
    vote_features = torch.rand([1, 256, 1024], device='cuda')
    aggregated_points = torch.rand([1, 256, 3], device='cuda')
    # NOTE(review): torch.range is deprecated and, unlike arange, includes
    # the endpoint (257 float values here) — confirm whether
    # torch.arange(0, 256) was intended.
    aggregated_indices = torch.range(0, 256, device='cuda')
    obj_scores = torch.rand([1, 256, 2], device='cuda')
    center = torch.rand([1, 256, 3], device='cuda')
    dir_class = torch.rand([1, 256, 1], device='cuda')
    dir_res_norm = torch.rand([1, 256, 1], device='cuda')
    dir_res = torch.rand([1, 256, 1], device='cuda')
    size_class = torch.rand([1, 256, 18], device='cuda')
    size_res = torch.rand([1, 256, 18, 3], device='cuda')
    sem_scores = torch.rand([1, 256, 18], device='cuda')
    bbox_preds = dict(seed_points=seed_points, seed_indices=seed_indices, vote_points=vote_points, vote_features=vote_features, aggregated_points=aggregated_points, aggregated_indices=aggregated_indices, obj_scores=obj_scores, center=center, dir_class=dir_class, dir_res_norm=dir_res_norm, dir_res=dir_res, size_class=size_class, size_res=size_res, sem_scores=sem_scores)
    results = self.get_bboxes(points, bbox_preds, [input_meta])
    assert (results[0][0].tensor.shape[0] >= 0)
    assert (results[0][0].tensor.shape[1] == 7)
    assert (results[0][1].shape[0] >= 0)
    assert (results[0][2].shape[0] >= 0)
def convert_leg_pose_to_motor_angles(robot_class, leg_poses):
    """Convert an 8- or 12-dim leg pose into motor angles for `robot_class`.

    Pads abduction entries (with 0) when an 8-dim pose is given to a
    12-motor robot, or drops every third entry when a 12-dim pose is given
    to an 8-motor robot, then defers to the robot class's own conversion
    unless the robot is a Laikago.

    Args:
        robot_class: Robot class providing neutral angles and, for
            non-Laikago robots, `convert_leg_pose_to_motor_angles`.
        leg_poses: Sequence of 8 or 12 leg-pose values. Not mutated.

    Returns:
        List (or robot-class-specific sequence) of motor angles.

    Raises:
        ValueError: If `leg_poses` does not have length 8 or 12.
    """
    if len(leg_poses) not in [8, 12]:
        raise ValueError('Dimension of the leg pose provided is not 8 or 12.')
    neutral_motor_angles = get_neutral_motor_angles(robot_class)
    # FIX: work on a copy. The original code aliased `leg_poses` and then
    # inserted/deleted through the alias, silently mutating the caller's list.
    motor_angles = list(leg_poses)
    if (len(neutral_motor_angles) == 12) and (len(leg_poses) == 8):
        # Pad the missing abduction actions with zeros.
        for i in _ABDUCTION_ACTION_INDEXES:
            motor_angles.insert(i, 0)
    elif (len(neutral_motor_angles) == 8) and (len(leg_poses) == 12):
        # Drop the abduction entries (every third value).
        del motor_angles[::3]
    if str(robot_class) == str(laikago.Laikago):
        # NOTE(review): these scale factors are assigned but unused in the
        # visible code — presumably the Laikago-specific conversion that used
        # them was removed upstream; confirm before relying on this branch.
        swing_scale = _LAIKAGO_SWING_CONVERSION_MULTIPLIER
        extension_scale = _LAIKAGO_EXTENSION_CONVERSION_MULTIPLIER
    else:
        motor_angles = robot_class.convert_leg_pose_to_motor_angles(motor_angles)
    return motor_angles
def remove_input_tensor_hook_recursively(module):
    """Recursively detach and delete `__hook_handle__` from a module tree.

    Modules that are instances of `Basic_ops` are left untouched; all other
    modules have their stored hook handle removed and the attribute deleted.
    Recurses into every entry of `module._modules`.
    """
    if not isinstance(module, Basic_ops):
        module.__hook_handle__.remove()
        del module.__hook_handle__
    for child in module._modules.values():
        remove_input_tensor_hook_recursively(child)
class FIDInceptionA(models.inception.InceptionA):
    """InceptionA block variant used for FID computation.

    Identical to torchvision's InceptionA except that the pooling branch
    uses `count_include_pad=False`, matching the original FID reference
    implementation.
    """

    def __init__(self, in_channels, pool_features):
        super().__init__(in_channels, pool_features)

    def forward(self, x):
        out_1x1 = self.branch1x1(x)
        out_5x5 = self.branch5x5_2(self.branch5x5_1(x))
        out_3x3dbl = self.branch3x3dbl_3(
            self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        # Patched pooling: exclude zero padding from the average.
        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
                              count_include_pad=False)
        out_pool = self.branch_pool(pooled)
        return torch.cat([out_1x1, out_5x5, out_3x3dbl, out_pool], 1)
def test_tell_fails_when_ask_dqd_not_called(scheduler_fixture):
    """tell_dqd must raise RuntimeError when ask_dqd was never called."""
    scheduler = scheduler_fixture[0]
    with pytest.raises(RuntimeError):
        scheduler.tell_dqd(None, None, None)
def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
    """Load a TF 2.x checkpoint into a PyTorch model via its TF twin class.

    Instantiates the matching `TF<ClassName>` model, builds it (so layer
    names exist), loads the checkpoint weights by name, then transfers the
    weights into the PyTorch model.
    """
    try:
        import tensorflow as tf
        import torch
    except ImportError:
        logger.error('Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see and for installation instructions.')
        raise
    import transformers
    logger.info('Loading TensorFlow weights from {}'.format(tf_checkpoint_path))
    # Resolve the TensorFlow counterpart, e.g. BertModel -> TFBertModel.
    tf_model_class = getattr(transformers, 'TF' + pt_model.__class__.__name__)
    tf_model = tf_model_class(pt_model.config)
    if tf_inputs is None:
        tf_inputs = tf_model.dummy_inputs
    if tf_inputs is not None:
        # Forward pass builds the network so load_weights can match names.
        tf_model(tf_inputs, training=False)
    tf_model.load_weights(tf_checkpoint_path, by_name=True)
    return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys)
def test_crunch_function_optimize_png_unoptimized_file(filename):
    """optimize_png must create a '<name>-crunch' output file for the input."""
    source_path = filename
    crunched_path = filename + '-crunch'
    # Start from a clean slate in case a previous run left output behind.
    if os.path.exists(crunched_path):
        os.remove(crunched_path)
    src.crunch.optimize_png(source_path)
    assert os.path.exists(crunched_path) is True
    # Clean up the generated file.
    if os.path.exists(crunched_path):
        os.remove(crunched_path)
def tune_delta(loc, scale, y, ops=['add', 'mult'], delta_vals=[1e-08, 1e-07, 1e-06, 1e-05, 0.0001, 0.001, 0.01, 0.1, 0.0, 1.0, 10.0, 100.0, 1000.0], multipliers=[1.0, 2.5, 5.0], scoring='nll', verbose=0, logger=None):
    """Grid-search an additive or multiplicative adjustment to `scale`.

    Evaluates every (op, delta, multiplier) combination with
    `util.eval_uncertainty` and returns the combination with the lowest
    score.

    Returns:
        Tuple of (best_delta, best_op) where best_delta is
        delta * multiplier of the best-scoring row.
    """
    assert ops == ['add', 'mult']
    assert loc.shape == scale.shape == y.shape
    records = []
    for op in ops:
        for delta in delta_vals:
            for multiplier in multipliers:
                # Multiplying by zero would collapse the scale entirely.
                if (op == 'mult') and (delta == 0.0):
                    continue
                adjustment = delta * multiplier
                if op == 'add':
                    temp_scale = scale + adjustment
                else:
                    temp_scale = scale * adjustment
                score = util.eval_uncertainty(y=y, loc=loc, scale=temp_scale, metric=scoring)
                records.append({'delta': delta, 'op': op, 'multiplier': multiplier, 'score': score})
    # Lower score is better; the best row sits at the top after sorting.
    df = pd.DataFrame(records).sort_values('score', ascending=True)
    best_row = df.iloc[0]
    best_delta = best_row['delta'] * best_row['multiplier']
    best_op = best_row['op']
    if verbose > 0:
        message = f'''
delta gridsearch:
{df}'''
        if logger:
            logger.info(message)
        else:
            print(message)
    return (best_delta, best_op)
class Kepler():
    """Physical constants in CGS units for stellar-structure calculations.

    FIX: several numeric literals in the source were garbled to ``.0`` /
    ``3.`` (``c``, ``pie``, ``solrad``, ``year``); with ``solrad = .0`` the
    class body raised ZeroDivisionError at import time via
    ``solradi = 1 / solrad``.  The standard CGS values are restored below.
    """
    solmass = 1.9892e+33    # solar mass [g]
    gee = 6.67e-08          # gravitational constant [cm^3 g^-1 s^-2]
    n0 = 6.02254e+23        # Avogadro's number [mol^-1]
    sigt = 6.65205e-25      # Thomson scattering cross-section [cm^2]
    k = 1.38054e-16         # Boltzmann constant [erg K^-1]
    a = 7.5648e-15          # radiation constant [erg cm^-3 K^-4]
    me = 9.10908e-28        # electron mass [g]
    h = 6.62559e-27         # Planck constant [erg s]
    c = 2.997925e+10        # speed of light [cm s^-1] — restored (source had ".0")
    pie = 3.1415927         # pi — restored (source had "3.")
    solmassi = 1 / solmass  # inverse solar mass
    solrad = 6.96e+10       # solar radius [cm] — restored (source had ".0")
    solradi = 1 / solrad    # inverse solar radius
    penmex = 0.             # NOTE(review): value lost in the source dump — confirm upstream
    year = 3.1558e+07       # year [s] — restored (source had ".0")
    pie43 = (pie * 4) / 3   # 4*pi/3
    rk = k * n0             # gas constant [erg K^-1 mol^-1]
    sb = (a * c) / 4        # Stefan-Boltzmann constant [erg cm^-2 s^-1 K^-4]
class Mosaic():
    """Grid "mosaic" figure of image tiles laid out on 2D coordinates.

    A Mosaic can be built from a ``SlideMap`` (plus tfrecord paths), from a
    list of ``(tfrecord, index)`` tuples, or from raw images plus explicit
    coordinates.  Points are bucketed into a grid; one tile is selected per
    grid cell and drawn at that cell's position.
    """

    def __init__(self, images: Union[(SlideMap, List[np.ndarray], np.ndarray, List[Tuple[(str, int)]])], coords: Optional[Union[(Tuple[(int, int)], np.ndarray)]]=None, *, tfrecords: List[str]=None, normalizer: Optional[Union[(str, 'StainNormalizer')]]=None, normalizer_source: Optional[str]=None, **grid_kwargs) -> None:
        """Prepare points from the given source and build the mosaic grid.

        Args:
            images: SlideMap, list/array of images, or list of
                (tfrecord path, index) tuples.
            coords: Point coordinates; required unless `images` is a SlideMap.
            tfrecords: Paths to tfrecords (required with a SlideMap).
            normalizer: Stain normalizer method name or instance.
            normalizer_source: Source image for the normalizer.
            **grid_kwargs: Forwarded to `generate_grid`.
        """
        self.tile_point_distances = []
        self.slide_map = None
        self.tfrecords = tfrecords
        self.grid_images = {}
        self.grid_coords = []
        self.grid_idx = []
        # Dispatch on the form of `images`.
        if isinstance(images, SlideMap):
            if tfrecords is None:
                raise ValueError('If building a Mosaic from a SlideMap, must provide paths to tfrecords via keyword arg tfrecords=...')
            elif isinstance(tfrecords, list) and (not len(tfrecords)):
                raise errors.TFRecordsNotFoundError()
            self._prepare_from_slidemap(images)
        elif isinstance(images[0], (tuple, list)) and isinstance(images[0][0], str):
            self._prepare_from_tuples(images, coords)
        else:
            assert coords is not None
            assert len(images) == len(coords)
            self._prepare_from_coords(images, coords)
        # Image format is detected from the first tfrecord when available.
        if self.tfrecords is not None:
            (_, self.img_format) = sf.io.detect_tfrecord_format(self.tfrecords[0])
        else:
            self.img_format = 'numpy'
        if isinstance(normalizer, str):
            log.info(f'Using realtime {normalizer} normalization')
            self.normalizer = sf.norm.autoselect(method=normalizer, source=normalizer_source)
        elif normalizer is not None:
            self.normalizer = normalizer
        else:
            self.normalizer = None
        self.generate_grid(**grid_kwargs)

    def _prepare_from_coords(self, images: Union[(List[np.ndarray], np.ndarray)], coords: List[Union[(Tuple[(int, int)], np.ndarray)]]) -> None:
        """Build the point list from raw images + explicit coordinates."""
        log.info('Loading coordinates and plotting points...')
        self.images = images
        self.mapped_tiles = []
        self.points = [{'coord': coords[i], 'global_index': i, 'category': 'none', 'has_paired_tile': False} for i in range(len(coords))]

    def _prepare_from_slidemap(self, slide_map: SlideMap, *, tile_meta: Optional[Dict]=None) -> None:
        """Build the point DataFrame from a SlideMap."""
        log.info('Loading coordinates from SlideMap and plotting points...')
        self.slide_map = slide_map
        self.mapped_tiles = {}
        self.points = slide_map.data.copy()
        self.points['has_paired_tile'] = False
        self.points['points_index'] = self.points.index
        self.points['alpha'] = 1.0
        if tile_meta:
            # Attach per-tile metadata, looked up by slide and tfrecord index.
            self.points['meta'] = self.points.apply((lambda row: tile_meta[row.slide][row.tfr_index]), axis=1)
        log.debug('Loading complete.')

    def _prepare_from_tuples(self, images: List[Tuple[(str, int)]], coords: List[Union[(Tuple[(int, int)], np.ndarray)]]) -> None:
        """Build the point list from (tfrecord, index) tuples + coordinates."""
        log.info('Loading coordinates from SlideMap and plotting points...')
        self.mapped_tiles = {}
        self.points = []
        for (i, (tfr, idx)) in enumerate(images):
            # NOTE(review): the slide/tfrecord ternaries below mirror the
            # source exactly, but the branch selection looks inverted
            # (slide name vs. path) — confirm against the upstream project.
            self.points.append({'coord': np.array(coords[i]), 'global_index': i, 'category': 'none', 'slide': (tfr if (self.tfrecords is not None) else sf.util.path_to_name(tfr)), 'tfrecord': (tfr if (self.tfrecords is None) else self._get_tfrecords_from_slide(tfr)), 'tfrecord_index': idx, 'has_paired_tile': None})

    def _get_image_from_point(self, index):
        """Return the raw image for a point, from tfrecord or memory."""
        point = self.points.loc[index]
        if 'tfr_index' in point:
            tfr = self._get_tfrecords_from_slide(point.slide)
            tfr_idx = point.tfr_index
            if not tfr:
                log.error(f'TFRecord {tfr} not found in slide_map')
                return None
            image = sf.io.get_tfrecord_by_index(tfr, tfr_idx)['image_raw']
        else:
            image = self.images[index]
        return image

    def _get_tfrecords_from_slide(self, slide: str) -> Optional[str]:
        """Return the tfrecord path whose basename matches `slide`, or None."""
        for tfr in self.tfrecords:
            if sf.util.path_to_name(tfr) == slide:
                return tfr
        log.error(f'Unable to find TFRecord path for slide [green]{slide}')
        return None

    def _initialize_figure(self, figsize, background):
        """Create the matplotlib figure/axes for the mosaic."""
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=figsize)
        self.ax = fig.add_subplot(111, aspect='equal')
        self.ax.set_facecolor(background)
        fig.tight_layout()
        plt.subplots_adjust(left=0.02, bottom=0, right=0.98, top=1, wspace=0.1, hspace=0)
        self.ax.set_aspect('equal', 'box')
        self.ax.set_xticklabels([])
        self.ax.set_yticklabels([])

    def _plot_tile_image(self, image, extent, alpha=1):
        """Draw a single tile image at the given extent."""
        return self.ax.imshow(image, aspect='equal', origin='lower', extent=extent, zorder=99, alpha=alpha)

    def _finalize_figure(self):
        """Autoscale the axes after all tiles are placed."""
        self.ax.autoscale(enable=True, tight=None)

    def _record_point(self, index):
        """Record a placed tile in `mapped_tiles` (dict or list form)."""
        point = self.points.loc[index]
        if 'tfr_index' in point:
            tfr = self._get_tfrecords_from_slide(point.slide)
            if tfr is None:
                return
            if tfr in self.mapped_tiles:
                self.mapped_tiles[tfr] += [point.tfr_index]
            else:
                self.mapped_tiles[tfr] = [point.tfr_index]
        else:
            self.mapped_tiles += [index]

    def decode_kwargs(self):
        """Keyword args for decoding tile images (normalizer + format)."""
        return dict(normalizer=self.normalizer, img_format=self.img_format)

    def points_at_grid_index(self, x, y):
        """Return the points assigned to grid cell (x, y)."""
        return self.points.loc[((self.points.grid_x == x) & (self.points.grid_y == y))]

    def selected_points(self):
        """Return the points currently marked as selected."""
        return self.points.loc[self.points.selected]

    def generate_grid(self, num_tiles_x: int=50, tile_meta: Optional[Dict]=None, tile_select: str='first', max_dist: Optional[float]=None):
        """Bucket points into a grid and select one point per grid cell.

        Args:
            num_tiles_x: Number of grid columns.
            tile_meta: Per-tile metadata; required for 'centroid' selection.
            tile_select: 'first', 'nearest', or 'centroid'.
            max_dist: For 'nearest', maximum allowed distance to the cell
                center, in units of `tile_size`.
        """
        if tile_select not in ('nearest', 'centroid', 'first'):
            raise TypeError(f'Unknown tile selection method {tile_select}')
        else:
            log.debug(f'Tile selection method: {tile_select}')
        self.num_tiles_x = num_tiles_x
        self.grid_images = {}
        x_points = self.points.x.values
        y_points = self.points.y.values
        max_x = x_points.max()
        min_x = x_points.min()
        max_y = y_points.max()
        min_y = y_points.min()
        log.debug(f'Loaded {len(self.points)} points.')
        self.tile_size = (max_x - min_x) / self.num_tiles_x
        self.num_tiles_y = int((max_y - min_y) / self.tile_size)
        self.grid_idx = np.reshape(np.dstack(np.indices((self.num_tiles_x, self.num_tiles_y))), ((self.num_tiles_x * self.num_tiles_y), 2))
        _grid_offset = np.array([((self.tile_size / 2) + min_x), ((self.tile_size / 2) + min_y)])
        self.grid_coords = (self.grid_idx * self.tile_size) + _grid_offset
        points_added = 0  # NOTE(review): never incremented; debug log below always reports 0
        x_bins = np.arange(min_x, max_x, ((max_x - min_x) / self.num_tiles_x))[1:]
        y_bins = np.arange(min_y, max_y, ((max_y - min_y) / self.num_tiles_y))[1:]
        self.points['grid_x'] = np.digitize(self.points.x.values, x_bins, right=False)
        self.points['grid_y'] = np.digitize(self.points.y.values, y_bins, right=False)
        self.points['selected'] = False
        log.debug(f'{points_added} points added to grid')

        def select_nearest_points(idx):
            # Select one point for grid cell `idx` (nearest to the cell
            # center, or the meta-centroid of the cell's points).
            (grid_x, grid_y) = (self.grid_idx[idx][0], self.grid_idx[idx][1])
            grid_coords = self.grid_coords[idx]
            _points = self.points_at_grid_index(grid_x, grid_y)
            if not _points.empty:
                if tile_select == 'nearest':
                    point_coords = np.stack([_points.x.values, _points.y.values], axis=(-1))
                    # FIX: `axis` must be an int; the source passed axis=1.0
                    # (a float), which numpy rejects with a TypeError.
                    dist = np.linalg.norm((point_coords - grid_coords), ord=2, axis=1)
                    if max_dist is not None:
                        masked_dist = np.ma.masked_array(dist, (dist >= (max_dist * self.tile_size)))
                        if masked_dist.count():
                            self.points.loc[(_points.index[np.argmin(masked_dist)], 'selected')] = True
                    else:
                        self.points.loc[(_points.index[np.argmin(dist)], 'selected')] = True
                elif not tile_meta:
                    raise errors.MosaicError('Mosaic centroid option requires tile_meta.')
                else:
                    centroid_index = get_centroid_index(_points.meta.values)
                    self.points.loc[(_points.index[centroid_index], 'selected')] = True

        start = time.time()
        if tile_select == 'first':
            # Fast path: take the first point in each occupied grid cell.
            grid_group = self.points.groupby(['grid_x', 'grid_y'])
            first_indices = grid_group.nth(0).points_index.values
            self.points.loc[(first_indices, 'selected')] = True
        elif tile_select in ('nearest', 'centroid'):
            self.points['selected'] = False
            # FIX: the source wrapped this in partial() with no bound
            # arguments, which is a no-op.
            dist_fn = select_nearest_points
            pool = DPool(sf.util.num_cpu())
            for (i, _) in track(enumerate(pool.imap_unordered(dist_fn, range(len(self.grid_idx))), 1), total=len(self.grid_idx)):
                pass
            pool.close()
            pool.join()
        else:
            raise ValueError(f'Unrecognized value for tile_select: "{tile_select}"')
        end = time.time()
        if sf.getLoggingLevel() <= 20:
            sys.stdout.write('\r\x1b[K')
        log.debug(f'Tile image selection complete ({(end - start):.1f} sec)')

    def export(self, path: str) -> None:
        """Export the SlideMap and tfrecord list for later reloading."""
        if self.slide_map is None:
            raise ValueError('Mosaic.export() requires a Mosaic built from a SlideMap.')
        self.slide_map.save(path)
        if isinstance(self.tfrecords, list):
            tfr = self.tfrecords
        else:
            tfr = list(self.tfrecords)
        sf.util.write_json(tfr, join(path, 'tfrecords.json'))
        log.info(f'Mosaic configuration exported to {path}')

    def plot(self, figsize: Tuple[(int, int)]=(200, 200), focus: Optional[List[str]]=None, focus_slide: Optional[str]=None, background: str='#dfdfdf', pool: Optional[Any]=None) -> None:
        """Render the mosaic, placing each selected tile in its grid cell.

        Args:
            figsize: Figure size.
            focus: Optional list of tfrecords to highlight after plotting.
            focus_slide: Optional slide name; tiles fade by the fraction of
                each cell's points belonging to this slide.
            background: Figure background color.
            pool: Optional multiprocessing pool to reuse.
        """
        if ((focus is not None) or (focus_slide is not None)) and (self.tfrecords is None):
            raise ValueError('Unable to plot with focus; slides/tfrecords not configured.')
        log.debug('Initializing figure...')
        self._initialize_figure(figsize=figsize, background=background)
        if focus_slide:
            # Reset alpha before re-weighting by focus_slide membership below.
            self.points['alpha'] = 1.0
        self.points['display_size'] = self.tile_size
        if focus_slide:
            for idx in self.grid_idx:
                _points = self.points_at_grid_index(x=idx[0], y=idx[1])
                if (not _points.empty) and focus_slide:
                    n_matching = len(_points.loc[(_points.slide == focus_slide)])
                    self.points.loc[(_points.index, 'alpha')] = (n_matching / len(_points))
        log.info('Placing image tiles...')
        placed = 0
        start = time.time()
        to_map = []
        should_close_pool = False
        has_tfr = ('tfr_index' in self.points.columns)
        selected_points = self.selected_points()
        for (idx, point) in selected_points.iterrows():
            if has_tfr:
                tfr = self._get_tfrecords_from_slide(point.slide)
                tfr_idx = point.tfr_index
                if tfr:
                    image = (tfr, tfr_idx)
                else:
                    log.error(f'TFRecord {tfr} not found in slide_map')
                    image = None
            else:
                image = self.images[idx]
            to_map.append((idx, (point.grid_x * self.tile_size), (point.grid_y * self.tile_size), point.display_size, point.alpha, image))
        if pool is None:
            pool = DPool(sf.util.num_cpu())
            should_close_pool = True
        # FIX: decode_kwargs is a method here, so it must be called; the
        # source passed the bound method object itself (presumably a stripped
        # @property upstream — confirm).
        for (i, (point_idx, image, extent, alpha)) in track(enumerate(pool.imap(partial(process_tile_image, decode_kwargs=self.decode_kwargs()), to_map)), total=len(selected_points)):
            if point_idx is not None:
                self._record_point(point_idx)
                self._plot_tile_image(image, extent, alpha)
                point = self.points.loc[point_idx]
                self.grid_images[(point.grid_x, point.grid_y)] = image
                placed += 1
        if should_close_pool:
            pool.close()
            pool.join()
        log.debug(f'Tile images placed: {placed} ({(time.time() - start):.2f}s)')
        if focus:
            # NOTE(review): self.focus() is not defined in the visible class
            # body — presumably defined upstream; confirm.
            self.focus(focus)
        self._finalize_figure()

    def save(self, filename: str, **kwargs: Any) -> None:
        """Plot the mosaic and save the figure to `filename`."""
        import matplotlib.pyplot as plt
        self.plot(**kwargs)
        log.info('Exporting figure...')
        try:
            if not os.path.exists(os.path.dirname(filename)):
                os.makedirs(os.path.dirname(filename))
        except FileNotFoundError:
            pass
        plt.savefig(filename, bbox_inches='tight')
        # FIX: the source logged a literal '(unknown)'; log the actual path.
        log.info(f'Saved figure to [green]{filename}')
        plt.close()

    def save_report(self, filename: str) -> None:
        """Write a CSV of the mapped tiles (slide, index) to `filename`."""
        with open(filename, 'w') as f:
            writer = csv.writer(f)
            writer.writerow(['slide', 'index'])
            if isinstance(self.mapped_tiles, dict):
                for tfr in self.mapped_tiles:
                    for idx in self.mapped_tiles[tfr]:
                        writer.writerow([tfr, idx])
            else:
                for idx in self.mapped_tiles:
                    writer.writerow([idx])
        # FIX: the source logged a literal '(unknown)'; log the actual path.
        log.info(f'Mosaic report saved to [green]{filename}')

    def view(self, slides: List[str]=None) -> None:
        """Open the mosaic in an interactive Studio session."""
        from slideflow.studio.widgets import MosaicWidget
        from slideflow.studio import Studio
        studio = Studio(widgets=[MosaicWidget])
        mosaic = studio.get_widget('MosaicWidget')
        mosaic.load(self.slide_map, tfrecords=self.tfrecords, slides=slides, normalizer=self.normalizer)
        studio.run()
class WeaklySupervisedCrackSeg():
    """Weakly-supervised crack segmentation.

    Combines a patch classifier's predictions with a Grad-CAM++ heatmap and
    local patch thresholding to produce a binary-like segmentation mask
    without pixel-level labels.
    """

    def __init__(self, classifier_type='R50', classifier_weight_path='./', patch_size=32, stride_classifier=16, stride_thresholding=8):
        # classifier_type: key passed to model_factory (e.g. 'R50').
        # patch_size / strides: patch extraction geometry for the classifier
        # and for the thresholding stage respectively.
        self.classifier_type = classifier_type
        self.classifier_weight_path = classifier_weight_path
        self.patch_size = patch_size
        self.stride_classifier = stride_classifier
        self.stride_thresholding = stride_thresholding
        self.classifier = model_factory(classifier_type=self.classifier_type)
        self.classifier.load_weights(classifier_weight_path)
        print('--- Classification Model -----')
        print(self.classifier.summary())

    def predict(self, img, detailed_output=False):
        """Segment cracks in `img` (uint8-range array, HxW[xC]).

        Pipeline: patch classification -> Grad-CAM++ -> average the two maps
        -> threshold/erode -> multiply with locally-thresholded bilateral
        image -> smooth and morphologically close.

        Returns the segmentation mask, or a 5-tuple of intermediate maps when
        `detailed_output` is True.
        """
        morph_kernel = np.ones((3, 3), np.uint8)
        # Classifier works on normalized [0, 1] patches.
        img_patches = _split_into_patches((img / 255.0), self.patch_size, self.stride_classifier)
        classifier_pred = self.classifier(img_patches)
        merged_classifier_pred = _merge_out_preds(classifier_pred, img.shape[0], img.shape[1], self.patch_size, self.stride_classifier)
        # Resize merged prediction back to the full image size (resample mode 5).
        merged_classifier_pred = (np.array(Image.fromarray(np.squeeze((merged_classifier_pred * 255)).astype('uint8')).resize((img.shape[1], img.shape[0]), 5)) / 255.0)
        grad_cam_plus = make_gradcam_plus_heatmap((img / 255.0), self.classifier, 'global_average_pooling2d')
        grad_cam_plus = (np.array(Image.fromarray(np.squeeze((grad_cam_plus * 255)).astype('uint8')).resize((img.shape[1], img.shape[0]), 5)) / 255.0)
        # Average the CAM and classifier maps, keep only confident (>0.5) areas.
        merge_cam_class = ((grad_cam_plus + merged_classifier_pred) / 2.0)
        merge_cam_class *= (merge_cam_class > 0.5)
        merge_cam_class = cv2.erode(merge_cam_class, morph_kernel, iterations=4)
        # Edge-preserving smoothing before local thresholding.
        bilateral = cv2.bilateralFilter(img.astype('uint8'), 5, 120, 120)
        thresholded = _norm_threshold_patches(bilateral, self.patch_size, self.stride_thresholding)
        segmentation = (merge_cam_class * thresholded)
        segmentation = (cv2.bilateralFilter((segmentation * 255).astype('uint8'), 5, 120, 120) / 255)
        segmentation = cv2.morphologyEx((segmentation * 255).astype('uint8'), cv2.MORPH_CLOSE, morph_kernel)
        if detailed_output:
            return (segmentation, grad_cam_plus, merged_classifier_pred, merge_cam_class, thresholded)
        else:
            return segmentation
def gen_min_sigs(project_name: str, class_name: str) -> str:
    """Build a minimal class skeleton: signature, fields, and constructors.

    Looks up the class signature and fields plus all constructor signatures
    from the database and assembles them into a single class-body string.

    Raises:
        RuntimeError: If the class row cannot be found.
    """
    class_row = db.select(table_name='class', conditions={'project_name': project_name, 'class_name': class_name}, result_cols=['signature', 'fields'])
    if not class_row:
        raise RuntimeError('Error happened in function gen_min_sigs.')
    (c_sig, fields) = class_row[0]
    parts = [c_sig + '{\n', fields + '\n']
    constructors = db.select(table_name='method', conditions={'class_name': class_name, 'is_constructor': True, 'project_name': project_name}, result_cols=['signature'])
    parts.extend(row[0] + '\n' for row in constructors)
    parts.append('\n}')
    return ''.join(parts)
_module()
class TopDownMhpDataset(TopDownCocoDataset):
    """Top-down pose dataset for the MHP benchmark (16 keypoints).

    Reuses the COCO-style loading machinery but overrides flip pairs, body
    partitions, joint weights, and OKS sigmas for the MHP keypoint layout.
    """

    def __init__(self, ann_file, img_prefix, data_cfg, pipeline, test_mode=False):
        # NOTE(review): super() is called with TopDownCocoDataset, which skips
        # TopDownCocoDataset.__init__ and runs its parent's — presumably
        # intentional so the COCO-specific setup below can be replaced; confirm.
        super(TopDownCocoDataset, self).__init__(ann_file, img_prefix, data_cfg, pipeline, test_mode=test_mode)
        self.use_gt_bbox = data_cfg['use_gt_bbox']
        self.bbox_file = data_cfg['bbox_file']
        self.image_thr = data_cfg['image_thr']
        self.use_nms = data_cfg.get('use_nms', True)
        self.soft_nms = data_cfg['soft_nms']
        self.nms_thr = data_cfg['nms_thr']
        self.oks_thr = data_cfg['oks_thr']
        self.vis_thr = data_cfg['vis_thr']
        self.bbox_thr = data_cfg['bbox_thr']
        # MHP-specific keypoint symmetry pairs and body partitions.
        self.ann_info['flip_pairs'] = [[0, 5], [1, 4], [2, 3], [10, 15], [11, 14], [12, 13]]
        self.ann_info['upper_body_ids'] = (7, 8, 9, 10, 11, 12, 13, 14, 15)
        self.ann_info['lower_body_ids'] = (0, 1, 2, 3, 4, 5, 6)
        self.ann_info['use_different_joint_weights'] = False
        # Per-joint loss weights (extremities weighted higher).
        self.ann_info['joint_weights'] = np.array([1.5, 1.2, 1.0, 1.0, 1.2, 1.5, 1.0, 1.0, 1.0, 1.0, 1.5, 1.2, 1.0, 1.0, 1.2, 1.5], dtype=np.float32).reshape((self.ann_info['num_joints'], 1))
        # OKS sigmas used by the keypoint evaluator.
        self.sigmas = (np.array([0.89, 0.83, 1.07, 1.07, 0.83, 0.89, 0.26, 0.26, 0.26, 0.26, 0.62, 0.72, 1.79, 1.79, 0.72, 0.62]) / 10.0)
        self.coco = COCO(ann_file)
        cats = [cat['name'] for cat in self.coco.loadCats(self.coco.getCatIds())]
        self.classes = (['__background__'] + cats)
        self.num_classes = len(self.classes)
        # Mappings between class names, contiguous indices, and COCO cat ids.
        self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))
        self._class_to_coco_ind = dict(zip(cats, self.coco.getCatIds()))
        self._coco_ind_to_class_ind = dict(((self._class_to_coco_ind[cls], self._class_to_ind[cls]) for cls in self.classes[1:]))
        self.img_ids = self.coco.getImgIds()
        self.num_images = len(self.img_ids)
        (self.id2name, self.name2id) = self._get_mapping_id_name(self.coco.imgs)
        self.dataset_name = 'mhp'
        self.db = self._get_db()
        print(f'=> num_images: {self.num_images}')
        print(f'=> load {len(self.db)} samples')

    def _get_db(self):
        """Load ground-truth keypoint annotations (gt bboxes only)."""
        assert self.use_gt_bbox
        gt_db = self._load_coco_keypoint_annotations()
        return gt_db

    def _do_python_keypoint_eval(self, res_file):
        """Run COCO-style keypoint evaluation and return (name, value) pairs."""
        coco_det = self.coco.loadRes(res_file)
        coco_eval = COCOeval(self.coco, coco_det, 'keypoints', self.sigmas, use_area=False)
        coco_eval.params.useSegm = None
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        stats_names = ['AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)']
        info_str = list(zip(stats_names, coco_eval.stats))
        return info_str
def set_dtype_t(is_float32):
    """Set the module-level `dtype_t` to float32 or float64."""
    global dtype_t
    if is_float32:
        dtype_t = np.float32
    else:
        dtype_t = np.float64
def foo(x: jnp.ndarray) -> jnp.ndarray:
    """Run a small Haiku MLP on `x` and return the mean output as a loss."""
    network = hk.nets.MLP([4, 5, 1])
    return network(x).mean()
class MyNeuronCoverage():
    """Neuron-coverage metric: a neuron counts as covered when its (min-max
    scaled) activation exceeds `threshold`.

    FIX: the source contained two defects introduced by a garbled dump:
    (1) `raise RunTimeError` — a typo for RuntimeError that would itself
        raise NameError;
    (2) the helper functions below were preceded by bare `(parallel=True)`
        lines — the remnants of stripped decorators (likely
        `@staticmethod` + `@numba.jit(parallel=True)`), leaving invalid
        syntax and, without @staticmethod, calls like `self._scale(x)`
        would have passed `self` as the array argument. `@staticmethod` is
        restored and `prange` (numba) is replaced by the semantically
        identical built-in `range`.
    """

    def __init__(self, threshold=0.5):
        # Activation threshold applied after per-input min-max scaling.
        self._threshold = threshold
        self._layer_neuron_id_to_global_neuron_id = {}
        self._global_neuron_id_to_layer_neuron_id = {}
        self._results = {}
        self._num_layer = 0
        self._num_neuron = 0
        self._num_input = 0
        self._report_layers_and_neurons = True
        self.result = None
        self.structure_initialized = False

    def init_structure(self, intermediate_layer_outputs, features_index):
        """Build layer<->global neuron-id maps from one batch of outputs."""
        current_global_neuron_id = 0
        for (layer_name, intermediate_layer_output) in intermediate_layer_outputs.items():
            intermediate_layer_output_single_input = intermediate_layer_output[0]
            num_layer_neuron = intermediate_layer_output_single_input.shape[features_index]
            for layer_neuron_id in range(num_layer_neuron):
                self._layer_neuron_id_to_global_neuron_id[(layer_name, layer_neuron_id)] = current_global_neuron_id
                self._global_neuron_id_to_layer_neuron_id[current_global_neuron_id] = (layer_name, layer_neuron_id)
                current_global_neuron_id += 1
            self._num_layer += 1
            self._num_neuron += num_layer_neuron

    def update(self, intermediate_layer_inputs, intermediate_layer_outputs, features_index):
        """Update coverage from one batch of layer inputs/outputs."""
        # Convert all tensors to numpy arrays first.
        intermediate_layer_outputs_new = {}
        for (name, intermediate_layer_output) in intermediate_layer_outputs.items():
            intermediate_layer_output = common.to_numpy(intermediate_layer_output)
            intermediate_layer_outputs_new[name] = intermediate_layer_output
        intermediate_layer_outputs = intermediate_layer_outputs_new
        intermediate_layer_inputs_new = {}
        for (name, intermediate_layer_input) in intermediate_layer_inputs.items():
            intermediate_layer_input = common.to_numpy(intermediate_layer_input)
            intermediate_layer_inputs_new[name] = intermediate_layer_input
        intermediate_layer_inputs = intermediate_layer_inputs_new
        if not self.structure_initialized:
            self.init_structure(intermediate_layer_outputs, features_index)
            self.structure_initialized = True
        num_input = len(intermediate_layer_outputs[list(intermediate_layer_outputs.keys())[0]])
        self._num_input += num_input
        # Min-max scale activations per input before thresholding.
        for layer_name in intermediate_layer_outputs.keys():
            intermediate_layer_outputs[layer_name] = self._scale(intermediate_layer_outputs[layer_name])
            intermediate_layer_inputs[layer_name] = self._scale(intermediate_layer_inputs[layer_name])
        current_result = {}
        for ((layer_name, intermediate_layer_input), (_, intermediate_layer_output)) in zip(intermediate_layer_inputs.items(), intermediate_layer_outputs.items()):
            # >2 dims: reduce each feature map by its mean; otherwise compare
            # per-neuron scalars directly.
            if len(intermediate_layer_output.shape) > 2:
                output_nc = self._calc_1(intermediate_layer_output, features_index, self._threshold)
                input_nc = self._calc_1(intermediate_layer_input, features_index, self._threshold)
            else:
                output_nc = self._calc_2(intermediate_layer_output, features_index, self._threshold)
                input_nc = self._calc_2(intermediate_layer_input, features_index, self._threshold)
            current_result[layer_name] = (input_nc, output_nc)
        self.result = copy.deepcopy(current_result)

    def report(self, *args):
        """Print per-input coverage.

        NOTE(review): this assumes self.result is a 2D array, but update()
        stores a dict of (input_nc, output_nc) tuples — as written, report()
        would fail after update(); confirm intended result format upstream.
        """
        (num_input, num_neuron) = self.result.shape
        for input_id in range(num_input):
            coverage = (np.sum(self.result[input_id]) / num_neuron)
            print(f'[NeuronCoverage] layers: {self._num_layer}, neurons: {self._num_neuron}, input_id {input_id}, coverage: {coverage:.4f}[{np.sum(self.result[input_id])}/{num_neuron}]')

    def get(self):
        """Return the latest coverage result, raising if none exists."""
        if self.result is None:
            # FIX: source raised the misspelled name `RunTimeError`.
            raise RuntimeError(f'Result is None!')
        return self.result

    @staticmethod
    def _scale(intermediate_layer_output):
        """Min-max scale each input's activations into [0, 1], in place."""
        # `range` replaces numba's `prange` (same semantics, serial).
        for input_id in range(intermediate_layer_output.shape[0]):
            intermediate_layer_output[input_id] = ((intermediate_layer_output[input_id] - intermediate_layer_output[input_id].min()) / (intermediate_layer_output[input_id].max() - intermediate_layer_output[input_id].min()))
        return intermediate_layer_output

    @staticmethod
    def _calc_1(intermediate_layer_output, features_index, threshold):
        """Coverage for >2D activations: neuron covered if its mean > threshold."""
        num_layer_neuron = intermediate_layer_output[0].shape[features_index]
        num_input = len(intermediate_layer_output)
        result = np.zeros(shape=(num_input, num_layer_neuron), dtype=np.uint8)
        for input_id in range(intermediate_layer_output.shape[0]):
            for layer_neuron_id in range(num_layer_neuron):
                if features_index == (-1):
                    neuron_output = intermediate_layer_output[input_id][(..., layer_neuron_id)]
                else:
                    neuron_output = intermediate_layer_output[input_id][layer_neuron_id]
                mean = np.mean(neuron_output)
                if mean > threshold:
                    result[input_id][layer_neuron_id] = 1
        return result

    @staticmethod
    def _calc_2(intermediate_layer_output, features_index, threshold):
        """Coverage for 2D activations: neuron covered if its value > threshold."""
        num_layer_neuron = intermediate_layer_output[0].shape[features_index]
        num_input = len(intermediate_layer_output)
        result = np.zeros(shape=(num_input, num_layer_neuron), dtype=np.uint8)
        for input_id in range(intermediate_layer_output.shape[0]):
            for layer_neuron_id in range(num_layer_neuron):
                if features_index == (-1):
                    neuron_output = intermediate_layer_output[input_id][(..., layer_neuron_id)]
                else:
                    neuron_output = intermediate_layer_output[input_id][layer_neuron_id]
                if neuron_output > threshold:
                    result[input_id][layer_neuron_id] = 1
        return result
class CelebAHQTrain(FacesBase):
    """Training split of CelebA-HQ, loaded from a relative-path list file."""

    def __init__(self, size, keys=None):
        super().__init__()
        root = 'data/celebahq'
        # The split file lists one relative image path per line.
        with open('data/celebahqtrain.txt', 'r') as f:
            paths = [os.path.join(root, line) for line in f.read().splitlines()]
        self.data = NumpyPaths(paths=paths, size=size, random_crop=False)
        self.keys = keys
def parse(log):
    """Parse a pipe-delimited score table into (means, errors) dicts.

    The first and last characters of `log` are stripped, then each line is
    split on '|'; column 0 holds the name (spaces removed) and column 2 a
    'score <mean> +/- <err>' entry.

    Returns:
        Tuple of (name -> mean, name -> error) dicts of floats.
    """
    means = {}
    errors = {}
    for row in log[1:-1].split('\n'):
        columns = row.split('|')
        key = columns[0].replace(' ', '')
        mean_text, err_text = columns[2].replace('score', '').split(' +/- ')
        means[key] = float(mean_text)
        errors[key] = float(err_text)
    return (means, errors)
class XgbAlgorithm(BaseAlgorithm):
    """Extreme Gradient Boosting learner wrapping `xgb.train`.

    Supports custom evaluation metrics, early stopping, and save/load of the
    underlying booster.
    """

    algorithm_name = 'Extreme Gradient Boosting'
    algorithm_short_name = 'Xgboost'

    def __init__(self, params):
        super(XgbAlgorithm, self).__init__(params)
        self.library_version = xgb.__version__
        self.explain_level = params.get('explain_level', 0)
        # `additional` is a module-level defaults dict (defined elsewhere in the file).
        self.boosting_rounds = additional.get('max_rounds', 10000)
        self.max_iters = 1
        self.early_stopping_rounds = additional.get('early_stopping_rounds', 50)
        self.learner_params = {'tree_method': 'hist', 'booster': 'gbtree', 'objective': self.params.get('objective'), 'eval_metric': self.params.get('eval_metric'), 'eta': self.params.get('eta', 0.01), 'max_depth': self.params.get('max_depth', 1), 'min_child_weight': self.params.get('min_child_weight', 1), 'subsample': self.params.get('subsample', 0.8), 'colsample_bytree': self.params.get('colsample_bytree', 0.8), 'n_jobs': self.params.get('n_jobs', (-1)), 'seed': self.params.get('seed', 1), 'verbosity': 0}
        if 'lambda' in self.params:
            self.learner_params['lambda'] = self.params['lambda']
        if 'alpha' in self.params:
            self.learner_params['alpha'] = self.params['alpha']
        # FIX: the bound was garbled away in the source ("seed > "); xgboost
        # seeds must fit in a 32-bit int, so clamp with modulo 2**31 (matches
        # upstream mljar-supervised) — NOTE(review): confirm exact constant.
        if self.learner_params['seed'] > 2 ** 31:
            self.learner_params['seed'] = self.learner_params['seed'] % 2 ** 31
        if 'num_class' in self.params:
            self.learner_params['num_class'] = self.params.get('num_class')
        if 'max_rounds' in self.params:
            self.boosting_rounds = self.params['max_rounds']
        # Map metric names to custom feval callables where xgboost lacks a builtin.
        self.custom_eval_metric = None
        if self.params.get('eval_metric', '') == 'r2':
            self.custom_eval_metric = xgboost_eval_metric_r2
        elif self.params.get('eval_metric', '') == 'spearman':
            self.custom_eval_metric = xgboost_eval_metric_spearman
        elif self.params.get('eval_metric', '') == 'pearson':
            self.custom_eval_metric = xgboost_eval_metric_pearson
        elif self.params.get('eval_metric', '') == 'f1':
            self.custom_eval_metric = xgboost_eval_metric_f1
        elif self.params.get('eval_metric', '') == 'average_precision':
            self.custom_eval_metric = xgboost_eval_metric_average_precision
        elif self.params.get('eval_metric', '') == 'accuracy':
            self.custom_eval_metric = xgboost_eval_metric_accuracy
        elif self.params.get('eval_metric', '') == 'mse':
            self.custom_eval_metric = xgboost_eval_metric_mse
        elif self.params.get('eval_metric', '') == 'user_defined_metric':
            self.custom_eval_metric = xgboost_eval_metric_user_defined
        self.best_ntree_limit = 0
        logger.debug('XgbLearner __init__')

    def fit(self, X, y, sample_weight=None, X_validation=None, y_validation=None, sample_weight_validation=None, log_to_file=None, max_time=None):
        """Train the booster, optionally with early stopping on validation data.

        Writes per-iteration train/validation metric values to `log_to_file`
        (CSV, no header) when requested.
        """
        # np.nan replaces the removed-in-NumPy-2.0 alias np.NaN.
        dtrain = xgb.DMatrix((X.values if isinstance(X, pd.DataFrame) else X), label=y, missing=np.nan, weight=sample_weight)
        dvalidation = xgb.DMatrix((X_validation.values if isinstance(X_validation, pd.DataFrame) else X_validation), label=y_validation, missing=np.nan, weight=sample_weight_validation)
        evals_result = {}
        evals = []
        esr = None
        if (X_validation is not None) and (y_validation is not None):
            evals = [(dtrain, 'train'), (dvalidation, 'validation')]
            esr = self.early_stopping_rounds
        if self.custom_eval_metric is not None:
            # A custom feval replaces the builtin eval_metric entirely.
            del self.learner_params['eval_metric']
        self.model = xgb.train(self.learner_params, dtrain, self.boosting_rounds, evals=evals, early_stopping_rounds=esr, evals_result=evals_result, verbose_eval=False, feval=self.custom_eval_metric)
        del dtrain
        del dvalidation
        if log_to_file is not None:
            metric_name = list(evals_result['train'].keys())[(-1)]
            result = pd.DataFrame({'iteration': range(len(evals_result['train'][metric_name])), 'train': evals_result['train'][metric_name], 'validation': evals_result['validation'][metric_name]})
            # These metrics are maximized; negate so downstream treats all
            # logged metrics as minimized.
            if metric_name in ['r2', 'spearman', 'pearson', 'f1', 'average_precision', 'accuracy']:
                result['train'] *= (-1.0)
                result['validation'] *= (-1.0)
            result.to_csv(log_to_file, index=False, header=False)
        self.best_ntree_limit = self.model.best_ntree_limit

    def is_fitted(self):
        """Return True when a trained booster is present."""
        return self.model is not None

    def predict(self, X):
        """Predict with the booster, honoring the best iteration found in fit."""
        self.reload()
        if self.model is None:
            raise XgbAlgorithmException('Xgboost model is None')
        dtrain = xgb.DMatrix((X.values if isinstance(X, pd.DataFrame) else X), missing=np.nan)
        # Newer xgboost uses iteration_range; older uses ntree_limit.
        if 'iteration_range' in str(signature(self.model.predict)):
            a = self.model.predict(dtrain, iteration_range=(0, self.best_ntree_limit))
        else:
            a = self.model.predict(dtrain, ntree_limit=self.best_ntree_limit)
        return a

    def copy(self):
        """Return a deep copy of this learner."""
        return copy.deepcopy(self)

    def save(self, model_file_path):
        """Save the booster to `model_file_path`."""
        self.model.save_model(model_file_path)
        self.model_file_path = model_file_path
        logger.debug(('XgbAlgorithm save model to %s' % model_file_path))

    def load(self, model_file_path):
        """Load a booster from `model_file_path`."""
        logger.debug(('XgbLearner load model from %s' % model_file_path))
        self.model = xgb.Booster()
        self.model.load_model(model_file_path)
        self.model_file_path = model_file_path

    def file_extension(self):
        """File extension used when persisting this learner."""
        return 'xgboost'

    def get_metric_name(self):
        """Return the configured eval metric name ('mlogloss' -> 'logloss')."""
        metric = self.params.get('eval_metric')
        if metric is None:
            return None
        if metric == 'mlogloss':
            return 'logloss'
        return metric
def check_finite(x, name):
    """Raise ValueError if *x* (scalar or array-like) contains inf or NaN."""
    if np.all(np.isfinite(x)):
        return
    # Scalar and array inputs get differently-worded messages.
    if np.isscalar(x):
        raise ValueError(f'{name} must be finite (infinity and NaN values are not supported).')
    raise ValueError(f'All elements of {name} must be finite (infinity and NaN values are not supported).')
class OptimizationArguments():
    """Arguments controlling Neural Compressor pruning / tuning / benchmarking.

    NOTE(review): fields use dataclasses.field(); a @dataclass (or
    HfArgumentParser-compatible) decorator is presumably applied where this
    class is declared -- confirm upstream.
    """
    # Whether to apply pruning at all.
    prune: bool = field(default=False, metadata={'help': 'Whether or not to apply prune.'})
    pruning_approach: Optional[str] = field(default='BasicMagnitude', metadata={'help': 'Pruning approach. Supported approach is basic_magnite.'})
    # Target fraction of weights to zero out when pruning.
    target_sparsity_ratio: Optional[float] = field(default=None, metadata={'help': 'Targeted sparsity when pruning the model.'})
    metric_name: Optional[str] = field(default=None, metadata={'help': 'Metric used for the tuning strategy.'})
    tolerance_mode: Optional[str] = field(default='absolute', metadata={'help': 'Metric tolerance model, expected to be relative or absolute.'})
    perf_tol: Optional[float] = field(default=0.02, metadata={'help': 'Performance tolerance when optimizing the model.'})
    benchmark: bool = field(default=False, metadata={'help': 'run benchmark.'})
    accuracy_only: bool = field(default=False, metadata={'help': 'Whether to only test accuracy for model tuned by Neural Compressor.'})
def print_args(args):
    """Pretty-print every attribute of an argparse-style namespace."""
    print('\n')
    print(' ARGUMENTS ')
    for key, value in vars(args).items():
        print('- {} : {}'.format(key, value))
    print('\n')
class CLIPFeatureExtractor(CLIPImageProcessor):
    """Deprecated alias kept for backward compatibility; use CLIPImageProcessor."""

    def __init__(self, *args, **kwargs) -> None:
        deprecation_message = 'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use CLIPImageProcessor instead.'
        warnings.warn(deprecation_message, FutureWarning)
        super().__init__(*args, **kwargs)
def str_filt(str_, voc_type):
    """Filter *str_* down to the characters allowed by *voc_type*.

    Parameters
    ----------
    str_ : str
        Input string.
    voc_type : str
        One of 'digit', 'lower', 'upper', 'all'; selects the allowed alphabet.
        'lower' also lowercases the input first.

    Returns
    -------
    str with disallowed characters removed, original order preserved.
    """
    alpha_dict = {
        'digit': string.digits,
        'lower': (string.digits + string.ascii_lowercase),
        # NOTE: 'upper' historically allows BOTH cases (digits + ascii_letters).
        'upper': (string.digits + string.ascii_letters),
        'all': ((string.digits + string.ascii_letters) + string.punctuation),
    }
    if (voc_type == 'lower'):
        str_ = str_.lower()
    # Single O(n) pass with set membership, instead of the previous repeated
    # str.replace() loop which was O(n^2) in the worst case.
    allowed = set(alpha_dict[voc_type])
    return ''.join(ch for ch in str_ if ch in allowed)
def total_norm_constraint(tensor_vars, max_norm, epsilon=1e-07, return_norm=False):
    """Rescale a list of Theano tensors so their joint L2 norm is at most max_norm.

    Returns the scaled tensors, plus the (symbolic) pre-clipping norm when
    return_norm is True.
    """
    squared_sum = sum(T.sum(t ** 2) for t in tensor_vars)
    norm = T.sqrt(squared_sum)
    dtype = np.dtype(theano.config.floatX).type
    target_norm = T.clip(norm, 0, dtype(max_norm))
    # epsilon keeps the division well-defined when the norm is ~0.
    multiplier = target_norm / (dtype(epsilon) + norm)
    scaled_vars = [var * multiplier for var in tensor_vars]
    if return_norm:
        return (scaled_vars, norm)
    return scaled_vars
def chrf(hypotheses, references, remove_whitespace=True):
    """Corpus-level chrF score via sacrebleu (single reference stream)."""
    result = sacrebleu.corpus_chrf(hypotheses=hypotheses, references=[references], remove_whitespace=remove_whitespace)
    return result.score
class LatentTransformerEncoderLayer(TransformerEncoderLayer):
    """Transformer encoder layer whose residual branch is gated by a
    per-layer selector (layer_select(idx) scales this layer's contribution)."""

    def __init__(self, args, idx, layer_select=None):
        super().__init__(args)
        self.layer_select = layer_select
        self.idx = idx

    def residual_connection(self, x, residual):
        gate = self.layer_select(self.idx)
        return residual + (x * gate)
_model
def efficientnet_el(pretrained=False, **kwargs):
    """EfficientNet-Edge-Large (channel multiplier 1.2, depth multiplier 1.4)."""
    return _gen_efficientnet_edge('efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs)
def build_optims_and_schedulers(model, critic, opt):
    """Build optimizers and LR schedulers for the model (and optional critic).

    Returns a 6-tuple (optimR, schedulerR, optimT, schedulerT, optimC,
    schedulerC); the template/critic slots are None when not applicable.
    The previous version repeated the same optimizer+scheduler construction
    three times; it is now factored into a single helper.
    """
    # Exponential decay: lr_factor = decay ** epoch.
    lr_lambda = lambda epoch: (opt.learning_rate_decay ** epoch)

    def _make_optim_and_scheduler(params, learning_rate):
        # One Optim over *params* plus its matching LambdaLR scheduler.
        optim = nmt.Optim(opt.optim_method, learning_rate, opt.max_grad_norm, opt.learning_rate_decay, opt.weight_decay, opt.start_decay_at)
        optim.set_parameters(params)
        scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer=optim.optimizer, lr_lambda=[lr_lambda])
        return (optim, scheduler)

    # Joint models train all parameters under a single optimizer.
    if model.__class__.__name__ == 'jointTemplateResponseGenerator':
        (optimR, schedulerR) = _make_optim_and_scheduler(model.parameters(), opt.learning_rate_R)
        return (optimR, schedulerR, None, None, None, None)
    (optimR, schedulerR) = _make_optim_and_scheduler(model.response_generator.parameters(), opt.learning_rate_R)
    (optimT, schedulerT) = _make_optim_and_scheduler(model.template_generator.parameters(), opt.learning_rate_T)
    if critic is not None:
        (optimC, schedulerC) = _make_optim_and_scheduler(critic.parameters(), opt.learning_rate_C)
    else:
        (optimC, schedulerC) = (None, None)
    return (optimR, schedulerR, optimT, schedulerT, optimC, schedulerC)
def get_art_abs(story_file):
    """Split a CNN/DailyMail story file into article lines and highlights.

    Returns
    -------
    (article_lines, highlights) : tuple of lists of normalized (lowercased,
    whitespace-collapsed, period-fixed) lines.
    """
    lines = read_story_file(story_file)
    lines = [' '.join(line.lower().strip().split()) for line in lines]
    lines = [fix_missing_period(line) for line in lines]
    article_lines = []
    highlights = []
    next_is_highlight = False
    for line in lines:
        if line == '':
            continue
        # BUG FIX: the marker was an empty string (''), which startswith()
        # always matches, so every branch below was unreachable and both
        # lists stayed empty. CNN/DM stories mark summary sentences with an
        # '@highlight' token on its own line.
        elif line.startswith('@highlight'):
            next_is_highlight = True
        elif next_is_highlight:
            highlights.append(line)
        else:
            article_lines.append(line)
    return (article_lines, highlights)
class MultiSkipLSTMCell(tf.nn.rnn_cell.RNNCell):
    """Stacked LSTM whose per-step state update can be skipped as a whole.

    All layers share one binary update gate (Skip-RNN style): each step the
    cell either commits every layer's candidate state or copies the previous
    state through unchanged, driven by an accumulated update probability.
    """

    def __init__(self, num_units, forget_bias=1.0, activation=tf.tanh, layer_norm=False, update_bias=1.0):
        # num_units: int or list of ints -- hidden size of each stacked layer.
        if (not isinstance(num_units, list)):
            num_units = [num_units]
        self._num_units = num_units
        self._num_layers = len(self._num_units)
        self._forget_bias = forget_bias
        self._activation = activation
        self._layer_norm = layer_norm
        # Initial bias of the update-probability projection.
        self._update_bias = update_bias

    # NOTE(review): state_size/output_size look like they should be @property
    # to satisfy the RNNCell interface -- decorators may have been stripped.
    def state_size(self):
        # Plain LSTM tuples for all but the last layer; the last layer's tuple
        # additionally carries the scalar update / cumulative-update probs.
        return ([LSTMStateTuple(num_units, num_units) for num_units in self._num_units[:(- 1)]] + [SkipLSTMStateTuple(self._num_units[(- 1)], self._num_units[:(- 1)], 1, 1)])

    def output_size(self):
        # Output of the top layer plus the binary update gate.
        return SkipLSTMOutputTuple(self._num_units[(- 1)], 1)

    def __call__(self, inputs, state, scope=None):
        """One step: compute candidate states for every layer, then commit or
        skip all of them according to the shared binary update gate."""
        with tf.variable_scope((scope or type(self).__name__)):
            # The skip-gate bookkeeping lives in the last layer's state tuple.
            (update_prob_prev, cum_update_prob_prev) = (state[(- 1)].update_prob, state[(- 1)].cum_update_prob)
            cell_input = inputs
            state_candidates = []
            # Standard stacked-LSTM candidate computation, layer by layer.
            for idx in range(self._num_layers):
                with tf.variable_scope(('layer_%d' % (idx + 1))):
                    (c_prev, h_prev) = (state[idx].c, state[idx].h)
                    concat = rnn_ops.linear([cell_input, h_prev], (4 * self._num_units[idx]), True)
                    (i, j, f, o) = tf.split(value=concat, num_or_size_splits=4, axis=1)
                    if self._layer_norm:
                        i = rnn_ops.layer_norm(i, name='i')
                        j = rnn_ops.layer_norm(j, name='j')
                        f = rnn_ops.layer_norm(f, name='f')
                        o = rnn_ops.layer_norm(o, name='o')
                    new_c_tilde = ((c_prev * tf.sigmoid((f + self._forget_bias))) + (tf.sigmoid(i) * self._activation(j)))
                    new_h_tilde = (self._activation(new_c_tilde) * tf.sigmoid(o))
                    state_candidates.append(LSTMStateTuple(new_c_tilde, new_h_tilde))
                    cell_input = new_h_tilde
            # Next-step update probability is predicted from the top layer's
            # candidate cell state.
            with tf.variable_scope('state_update_prob'):
                new_update_prob_tilde = rnn_ops.linear(state_candidates[(- 1)].c, 1, True, bias_start=self._update_bias)
                new_update_prob_tilde = tf.sigmoid(new_update_prob_tilde)
            # Accumulate probability mass; once it reaches ~1 the (rounded)
            # gate fires and the state is actually updated.
            cum_update_prob = (cum_update_prob_prev + tf.minimum(update_prob_prev, (1.0 - cum_update_prob_prev)))
            update_gate = _binary_round(cum_update_prob)
            new_states = []
            # Blend candidate and previous state with the same gate everywhere.
            for idx in range((self._num_layers - 1)):
                new_c = ((update_gate * state_candidates[idx].c) + ((1.0 - update_gate) * state[idx].c))
                new_h = ((update_gate * state_candidates[idx].h) + ((1.0 - update_gate) * state[idx].h))
                new_states.append(LSTMStateTuple(new_c, new_h))
            new_c = ((update_gate * state_candidates[(- 1)].c) + ((1.0 - update_gate) * state[(- 1)].c))
            new_h = ((update_gate * state_candidates[(- 1)].h) + ((1.0 - update_gate) * state[(- 1)].h))
            # On update: adopt the predicted prob and reset the accumulator.
            new_update_prob = ((update_gate * new_update_prob_tilde) + ((1.0 - update_gate) * update_prob_prev))
            new_cum_update_prob = ((update_gate * 0.0) + ((1.0 - update_gate) * cum_update_prob))
            new_states.append(SkipLSTMStateTuple(new_c, new_h, new_update_prob, new_cum_update_prob))
            new_output = SkipLSTMOutputTuple(new_h, update_gate)
            return (new_output, new_states)

    def trainable_initial_state(self, batch_size):
        """Build (mostly trainable) initial states; the update/cumulative
        probabilities start fixed at 1 and 0 respectively."""
        initial_states = []
        for idx in range((self._num_layers - 1)):
            with tf.variable_scope(('layer_%d' % (idx + 1))):
                with tf.variable_scope('initial_c'):
                    initial_c = rnn_ops.create_initial_state(batch_size, self._num_units[idx])
                with tf.variable_scope('initial_h'):
                    initial_h = rnn_ops.create_initial_state(batch_size, self._num_units[idx])
                initial_states.append(LSTMStateTuple(initial_c, initial_h))
        with tf.variable_scope(('layer_%d' % self._num_layers)):
            with tf.variable_scope('initial_c'):
                initial_c = rnn_ops.create_initial_state(batch_size, self._num_units[(- 1)])
            with tf.variable_scope('initial_h'):
                initial_h = rnn_ops.create_initial_state(batch_size, self._num_units[(- 1)])
            with tf.variable_scope('initial_update_prob'):
                initial_update_prob = rnn_ops.create_initial_state(batch_size, 1, trainable=False, initializer=tf.ones_initializer())
            with tf.variable_scope('initial_cum_update_prob'):
                initial_cum_update_prob = rnn_ops.create_initial_state(batch_size, 1, trainable=False, initializer=tf.zeros_initializer())
            initial_states.append(SkipLSTMStateTuple(initial_c, initial_h, initial_update_prob, initial_cum_update_prob))
        return initial_states
class FCN8sd(nn.Module):
    """FCN-8s(d) semantic-segmentation model on top of a dilated backbone.

    The backbone returns a main and an auxiliary feature map; each is fed to
    an FCNFinalBlock that produces per-class logits at the input resolution.
    """

    def __init__(self, backbone, backbone_out_channels=2048, aux=False, fixed_size=True, in_channels=3, in_size=(480, 480), num_classes=21):
        super(FCN8sd, self).__init__()
        assert (in_channels > 0)
        self.in_size = in_size
        self.num_classes = num_classes
        self.aux = aux
        self.fixed_size = fixed_size
        self.backbone = backbone
        self.final_block = FCNFinalBlock(in_channels=backbone_out_channels, out_channels=num_classes)
        if self.aux:
            # Auxiliary head operates on a half-width feature map.
            self.aux_block = FCNFinalBlock(in_channels=(backbone_out_channels // 2), out_channels=num_classes)
        self._init_params()

    def _init_params(self):
        # Kaiming-uniform weights and zero biases for every convolution.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        out_size = self.in_size if self.fixed_size else x.shape[2:]
        (x, y) = self.backbone(x)
        x = self.final_block(x, out_size)
        if not self.aux:
            return x
        y = self.aux_block(y, out_size)
        return (x, y)
def GetDetectObjectsService(srv='/costar_perception/segmenter'):
    """Return a proxy for the object-segmentation ROS service."""
    service = GetService(srv, EmptySrv)
    return service
class BaseSRDataset(BaseDataset):
    """Base class for super-resolution datasets.

    Attaches the upsampling *scale* to every sample and averages per-sample
    metric values in evaluate() (feature-based metrics are handled specially).
    """

    def __init__(self, pipeline, scale, test_mode=False):
        super().__init__(pipeline, test_mode)
        # Upsampling factor propagated into each sample dict.
        self.scale = scale

    # NOTE(review): takes no `self` -- presumably @staticmethod upstream; confirm.
    def scan_folder(path):
        """Recursively collect image file paths (by extension) under *path*."""
        if isinstance(path, (str, Path)):
            path = str(path)
        else:
            raise TypeError(f"'path' must be a str or a Path object, but received {type(path)}.")
        images = list(scandir(path, suffix=IMG_EXTENSIONS, recursive=True))
        images = [osp.join(path, v) for v in images]
        assert images, f'{path} has no valid image file.'
        return images

    def __getitem__(self, idx):
        # Deep copy so pipeline transforms cannot mutate the cached info dict.
        results = copy.deepcopy(self.data_infos[idx])
        results['scale'] = self.scale
        return self.pipeline(results)

    def evaluate(self, results, logger=None):
        """Aggregate per-sample eval results into one {metric: value} dict.

        Plain metrics are averaged over the dataset; '_inception_feat'
        features are concatenated and fed to the feature-based metric
        functions instead.
        """
        if (not isinstance(results, list)):
            raise TypeError(f'results must be a list, but got {type(results)}')
        assert (len(results) == len(self)), f'The length of results is not equal to the dataset len: {len(results)} != {len(self)}'
        results = [res['eval_result'] for res in results]
        eval_result = defaultdict(list)
        for res in results:
            for (metric, val) in res.items():
                eval_result[metric].append(val)
        for (metric, val_list) in eval_result.items():
            assert (len(val_list) == len(self)), f'Length of evaluation result of {metric} is {len(val_list)}, should be {len(self)}'
        # Average everything that is a plain scalar metric.
        eval_result.update({metric: (sum(values) / len(self)) for (metric, values) in eval_result.items() if (metric not in (['_inception_feat'] + FEATURE_BASED_METRICS))})
        if ('_inception_feat' in eval_result):
            (feat1, feat2) = ([], [])
            for (f1, f2) in eval_result['_inception_feat']:
                feat1.append(f1)
                feat2.append(f2)
            feat1 = np.concatenate(feat1, 0)
            feat2 = np.concatenate(feat2, 0)
            for metric in FEATURE_BASED_METRICS:
                if (metric in eval_result):
                    # The stored entry is a metric config; build and apply it.
                    metric_func = build_metric(eval_result[metric].pop())
                    eval_result[metric] = metric_func(feat1, feat2)
            del eval_result['_inception_feat']
        return eval_result
def initialize_graph():
    """Style the global fitness-vs-generation matplotlib axes (plot_1)."""
    global max_generation
    plot_1.xaxis.label.set_color('c')
    plot_1.yaxis.label.set_color('r')
    plot_1.set_xlabel('Generation')
    plot_1.set_ylabel('Fitness')
    plot_1.set_title('Fitness Graph', x=0.2, fontsize=20)
    # Leave a small left margin (5% of the run length) before generation 0.
    plot_1.set_xlim([(- (max_generation / 20)), max_generation])
    plot_1.set_ylim([0, 100])
    plot_1.xaxis.set_major_locator(mtick.MaxNLocator(11))
    plot_1.yaxis.set_major_locator(mtick.MaxNLocator(11))
    # Fitness is shown as a percentage.
    plot_1.yaxis.set_major_formatter(mtick.PercentFormatter())
    plot_1.grid(color='k', alpha=0.5, linestyle='-.', linewidth=0.5)
    plot_1.set_facecolor('xkcd:off white')
class TFRobertaPreLayerNormModel(metaclass=DummyObject):
    """Import placeholder that raises a helpful error unless TensorFlow is installed."""
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def test_parse_mongo_db_arg_hostname_dbname_collection_name():
    """'host:port:db.collection' strings parse into url/db_name/collection."""
    cases = [
        ('localhost:28017:foo.bar', {'url': 'localhost:28017', 'db_name': 'foo', 'collection': 'bar'}),
        ('www.mymongo.db:28017:bar.baz', {'url': 'www.mymongo.db:28017', 'db_name': 'bar', 'collection': 'baz'}),
        ('123.45.67.89:27017:baz.foo', {'url': '123.45.67.89:27017', 'db_name': 'baz', 'collection': 'foo'}),
    ]
    for arg, expected in cases:
        assert (MongoDbOption.parse_mongo_db_arg(arg) == expected)
class TestTaskEmbeddingWorker(TfGraphTestCase):
    """Unit test for TaskEmbeddingWorker rollouts with a stubbed policy."""

    def test_task_embedding_worker(self):
        env = GarageEnv(DummyBoxEnv(obs_dim=(1,)))
        # Fixed one-hot task id; the lambda mirrors the private accessor the
        # worker presumably reads -- confirm against TaskEmbeddingWorker.
        env.active_task_one_hot = np.array([1.0, 0.0, 0.0, 0.0])
        env._active_task_one_hot = (lambda : np.array([1.0, 0.0, 0.0, 0.0]))
        a = np.random.random(env.action_space.shape)
        z = np.random.random(5)
        latent_info = dict(mean=np.random.random(5))
        agent_info = dict(dummy='dummy')
        # Stub policy returning fixed action/latent values.
        policy = Mock()
        policy.get_latent.return_value = (z, latent_info)
        policy.latent_space.flatten.return_value = z
        policy.get_action_given_latent.return_value = (a, agent_info)
        worker = TaskEmbeddingWorker(seed=1, max_path_length=100, worker_number=1)
        worker.update_agent(policy)
        worker.update_env(env)
        rollouts = worker.rollout()
        # The rollout must record the task one-hot and the latent information.
        assert ('task_onehot' in rollouts.env_infos)
        assert np.array_equal(rollouts.env_infos['task_onehot'][0], env.active_task_one_hot)
        assert ('latent' in rollouts.agent_infos)
        assert np.array_equal(rollouts.agent_infos['latent'][0], z)
        assert ('latent_mean' in rollouts.agent_infos)
        assert np.array_equal(rollouts.agent_infos['latent_mean'][0], latent_info['mean'])
def _max_helper_all_tree_reductions(enc_tensor, dim=None, method='log_reduction', keepdim=False):
    """Dispatch max() over an encrypted tensor to the requested reduction.

    Only 'log_reduction' is currently supported; other values raise
    AssertionError. (Previously an unsupported method fell through the `if`
    and silently returned None when asserts were disabled with -O.)
    """
    assert (method == 'log_reduction'), f'unsupported reduction method: {method}'
    return _max_helper_log_reduction(enc_tensor, dim, keepdim)
class Toast():
    """A transient on-screen notification with fade-in/out, optional icon,
    spinner, and progress bar(s), rendered via imgui.

    NOTE(review): alpha/height/width are read as attributes in render()
    (self.alpha, self.width) -- they were presumably decorated with
    @property in the original source; confirm upstream.
    """
    # Seconds the toast stays fully visible (unless sticky).
    msg_duration = 4
    # Seconds for each fade (in and out).
    fade_duration = 0.25

    def __init__(self, message, title, icon, sticky=False, spinner=False, progress=False):
        # An icon without a title borrows the capitalized icon name as title.
        if (icon and (title is None)):
            title = icon.capitalize()
        self._alpha = 0
        self._height = None
        self._default_message_height = 75
        self._create_time = time.time()
        self._start_fade_time = None
        self._progress_vals = [0]
        self.spinner = spinner
        self.message = message
        self.title = title
        self.icon = icon
        self.sticky = sticky
        self.progress = self._parse_progress(progress)

    def __str__(self):
        return '<Toast message={!r}, title={!r}, icon={!r}, alpha={!r}, sticky={!r}, spinner={!r}, progress={!r}'.format(self.message, self.title, self.icon, self.alpha, self.sticky, self.spinner, self.progress)

    def alpha(self):
        """Current opacity: fade in, hold (forever if sticky), fade out, gone."""
        elapsed = (time.time() - self._create_time)
        if (elapsed < self.fade_duration):
            return (elapsed / self.fade_duration)
        elif (self.sticky or (elapsed < (self.fade_duration + self.msg_duration))):
            return 1
        elif (elapsed < ((self.fade_duration * 2) + self.msg_duration)):
            # Anchor the fade-out to when it actually started, so un-sticking
            # mid-display still fades smoothly.
            if (self._start_fade_time is None):
                self._start_fade_time = time.time()
            return (1 - ((time.time() - self._start_fade_time) / self.fade_duration))
        else:
            return 0

    def expired(self):
        # True once a non-sticky toast has fully faded out.
        return ((not self.sticky) and ((time.time() - self._create_time) > (self.msg_duration + (self.fade_duration * 2))))

    def height(self):
        """Window height: cached from the last render, else estimated from content."""
        if self._height:
            return self._height
        else:
            line_height = imgui.get_text_line_height_with_spacing()
            running_height = 0
            if (self.title and (self.message is None)):
                running_height = line_height
            elif (self.title and self.message):
                running_height = ((line_height * 1.5) + self._default_message_height)
            else:
                running_height = self._default_message_height
            if self.progress:
                running_height += line_height
            return running_height

    def width(self):
        # Fixed toast width in pixels.
        return 400

    def _parse_progress(self, val):
        """Normalize the progress argument: bool toggles, a number seeds one
        bar, a list of numbers creates one bar per value. Returns whether a
        progress bar is shown."""
        if isinstance(val, bool):
            return val
        elif isinstance(val, (int, float)):
            self._progress_vals[0] = val
            return True
        elif isinstance(val, list):
            if (not all((isinstance(x, (float, int)) for x in val))):
                raise ValueError('Progress must be a float or list of floats.')
            self._progress_vals = val
            return True
        else:
            return False

    def done(self):
        # Un-stick and begin the fade-out immediately.
        self.sticky = False
        self.msg_duration = 0

    def set_progress(self, val, bar_id=0):
        self._progress_vals[bar_id] = val

    def render(self, viz, toast_id=0, height_offset=0, padding=20):
        """Draw the toast as a borderless imgui window anchored bottom-right."""
        imgui.push_style_var(imgui.STYLE_ALPHA, self.alpha)
        _old_rounding = imgui.get_style().window_rounding
        imgui.get_style().window_rounding = 5
        imgui.set_next_window_position((viz.content_width - (self.width + padding)), (viz.content_height - height_offset))
        imgui.set_next_window_size(self.width, 0)
        imgui.begin(f'toast{toast_id}', flags=((imgui.WINDOW_NO_TITLE_BAR | imgui.WINDOW_NO_RESIZE) | imgui.WINDOW_NO_SCROLLBAR))
        if self.icon:
            viz.icon(self.icon, sameline=True)
        if self.title:
            if self.spinner:
                imgui.text(f'{self.title}{imgui_utils.spinner_text()}')
            else:
                imgui.text(self.title)
            # Separator only when both a title and a body are present.
            if self.message:
                imgui.separator()
        if self.message:
            imgui.push_text_wrap_pos()
            imgui.text(self.message)
            if (self.spinner and (not self.title)):
                imgui.same_line()
                imgui_utils.spinner()
            imgui.pop_text_wrap_pos()
        if self.progress:
            for val in self._progress_vals:
                imgui_utils.progress_bar(val, y_pad=2, color=(0.55, 1, 0.47, 1))
        # Cache the real window height for subsequent layout queries.
        self._height = imgui.get_window_height()
        imgui.end()
        imgui.pop_style_var()
        imgui.get_style().window_rounding = _old_rounding
class TFBertForPreTraining(metaclass=DummyObject):
    """Import placeholder that raises a helpful error unless TensorFlow is installed."""
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def split_4d(task_string):
    """Split every 4D NIfTI volume of a task's imagesTr/imagesTs into 3D files.

    Recreates the task's output folder from scratch, copies labels and
    dataset.json over unchanged, and fans the per-file splitting work out to
    a pool of 8 worker processes.
    """
    base_folder = join(raw_dataset_dir, task_string)
    output_folder = join(splitted_4d_output_dir, task_string)
    # Start from a clean slate for this task.
    if isdir(output_folder):
        shutil.rmtree(output_folder)
    files = []
    output_dirs = []
    maybe_mkdir_p(output_folder)
    for subdir in ('imagesTr', 'imagesTs'):
        curr_out_dir = join(output_folder, subdir)
        if not isdir(curr_out_dir):
            os.mkdir(curr_out_dir)
        curr_dir = join(base_folder, subdir)
        nii_files = sorted(join(curr_dir, fname) for fname in os.listdir(curr_dir) if fname.endswith('.nii.gz'))
        for nii_path in nii_files:
            files.append(nii_path)
            output_dirs.append(curr_out_dir)
    shutil.copytree(join(base_folder, 'labelsTr'), join(output_folder, 'labelsTr'))
    p = Pool(8)
    p.starmap(split_4d_nifti, zip(files, output_dirs))
    p.close()
    p.join()
    shutil.copy(join(base_folder, 'dataset.json'), output_folder)
class UserResponse(ConversationTurn):
    """A user turn in a conversation, carrying entity-linking annotations."""
    # Speaker is fixed to USER for this turn type.
    speaker: str = 'USER'
    annotations: List[TurnAnnotation] = Field(..., description='List of annotations.')

    class Config():
        # Example payload surfaced in the generated OpenAPI schema.
        schema_extra = {'example': {'speaker': 'USER', 'utterance': 'I am allergic to tomatoes but we have a lot of famous Italian restaurants here in London.', 'annotations': [[17, 8, 'tomatoes', 'Tomato'], [54, 19, 'Italian restaurants', 'Italian_cuisine'], [82, 6, 'London', 'London']]}}
def parse_args():
    """Build and parse the CLI arguments for the linear-evaluation script."""
    parser = ArgumentParser(description='Testing script: Linear evaluation')
    # Required positionals: checkpoint path and architecture name.
    positionals = [
        ('model_path', 'Path to the (discriminator) model checkpoint'),
        ('architecture', 'Architecture'),
    ]
    for name, help_text in positionals:
        parser.add_argument(name, type=str, help=help_text)
    parser.add_argument('--n_classes', type=int, default=10, help='Number of classes (default: 10)')
    parser.add_argument('--batch_size', type=int, default=256, help='Batch size (default: 256)')
    return parser.parse_args()
class ELUFlow(Flow):
    """Normalizing flow applying an ELU nonlinearity elementwise.

    forward: y = x for x >= 0, alpha*(exp(x)-1) for x < 0. There dy/dx is
    alpha*exp(x), so the log-determinant is the sum of (x + log(alpha)) over
    the negative entries (and 0 elsewhere).
    """

    def __init__(self, alpha=1.0, inverse=False):
        super(ELUFlow, self).__init__(inverse)
        self.alpha = alpha

    def forward(self, input: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        out = F.elu(input, self.alpha, False)
        input = input.view(input.size(0), (- 1))
        logdet = (input + math.log(self.alpha))
        # Mask keeps only x < 0 entries; dy/dx = 1 (log-det 0) for x >= 0.
        logdet = (input.lt(0.0).type_as(input) * logdet).sum(dim=1)
        return (out, logdet)

    def backward(self, input: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
        # Invert ELU: x = log1p(y/alpha) for y < 0, identity otherwise.
        mask = input.lt(0.0).type_as(input)
        out = ((input * (1.0 - mask)) + (mask * logPlusOne(input.div(self.alpha))))
        out_flat = out.view(input.size(0), (- 1))
        logdet = (out_flat + math.log(self.alpha))
        # Negated forward log-det, evaluated at the reconstructed input.
        logdet = (mask.view(out_flat.size()) * logdet).sum(dim=1).mul((- 1.0))
        return (out, logdet)

    def init(self, data, init_scale=1.0) -> Tuple[(torch.Tensor, torch.Tensor)]:
        # Data-dependent init is a no-op for this parameter-free flow.
        with torch.no_grad():
            return self.forward(data)

    def extra_repr(self):
        return 'inverse={}, alpha={}'.format(self.inverse, self.alpha)

    # NOTE(review): presumably decorated @classmethod upstream -- confirm.
    def from_params(cls, params: Dict) -> 'ELUFlow':
        return ELUFlow(**params)
class CorrBlockSingleScale(nn.Module):
    """Single-scale all-pairs correlation volume with windowed lookup
    (RAFT-style, without the multi-level pyramid)."""

    def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
        # num_levels is accepted for API parity with CorrBlock but unused here.
        super().__init__()
        self.radius = radius
        corr = CorrBlock.corr(fmap1, fmap2)
        (batch, h1, w1, dim, h2, w2) = corr.shape
        # Flatten the source pixels into the batch dim for grid sampling.
        self.corr = corr.reshape(((batch * h1) * w1), dim, h2, w2)

    def __call__(self, coords):
        """Sample a (2r+1)x(2r+1) correlation window around each coordinate."""
        r = self.radius
        coords = coords.permute(0, 2, 3, 1)
        (batch, h1, w1, _) = coords.shape
        corr = self.corr
        dx = torch.linspace((- r), r, ((2 * r) + 1))
        dy = torch.linspace((- r), r, ((2 * r) + 1))
        delta = torch.stack(torch.meshgrid(dy, dx), axis=(- 1)).to(coords.device)
        centroid_lvl = coords.reshape(((batch * h1) * w1), 1, 1, 2)
        delta_lvl = delta.view(1, ((2 * r) + 1), ((2 * r) + 1), 2)
        coords_lvl = (centroid_lvl + delta_lvl)
        corr = bilinear_sampler(corr, coords_lvl)
        out = corr.view(batch, h1, w1, (- 1))
        out = out.permute(0, 3, 1, 2).contiguous().float()
        return out

    # NOTE(review): invoked as CorrBlock.corr(...) in __init__; presumably a
    # @staticmethod in the original source -- confirm.
    def corr(fmap1, fmap2):
        """All-pairs dot-product correlation, scaled by 1/sqrt(dim)."""
        (batch, dim, ht, wd) = fmap1.shape
        fmap1 = fmap1.view(batch, dim, (ht * wd))
        fmap2 = fmap2.view(batch, dim, (ht * wd))
        corr = torch.matmul(fmap1.transpose(1, 2), fmap2)
        corr = corr.view(batch, ht, wd, 1, ht, wd)
        return (corr / torch.sqrt(torch.tensor(dim).float()))
class ResNet50TP(nn.Module):
    """ResNet-50 with temporal average pooling (TP) for video re-identification.

    Input is (batch, seq_len, C, H, W); frames are folded into the batch for
    the 2D backbone, then averaged over the temporal axis into one feature
    vector per clip.
    """

    def __init__(self, num_classes, loss={'xent'}, **kwargs):
        super(ResNet50TP, self).__init__()
        self.loss = loss
        backbone = torchvision.models.resnet50(pretrained=True)
        # Drop the final avgpool + fc; keep only the convolutional trunk.
        self.base = nn.Sequential(*list(backbone.children())[:(- 2)])
        self.feat_dim = 2048
        self.classifier = nn.Linear(self.feat_dim, num_classes)

    def forward(self, x):
        batch_size = x.size(0)
        seq_len = x.size(1)
        # Fold the temporal axis into the batch for the 2D backbone.
        x = x.view(batch_size * seq_len, x.size(2), x.size(3), x.size(4))
        x = self.base(x)
        x = F.avg_pool2d(x, x.size()[2:])
        x = x.view(batch_size, seq_len, (- 1))
        x = x.permute(0, 2, 1)
        # Temporal average pooling over the clip.
        f = F.avg_pool1d(x, seq_len)
        f = f.view(batch_size, self.feat_dim)
        if not self.training:
            return f
        y = self.classifier(f)
        if self.loss == {'xent'}:
            return y
        if self.loss == {'xent', 'htri'}:
            return (y, f)
        if self.loss == {'cent'}:
            return (y, f)
        raise KeyError('Unsupported loss: {}'.format(self.loss))
class SharedDepthwiseInducingImages(SharedInducingImages, DepthwiseInducingImages):
    """Inducing images that are both shared across outputs and depthwise."""

    def __init__(self, images: TensorData, channels_in: int, name: Optional[str]=None):
        # Deliberately initializes via SharedInducingImages.__init__ only (not
        # super()); presumably it sets up everything DepthwiseInducingImages
        # needs as well -- confirm against the base classes.
        SharedInducingImages.__init__(self, name=name, images=images, channels_in=channels_in)
def calc_index(node, c):
    """Flatten a node's (x, y, yaw) grid indices into a single array index."""
    yaw_offset = (node.yaw_index - c.min_yaw) * c.x_w * c.y_w
    y_offset = (node.y_index - c.min_y) * c.x_w
    x_offset = node.x_index - c.min_x
    ind = yaw_offset + y_offset + x_offset
    # Non-positive indices indicate out-of-grid nodes; report but still return.
    if ind <= 0:
        print('Error(calc_index):', ind)
    return ind
class Params():
    """Evaluation configuration: GNN hyper-parameters, processed-data paths,
    and checkpoint paths taken from the docopt command line."""

    def __init__(self):
        self.cuda_details = gnn_utils.CudaDetails(use_cuda=torch.cuda.is_available())
        # GNN hyper-parameters -- presumably must match training time; confirm.
        self.gnn_args = dict(output_dim=25, hidden_layer_size=101, edge_names=['single', 'double', 'triple'], embedding_dim=50, T=4)
        processed_data_dir = mchef_config.get_processed_data_dir()
        self.path_mol_details = path.join(processed_data_dir, 'reactants_feats.pick')
        # (label, path) pairs of product files to evaluate.
        self.product_files_to_try = [('test_reachable', path.join(processed_data_dir, 'test_products.txt')), ('test_unreachable', path.join(processed_data_dir, 'test_unreachable_products.txt'))]
        # Checkpoint paths supplied on the command line via docopt.
        arguments = docopt(__doc__)
        self.weights_to_use_mchef = arguments['<input_weights_mchef>']
        self.weights_to_use_regressor = arguments['<input_weights_regressor>']
def vgg13_bn(pretrained=False, **kwargs):
    """VGG-13 with batch normalization; optionally load pretrained weights."""
    model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
    # Record the configuration used so callers can introspect the model.
    model.cfg = cfg['B']
    model.batch_norm = True
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['vgg13_bn'])
        model.load_state_dict(state_dict)
    return model
def main():
    """Train a pose-estimation network end-to-end from a YAML config:
    build model/data/optimizer, optionally auto-resume, then train/validate
    per epoch, checkpointing the best model."""
    args = parse_args()
    update_config(cfg, args)
    if (args.prevModelDir and args.modelDir):
        copy_prev_models(args.prevModelDir, args.modelDir)
    (logger, final_output_dir, tb_log_dir) = create_logger(cfg, args.cfg, 'train')
    logger.info(pprint.pformat(args))
    logger.info(cfg)
    # cudnn behavior flags from the config.
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
    # NOTE(review): eval() on config-derived strings -- safe only for trusted configs.
    model = eval((('models.' + cfg.MODEL.NAME) + '.get_pose_net'))(cfg, is_train=True)
    # Snapshot the model definition next to the outputs for reproducibility.
    this_dir = os.path.dirname(__file__)
    shutil.copy2(os.path.join(this_dir, '../lib/models', (cfg.MODEL.NAME + '.py')), final_output_dir)
    writer_dict = {'writer': SummaryWriter(log_dir=tb_log_dir), 'train_global_steps': 0, 'valid_global_steps': 0}
    dump_input = torch.rand((1, 3, cfg.MODEL.IMAGE_SIZE[1], cfg.MODEL.IMAGE_SIZE[0]))
    writer_dict['writer'].add_graph(model, (dump_input,))
    logger.info(get_model_summary(model, dump_input))
    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
    criterion = JointsMSELoss(use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT).cuda()
    # ImageNet normalization statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = eval(('dataset.' + cfg.DATASET.DATASET))(cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET, True, transforms.Compose([transforms.ToTensor(), normalize]))
    valid_dataset = eval(('dataset.' + cfg.DATASET.DATASET))(cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False, transforms.Compose([transforms.ToTensor(), normalize]))
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=(cfg.TRAIN.BATCH_SIZE_PER_GPU * len(cfg.GPUS)), shuffle=cfg.TRAIN.SHUFFLE, num_workers=cfg.WORKERS, pin_memory=cfg.PIN_MEMORY)
    valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=(cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS)), shuffle=False, num_workers=cfg.WORKERS, pin_memory=cfg.PIN_MEMORY)
    best_perf = 0.0
    best_model = False
    last_epoch = (- 1)
    optimizer = get_optimizer(cfg, model)
    begin_epoch = cfg.TRAIN.BEGIN_EPOCH
    checkpoint_file = os.path.join(final_output_dir, 'checkpoint.pth')
    # Resume from the latest checkpoint when auto-resume is enabled.
    if (cfg.AUTO_RESUME and os.path.exists(checkpoint_file)):
        logger.info("=> loading checkpoint '{}'".format(checkpoint_file))
        checkpoint = torch.load(checkpoint_file)
        begin_epoch = checkpoint['epoch']
        best_perf = checkpoint['perf']
        last_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger.info("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_file, checkpoint['epoch']))
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, cfg.TRAIN.LR_STEP, cfg.TRAIN.LR_FACTOR, last_epoch=last_epoch)
    for epoch in range(begin_epoch, cfg.TRAIN.END_EPOCH):
        # NOTE(review): scheduler.step() before training predates PyTorch 1.1's
        # recommended ordering (after optimizer.step()); kept as-is.
        lr_scheduler.step()
        train(cfg, train_loader, model, criterion, optimizer, epoch, final_output_dir, tb_log_dir, writer_dict)
        perf_indicator = validate(cfg, valid_loader, valid_dataset, model, criterion, final_output_dir, tb_log_dir, writer_dict)
        # Track whether this epoch set a new best validation score.
        if (perf_indicator >= best_perf):
            best_perf = perf_indicator
            best_model = True
        else:
            best_model = False
        logger.info('=> saving checkpoint to {}'.format(final_output_dir))
        save_checkpoint({'epoch': (epoch + 1), 'model': cfg.MODEL.NAME, 'state_dict': model.state_dict(), 'best_state_dict': model.module.state_dict(), 'perf': perf_indicator, 'optimizer': optimizer.state_dict()}, best_model, final_output_dir)
    final_model_state_file = os.path.join(final_output_dir, 'final_state.pth')
    logger.info('=> saving final model state to {}'.format(final_model_state_file))
    torch.save(model.module.state_dict(), final_model_state_file)
    writer_dict['writer'].close()
class NegLogLikehoodLoss(torch.nn.Module):
    """Negative log-likelihood loss for scored positive/negative pairs.

    loss = mean(softplus(-positive_score) + softplus(negative_score))

    which is the standard logistic loss pushing positive scores up and
    negative scores down.
    """

    def __init__(self):
        super(NegLogLikehoodLoss, self).__init__()

    def forward(self, positive_score, negative_score):
        """Compute the mean loss over the batch.

        FIX: uses torch's numerically stable softplus -- the previous
        hand-rolled log(1 + exp(x)) overflows to inf for large x.
        """
        output = (torch.nn.functional.softplus((- positive_score)) + torch.nn.functional.softplus(negative_score))
        return torch.mean(output)
class DetectAnomaly(plc.Callback):
    """Lightning callback that aborts training as soon as the loss is NaN/inf."""

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, unused=0):
        loss = outputs['loss']
        if not loss.isfinite():
            raise ValueError(f'Detected NaN/Infinite loss: "{loss}"')
def _set_common_bokeh_fig_props(fig):
    """Apply shared styling (toolbar, outline, fonts, date axis) to a bokeh figure."""
    # Disable all interactive toolbar tools.
    fig.toolbar.active_drag = None
    fig.toolbar.active_scroll = None
    fig.toolbar.active_tap = None
    fig.outline_line_color = '#333333'
    fig.outline_line_width = 1
    fig.outline_line_alpha = 0.7
    fig.title.text_font_size = '10px'
    fig.legend.label_text_font_size = '10px'
    fig.legend.spacing = 0
    fig.legend.margin = 3
    fig.legend.label_standoff = 5
    fig.legend.label_height = 0
    # X axis shows dates (e.g. 'Jan-05'), rotated ~73 degrees for density.
    fig.xaxis.ticker.desired_num_ticks = 21
    fig.xaxis.formatter = bokeh.models.DatetimeTickFormatter(days=['%b-%d'])
    fig.xaxis.major_label_orientation = ((3.1415 / 4) + 0.5)
    fig.xaxis.axis_label_text_font_size = '16px'
    fig.xaxis.major_label_text_font_size = '10px'
    fig.xaxis.axis_label_text_font_style = 'normal'
    fig.y_range.start = 0
    fig.yaxis.axis_label_text_font_size = '10px'
    fig.yaxis.axis_label_text_font_style = 'normal'
    fig.yaxis.major_label_text_font_size = '10px'
class GPS():
    """Background reader for a serial NMEA GPS device.

    A worker thread continuously reads $GPGGA sentences and keeps the latest
    projected (x, y) position plus the wall-clock time it was received.
    """

    def __init__(self, port='/dev/ttyUSB0'):
        self.serial = None
        self.port = port
        self.stop_read_event = threading.Event()
        self.read_cyclic = threading.Thread(target=self.read_data, args=())
        # Last known projected position and its timestamp.
        self.x = 0.0
        self.y = 0.0
        self.t = 0.0

    def start(self):
        """Open the serial port and start the background reader thread."""
        try:
            self.serial = serial.Serial(self.port, 115200)
        # NOTE(review): bare except, and the reader thread is started even if
        # opening failed (self.serial stays None) -- confirm intended.
        except:
            print('Error when open GPS')
        self.stop_read_event.clear()
        self.read_cyclic.start()

    def close(self):
        """Stop the reader thread and close the port."""
        self.stop_read_event.set()
        self.serial.close()

    def read_data(self):
        """Worker loop: skip lines until the next $GPGGA sentence, parse it."""
        while (not self.stop_read_event.is_set()):
            data = self.serial.readline()
            try:
                data = data.decode()
            except:
                continue
            # Discard sentences until a GGA fix line arrives.
            while (data[:6] != '$GPGGA'):
                data = self.serial.readline()
                try:
                    data = data.decode()
                except:
                    continue
            self.t = time.time()
            (self.x, self.y) = self.parseGPS(data)

    def get(self):
        """Return the latest (x, y, timestamp)."""
        return (self.x, self.y, self.t)

    def parseGPS(self, line):
        """Parse a $GPGGA sentence into projected (x, y).

        On any parse error the previous position is returned unchanged.
        """
        try:
            data = line.split(',')
            latitude = data[2]
            longtitude = data[4]
            # NMEA latitude is ddmm.mmmm, longitude dddmm.mmmm -> decimal degrees.
            lan_degree = latitude[:2]
            lan_minute = latitude[2:]
            latitude = (float(lan_degree) + (float(lan_minute) / 60))
            long_degree = longtitude[:3]
            long_minute = longtitude[3:]
            longtitude = (float(long_degree) + (float(long_minute) / 60))
            (x, y) = self.gps2xy(latitude, longtitude)
            return (x, y)
        except:
            return (self.x, self.y)

    def gps2xy(self, latitude, longtitude):
        """Project WGS-84 lat/lon (degrees) to plane coordinates.

        NOTE(review): appears to be a Mercator-style projection on the WGS-84
        ellipsoid with base latitude 30 degrees -- confirm the intended
        projection before reusing elsewhere.
        """
        latitude = ((latitude * math.pi) / 180)
        longtitude = ((longtitude * math.pi) / 180)
        # WGS-84 semi-major / semi-minor axes in meters.
        radius = 6378137
        distance = 6356752.3142
        base = ((30 * math.pi) / 180)
        radius_square = pow(radius, 2)
        distance_square = pow(distance, 2)
        # First and second eccentricities of the ellipsoid.
        e = math.sqrt((1 - (distance_square / radius_square)))
        e2 = math.sqrt(((radius_square / distance_square) - 1))
        cosb0 = math.cos(base)
        N = ((radius_square / distance) / math.sqrt((1 + (pow(e2, 2) * pow(cosb0, 2)))))
        K = (N * cosb0)
        sinb = math.sin(latitude)
        tanv = math.tan(((math.pi / 4) + (latitude / 2)))
        E2 = pow(((1 - (e * sinb)) / (1 + (e * sinb))), (e / 2))
        xx = (tanv * E2)
        xc = (K * math.log(xx))
        yc = (K * longtitude)
        return (xc, yc)
def read_opt_def(filename, total_site, header_lines=5):
    """Read a complex-valued operator definition file into an array.

    The file has *header_lines* header rows followed by rows of the form
    'index real imag' (extra columns are ignored).

    Parameters
    ----------
    filename : str
        Path to the definition file.
    total_site : int or shape
        Size/shape of the output array.
    header_lines : int
        Number of leading header rows to skip (default 5, matching the
        original hard-coded behavior).

    Returns
    -------
    np.ndarray of dtype complex, indexed by the first column of each row.
    """
    arr = np.zeros(total_site, dtype=complex)
    # 'with' guarantees the file is closed even if a row fails to parse
    # (the previous version leaked the handle on error).
    with open(filename, 'r') as rf:
        for line in rf.readlines()[header_lines:]:
            fields = line.split()
            arr[int(fields[0])] = (float(fields[1]) + (1j * float(fields[2])))
    return arr
class MXNetDataLoader(BaseDataLoader):
    """DataLoader adapter that produces an mx.gluon DataLoader."""

    def _generate_dataloader(self, dataset, batch_size, last_batch, collate_fn, sampler, batch_sampler, num_workers, pin_memory, shuffle, distributed):
        # Gluon's DataLoader has no shuffle switch here; warn and ignore it.
        if shuffle:
            logging.warning('Shuffle is not supported yet in MXNetDataLoader, ignoring shuffle keyword.')
        # `distributed` is accepted for interface parity but unused.
        return mx.gluon.data.DataLoader(dataset, batch_size=batch_size, batchify_fn=collate_fn, last_batch=last_batch, num_workers=num_workers, pin_memory=pin_memory, sampler=sampler, batch_sampler=batch_sampler)
def eval_macro_pw_f1(group2pred, group2gold):
 """Score predicted clusterings against gold clusterings pairwise.

 For each aligned (pred, gold) pair of cluster assignments, the
 assignments are expanded into sets of within-cluster index pairs and
 compared with ``pairwise_stats``.  The per-group micro precisions and
 recalls are summed and an F1 is computed from the sums.

 NOTE(review): the summed values are reported under ``macro_*`` keys
 without dividing by the number of groups -- presumably
 ``pairwise_stats`` returns already-normalised contributions; verify
 against its implementation.

 Returns a dict with ``macro_pw_prec``, ``macro_pw_rec``,
 ``macro_pw_f1``.
 """
 def clusters2dict(assgn):
  # cluster id -> list of member indices
  d = collections.defaultdict(list)
  for (idx, c) in enumerate(assgn):
   d[c].append(idx)
  return d
 assert (len(group2pred) == len(group2gold))
 scores = []
 for (pred, gold) in zip(group2pred, group2gold):
  pred_pairs = set(x for c in clusters2dict(pred).values() for x in pairs_from_cluster(c))
  gold_pairs = set(x for c in clusters2dict(gold).values() for x in pairs_from_cluster(c))
  scores.append(pairwise_stats(pred_pairs, gold_pairs))
 prec = sum(x['micro_pw_prec'] for x in scores)
 rec = sum(x['micro_pw_rec'] for x in scores)
 # Guard against ZeroDivisionError when both precision and recall are 0
 # (the original crashed in that case).
 f1 = (2.0 * prec * rec / (prec + rec)) if (prec + rec) else 0.0
 return {'macro_pw_prec': prec, 'macro_pw_rec': rec, 'macro_pw_f1': f1}
def main():
 """Extract waypoint and callsign metadata from a list of info files.

 argv: <list_of_info> <keyprefix> <rec2waypoints_fout> <rec2callsign_list_fout>

 Writes two text tables: recording-key -> waypoints string, and
 recording-key -> space-joined callsign list.
 """
 (list_of_info, keyprefix, rec2waypoints_fout, rec2callsign_list_fout) = sys.argv[1:]
 rec2waypoints = []
 rec2callsign_list = []
 with open(list_of_info) as listing:
  for entry in listing:
   print(entry.strip(), file=sys.stderr)
   info_file = entry.strip()
   # Recording key = prefix + file basename without its extension.
   reco_key = keyprefix + info_file.split('/')[-1].rsplit('.', maxsplit=1)[0]
   # First pass: grab the waypoints line, if any.
   with open(info_file, 'r') as info_fd:
    waypoints = ''
    for line in info_fd:
     if re.search('^waypoints nearby:', line):
      waypoints = re.sub('^waypoints nearby:', '', line).strip()
      break
   rec2waypoints.append([reco_key, waypoints])
   # Second pass: collect the callsign block that follows the header
   # line, up to the first blank line.
   with open(info_file, 'r') as info_fd:
    callsign_list = []
    for line in info_fd:
     if re.search('^callsigns nearby:', line):
      for line in info_fd:
       if len(line.strip()) == 0:
        break
       if re.search(':', line):
        callsign_list.append(line.strip().split()[0])
       else:
        callsign_list.extend(line.strip().split())
   rec2callsign_list.append([reco_key, ' '.join(callsign_list)])
 np.savetxt(rec2waypoints_fout, rec2waypoints, fmt='%s')
 np.savetxt(rec2callsign_list_fout, rec2callsign_list, fmt='%s')
def is_ckpt_format(model_path):
 """Return True iff *model_path* looks like a TF checkpoint directory.

 A checkpoint directory is recognised by containing exactly one
 ``.meta`` and exactly one ``.index`` file.

 Parameters
 ----------
 model_path : str
  Directory to inspect.
 """
 extensions = [os.path.splitext(name)[-1] for name in os.listdir(model_path)]
 # Return the boolean expression directly instead of if/True/False.
 return extensions.count('.meta') == 1 and extensions.count('.index') == 1
def train(gpmodule, optimizer=None, loss_fn=None, retain_graph=None, num_steps=1000):
 """Optimise a GP module for *num_steps* steps; return the loss history.

 Defaults to Adam(lr=0.01) over the module's parameters and the
 TraceMeanField ELBO differentiable loss.
 """
 if optimizer is None:
  optimizer = torch.optim.Adam(gpmodule.parameters(), lr=0.01)
 if loss_fn is None:
  loss_fn = TraceMeanField_ELBO().differentiable_loss
 def closure():
  # Standard torch closure: zero grads, compute loss, backpropagate.
  optimizer.zero_grad()
  loss = loss_fn(gpmodule.model, gpmodule.guide)
  torch_backward(loss, retain_graph)
  return loss
 losses = []
 with tqdm.trange(num_steps) as bar:
  for epoch in bar:
   step_loss = optimizer.step(closure)
   losses.append(torch_item(step_loss))
   bar.set_postfix(dict(Loss=f'{torch_item(step_loss):.3f}'))
 return losses
def get_val(book: "List[PriceLevel]", level: int) -> Tuple[int, int]:
 """Return ``(price, volume)`` at *level* of *book*, or ``(0, 0)``.

 ``(0, 0)`` is returned when the book is empty or the requested level
 does not exist.
 """
 try:
  entry = book[level]
  return (entry[0], entry[1])
 except (IndexError, TypeError):
  # Missing level or empty book: report an empty quote.  (Was a bare
  # ``except:``; the explicit empty-book check was redundant with the
  # IndexError path.)
  return (0, 0)
def eval_mesh(mesh_pred, mesh_gt, bb_min, bb_max, n_points=100000):
 """Compare a predicted mesh to ground truth.

 Produces the pointcloud metrics from ``eval_pointcloud`` plus a
 volumetric IoU estimated from uniform samples inside the bounding box
 ``[bb_min, bb_max]``.
 """
 # Surface sampling (points + corresponding face normals) for both meshes.
 (pts_pred, face_idx) = mesh_pred.sample(n_points, return_index=True)
 pts_pred = pts_pred.astype(np.float32)
 normals_pred = mesh_pred.face_normals[face_idx]
 (pts_gt, face_idx) = mesh_gt.sample(n_points, return_index=True)
 pts_gt = pts_gt.astype(np.float32)
 normals_gt = mesh_gt.face_normals[face_idx]
 out_dict = eval_pointcloud(pts_pred, pts_gt, normals_pred, normals_gt)
 # Volumetric IoU from 10x as many uniform bounding-box samples.
 extent = bb_max - bb_min
 box_samples = np.random.rand(n_points * 10, 3) * extent + bb_min
 occ_pred = implicit_waterproofing(mesh_pred, box_samples)[0]
 occ_gt = implicit_waterproofing(mesh_gt, box_samples)[0]
 union = (occ_pred | occ_gt).astype(np.float32).sum()
 intersection = (occ_pred & occ_gt).astype(np.float32).sum()
 out_dict['iou'] = intersection / union
 return out_dict
def dataclass_to_dict(obj):
 """Return *obj*'s instance attributes as a dict, dropping ``_``-prefixed names."""
 public = {}
 for key, value in obj.__dict__.items():
  if not key.startswith('_'):
   public[key] = value
 return public
class User():
 """Visualisation runner for a semantic-segmentation dataset.

 Dynamically loads a dataset-specific parser module from disk and, for
 each scan batch in the validation split, writes the projected colour
 image under ``logdir/sequences/<seq>/imgs``.
 """
 def __init__(self, ARCH, DATA, datadir, preddir, logdir, modeldir):
  # Configuration dicts (architecture / dataset) and directory layout.
  self.ARCH = ARCH
  self.DATA = DATA
  self.datadir = datadir
  self.preddir = preddir
  self.logdir = logdir
  self.modeldir = modeldir
  # NOTE(review): ``imp`` is deprecated since Python 3.4 (use
  # importlib); the parser module is loaded from the dataset folder.
  parserModule = imp.load_source('parserModule', (((booger.TRAIN_PATH + '/tasks/semantic/dataset/') + self.DATA['name']) + '/vis_parser.py'))
  # Build the data parser in test mode over train/valid/test splits.
  self.parser = parserModule.Parser(root=self.datadir, pred_root=preddir, train_sequences=self.DATA['split']['train'], valid_sequences=self.DATA['split']['valid'], test_sequences=self.DATA['split']['test'], labels=self.DATA['labels'], color_map=self.DATA['color_map'], learning_map=self.DATA['learning_map'], learning_map_inv=self.DATA['learning_map_inv'], sensor=self.ARCH['dataset']['sensor'], mode='test', frame_num=4, max_points=self.ARCH['dataset']['max_points'], batch_size=4, workers=self.ARCH['train']['workers'], gt=True, shuffle_train=False)
  # No post-processing configured; infer_subset falls through to
  # plain image dumping when this is falsy.
  self.post = None
 def infer(self):
  """Run inference over the validation split only."""
  self.infer_subset(loader=self.parser.get_valid_set(), to_orig_fn=self.parser.to_original)
  print('Finished Infering')
  return
 def infer_subset(self, loader, to_orig_fn):
  """Dump the projected colour image for every frame of every batch.

  ``to_orig_fn`` is accepted but unused in this code path.
  """
  with torch.no_grad():
   end = time.time()
   for (i, (proj_colors, path_seq, path_name)) in enumerate(loader):
    if self.post:
     # Post-processing branch intentionally not implemented here.
     pass
    else:
     # proj_colors is indexed [batch, frame, ...]; one image per frame.
     for batch_idx in range(proj_colors.shape[0]):
      for frame_idx in range(proj_colors.shape[1]):
       cur_proj_colors = proj_colors[(batch_idx, frame_idx)].numpy()
       img_path = os.path.join(self.logdir, 'sequences', path_seq[frame_idx][batch_idx], 'imgs')
       os.makedirs(img_path, exist_ok=True)
       # Output file mirrors the label filename with a .jpg suffix.
       img_file = os.path.join(img_path, path_name[frame_idx][batch_idx].replace('.label', '.jpg'))
       print(img_file)
       # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2;
       # this requires an old SciPy (or replacement with imageio).
       scipy.misc.imsave(img_file, cur_proj_colors)
       print('Infered seq', path_seq[frame_idx][batch_idx], 'scan', path_name[frame_idx][batch_idx], 'in', (time.time() - end), 'sec')
       end = time.time()
def set_homotopy_continuation_gamma(regamma=0, imgamma=0):
 """Set the gamma constant of the homotopy.

 When both parts are zero the user is prompted interactively for the
 real and imaginary components; otherwise the given values are used.
 """
 from phcpy.phcpy2c3 import py2c_padcon_set_homotopy_continuation_gamma
 if (regamma != 0) or (imgamma != 0):
  return py2c_padcon_set_homotopy_continuation_gamma(regamma, imgamma)
 # Interactive fallback: ask for both components of gamma.
 real_part = float(input('-> give the real part of gamma : '))
 imag_part = float(input('-> give the imaginary part of gamma : '))
 return py2c_padcon_set_homotopy_continuation_gamma(real_part, imag_part)
def anneal_dsm_score_estimation(scorenet, samples, labels, sigmas, anneal_power=2.0):
 """Annealed denoising score-matching loss.

 Perturbs *samples* with Gaussian noise whose per-sample scale is
 ``sigmas[labels]``, queries *scorenet* at the perturbed points, and
 penalises the squared distance to the true Gaussian score, weighted
 by ``sigma ** anneal_power``.  Returns the batch-mean loss.
 """
 sigma_shape = [samples.shape[0]] + [1] * (samples.dim() - 1)
 used_sigmas = sigmas[labels].view(*sigma_shape)
 perturbed = samples + torch.randn_like(samples) * used_sigmas
 # True score of N(samples, sigma^2) at the perturbed point.
 target = (-1 / used_sigmas ** 2) * (perturbed - samples)
 scores = scorenet(perturbed, labels)
 target = target.view(target.shape[0], -1)
 scores = scores.view(scores.shape[0], -1)
 per_sample = (1 / 2.0) * ((scores - target) ** 2).sum(dim=-1)
 loss = per_sample * used_sigmas.squeeze() ** anneal_power
 return loss.mean(dim=0)
class Bottleneck(nn.Module):
 """ResNet bottleneck block with stochastic depth.

 Per forward call the residual branch is kept with probability
 ``prob`` (Bernoulli sample); when kept, its output is rescaled by
 ``1 / prob`` before being added to the shortcut.
 """
 expansion = 4
 def __init__(self, inplanes, planes, stride=1, prob=None, downsample=None):
  super(Bottleneck, self).__init__()
  # 1x1 reduce -> 3x3 (possibly strided) -> 1x1 expand, each with BN.
  self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
  self.bn1 = nn.BatchNorm2d(planes)
  self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
  self.bn2 = nn.BatchNorm2d(planes)
  self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
  self.bn3 = nn.BatchNorm2d(planes * 4)
  self.relu = nn.ReLU(inplace=True)
  self.downsample = downsample
  self.stride = stride
  self.prob = prob
 def forward(self, x):
  shortcut = x if self.downsample is None else self.downsample(x)
  # Stochastic depth: drop the residual branch with prob 1 - self.prob.
  keep = np.random.binomial(size=1, n=1, p=self.prob)[0]
  if keep == 0:
   return shortcut
  out = self.relu(self.bn1(self.conv1(x)))
  out = self.relu(self.bn2(self.conv2(out)))
  out = self.bn3(self.conv3(out))
  out /= self.prob  # inverse scaling so the expectation matches at test time
  out += shortcut
  return self.relu(out)
def init_spark_on_k8s(master, container_image, conda_name, num_executors, executor_cores, executor_memory='2g', driver_memory='2g', driver_cores=4, extra_executor_memory_for_ray=None, extra_python_lib=None, penv_archive=None, spark_log_level='WARN', redirect_spark_log=True, jars=None, conf=None, python_location=None):
 """Create a SparkContext on Kubernetes (thin wrapper around bigdl's SparkRunner)."""
 from bigdl.dllib.utils.spark import SparkRunner
 spark_runner = SparkRunner(spark_log_level=spark_log_level, redirect_spark_log=redirect_spark_log)
 return spark_runner.init_spark_on_k8s(master=master, container_image=container_image, conda_name=conda_name, num_executors=num_executors, executor_cores=executor_cores, executor_memory=executor_memory, driver_memory=driver_memory, driver_cores=driver_cores, extra_executor_memory_for_ray=extra_executor_memory_for_ray, extra_python_lib=extra_python_lib, penv_archive=penv_archive, jars=jars, conf=conf, python_location=python_location)
def get_model_normalizer(model_path: str) -> Optional['sf.norm.StainNormalizer']:
 """Build the stain normalizer a saved model was trained with, or None.

 Reads the model's stored config, detects the backend from the model
 path, and restores any fitted normalizer parameters.
 """
 config = sf.util.get_model_config(model_path)
 if is_torch_model_path(model_path):
  backend = 'torch'
 elif is_tensorflow_model_path(model_path):
  backend = 'tensorflow'
 else:
  log.warn(f'Unable to determine backend for model at {model_path}')
  backend = None
 if not config['hp']['normalizer']:
  return None
 # Warn about the normalization algorithm change introduced in 1.2.3.
 is_legacy = ('slideflow_version' in config) and (version.parse(config['slideflow_version']) <= version.parse('1.2.2'))
 if is_legacy and (config['hp']['normalizer'] in ('vahadane', 'macenko')):
  log.warn('Detected model trained with Macenko or Vahadane normalization with Slideflow version <= 1.2.2. Macenko and Vahadane algorithms were optimized in 1.2.3 and may now yield slightly different results. ')
 normalizer = sf.norm.autoselect(config['hp']['normalizer'], config['hp']['normalizer_source'], backend=backend)
 if ('norm_fit' in config) and (config['norm_fit'] is not None):
  normalizer.set_fit(**config['norm_fit'])
 return normalizer
def load_module(filename):
 """Import a Python source file by path and return the module object.

 The module name is the file's basename without extension.  Uses the
 importlib machinery instead of the deprecated
 ``SourceFileLoader.load_module`` (removed in Python 3.12).

 Parameters
 ----------
 filename : str
  Path to the ``.py`` file to import.
 """
 import sys
 import importlib.util
 module_name = os.path.splitext(os.path.basename(filename))[0]
 spec = importlib.util.spec_from_file_location(module_name, filename)
 module = importlib.util.module_from_spec(spec)
 # Register before exec so imports inside the module (including
 # self-imports) resolve, matching load_module's old behaviour.
 sys.modules[module_name] = module
 spec.loader.exec_module(module)
 return module
def collate_to_max_length_for_train_dynamic_pron_loss(batch: List[List[torch.Tensor]], max_len: int=None, fill_values: List[float]=None) -> List[torch.Tensor]:
 """Pad a batch of per-field tensors to each field's maximum length.

 Every field but the last is padded to ``(batch, max_field_len)``; the
 final field is padded to ``(batch, max_field_len, 3)`` (dynamic
 pronunciation targets).  ``fill_values`` supplies the padding value
 per field (defaults to 0.0); ``max_len`` pins a fixed length for all
 fields and must bound every sample.
 """
 lengths = np.array([[len(field) for field in sample] for sample in batch])
 (batch_size, num_fields) = lengths.shape
 if not fill_values:
  fill_values = [0.0] * num_fields
 max_lengths = lengths.max(axis=0)
 if max_len:
  # Caller pinned a fixed length: pad every field to it.
  assert max_lengths.max() <= max_len
  max_lengths = np.ones_like(max_lengths) * max_len
 output = []
 for field_idx in range(num_fields - 1):
  output.append(torch.full((batch_size, max_lengths[field_idx]), fill_value=fill_values[field_idx], dtype=batch[0][field_idx].dtype))
 # Last field carries a trailing dimension of 3.
 output.append(torch.full((batch_size, max_lengths[-1], 3), fill_value=fill_values[-1], dtype=batch[0][-1].dtype))
 for sample_idx, sample in enumerate(batch):
  for field_idx, data in enumerate(sample):
   output[field_idx][sample_idx][:data.shape[0]] = data
 return output
class HTMLProgressBar(BaseProgressBar):
 """Jupyter-widget progress bar driven by terra.parallel pub/sub events.

 Subscribes to ``terra.parallel.start`` / ``done`` / ``finish`` and
 renders an ``ipywidgets`` IntProgress with an HTML label.
 """
 def __init__(self):
  super().__init__()
  # Widgets are created lazily in start(), not here.
  self.progress_bar = None
  self.label = None
  self.box = None
  self._init_subscriber()
 def _init_subscriber(self):
  """Wire the start/done/finish event handlers.

  The finish handler unsubscribes all three callbacks (including
  itself), so the wiring is torn down after one run.
  """
  def _initialize_progress_bar(num_tasks):
   self.start(num_tasks)
  self.subscribe('terra.parallel.start', _initialize_progress_bar)
  def _update_progress_bar(progress):
   self.update(progress)
  self.subscribe('terra.parallel.done', _update_progress_bar)
  def _finish_progress_bar():
   # Tear down all three subscriptions before rendering the summary.
   self.unsubscribe('terra.parallel.start', _initialize_progress_bar)
   self.unsubscribe('terra.parallel.done', _update_progress_bar)
   self.unsubscribe('terra.parallel.finish', _finish_progress_bar)
   self.finished()
  self.subscribe('terra.parallel.finish', _finish_progress_bar)
 def start(self, iterations):
  """Create and display the widgets for a run of *iterations* tasks."""
  self.touched = True
  self.iter = int(iterations)
  self.t_start = time.time()
  self.progress_bar = widgets.IntProgress(min=0, max=self.iter, value=0)
  self.progress_bar.bar_style = 'info'
  self.label = widgets.HTML()
  self.box = widgets.VBox(children=[self.label, self.progress_bar])
  display(self.box)
 def update(self, n):
  """Advance the bar by one and refresh the remaining-time label.

  ``n`` is the number of completed tasks reported by the event, used
  only for the label text; the bar itself just increments.
  """
  self.progress_bar.value += 1
  lbl = 'Completed %s/%s: Est. remaining time: %s.'
  self.label.value = (lbl % (n, self.iter, self.time_remaining_est(n)))
 def finished(self):
  """Mark the run complete and show total elapsed time."""
  self.t_done = time.time()
  self.progress_bar.bar_style = 'success'
  self.label.value = ('Elapsed time: %s' % self.time_elapsed())
class VQAEval():
 """VQA accuracy evaluator (ports the official VQA evaluation code).

 Accuracy for one answer is min(1, #matching-human-answers / 3),
 averaged over the leave-one-out human answer sets, then aggregated
 overall and per question/answer type.  Normalisation (contractions,
 punctuation, digits, articles) mirrors the official implementation.
 """
 def __init__(self, q_2_annotation, q_2_answer, n=2):
  # n = number of decimal places for reported accuracies.
  self.n = n
  self.accuracy = {}
  self.evalQA = {}
  self.evalQuesType = {}
  self.evalAnsType = {}
  # question id -> gold annotation / predicted answer.
  self.q_2_annotat = q_2_annotation
  self.q_2_ans = q_2_answer
  # Normalisation tables from the official VQA eval code.
  # NOTE(review): a few entries differ from the official table (e.g.
  # "somebody'd" maps key and value in the reverse direction) --
  # verify against the upstream vqaEval.py if parity matters.
  self.contractions = {'aint': "ain't", 'arent': "aren't", 'cant': "can't", 'couldve': "could've", 'couldnt': "couldn't", "couldn'tve": "couldn't've", "couldnt've": "couldn't've", 'didnt': "didn't", 'doesnt': "doesn't", 'dont': "don't", 'hadnt': "hadn't", "hadnt've": "hadn't've", "hadn'tve": "hadn't've", 'hasnt': "hasn't", 'havent': "haven't", 'hed': "he'd", "hed've": "he'd've", "he'dve": "he'd've", 'hes': "he's", 'howd': "how'd", 'howll': "how'll", 'hows': "how's", "Id've": "I'd've", "I'dve": "I'd've", 'Im': "I'm", 'Ive': "I've", 'isnt': "isn't", 'itd': "it'd", "itd've": "it'd've", "it'dve": "it'd've", 'itll': "it'll", "let's": "let's", 'maam': "ma'am", 'mightnt': "mightn't", "mightnt've": "mightn't've", "mightn'tve": "mightn't've", 'mightve': "might've", 'mustnt': "mustn't", 'mustve': "must've", 'neednt': "needn't", 'notve': "not've", 'oclock': "o'clock", 'oughtnt': "oughtn't", "ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat": "'ow's'at", 'shant': "shan't", "shed've": "she'd've", "she'dve": "she'd've", "she's": "she's", 'shouldve': "should've", 'shouldnt': "shouldn't", "shouldnt've": "shouldn't've", "shouldn'tve": "shouldn't've", "somebody'd": 'somebodyd', "somebodyd've": "somebody'd've", "somebody'dve": "somebody'd've", 'somebodyll': "somebody'll", 'somebodys': "somebody's", 'someoned': "someone'd", "someoned've": "someone'd've", "someone'dve": "someone'd've", 'someonell': "someone'll", 'someones': "someone's", 'somethingd': "something'd", "somethingd've": "something'd've", "something'dve": "something'd've", 'somethingll': "something'll", 'thats': "that's", 'thered': "there'd", "thered've": "there'd've", "there'dve": "there'd've", 'therere': "there're", 'theres': "there's", 'theyd': "they'd", "theyd've": "they'd've", "they'dve": "they'd've", 'theyll': "they'll", 'theyre': "they're", 'theyve': "they've", 'twas': "'twas", 'wasnt': "wasn't", "wed've": "we'd've", "we'dve": "we'd've", 'weve': "we've", 'werent': "weren't", 'whatll': "what'll", 'whatre': 
"what're", 'whats': "what's", 'whatve': "what've", 'whens': "when's", 'whered': "where'd", 'wheres': "where's", 'whereve': "where've", 'whod': "who'd", "whod've": "who'd've", "who'dve": "who'd've", 'wholl': "who'll", 'whos': "who's", 'whove': "who've", 'whyll': "why'll", 'whyre': "why're", 'whys': "why's", 'wont': "won't", 'wouldve': "would've", 'wouldnt': "wouldn't", "wouldnt've": "wouldn't've", "wouldn'tve": "wouldn't've", 'yall': "y'all", "yall'll": "y'all'll", "y'allll": "y'all'll", "yall'd've": "y'all'd've", "y'alld've": "y'all'd've", "y'all'dve": "y'all'd've", 'youd': "you'd", "youd've": "you'd've", "you'dve": "you'd've", 'youll': "you'll", 'youre': "you're", 'youve': "you've"}
  self.manualMap = {'none': '0', 'zero': '0', 'one': '1', 'two': '2', 'three': '3', 'four': '4', 'five': '5', 'six': '6', 'seven': '7', 'eight': '8', 'nine': '9', 'ten': '10'}
  self.articles = ['a', 'an', 'the']
  # NOTE(review): '(?!<=\d)' is the pattern shipped with the official
  # VQA code; it looks like a typo for the lookbehind '(?<!\d)' but is
  # kept for score parity with the reference implementation.
  self.periodStrip = re.compile('(?!<=\\d)(\\.)(?!\\d)')
  self.commaStrip = re.compile('(\\d)(\\,)(\\d)')
  self.punct = [';', '/', '[', ']', '"', '{', '}', '(', ')', '=', '+', '\\', '_', '-', '>', '<', '', '`', ',', '?', '!']
 def evaluate(self):
  """Compute accuracies for every answered question.

  Follows the official protocol: normalise the predicted answer, then
  average min(1, matches/3) over leave-one-out gold answer sets.
  """
  gts = self.q_2_annotat
  res = self.q_2_ans
  quesIds = self.q_2_ans.keys()
  accQA = []
  accQuesType = {}
  accAnsType = {}
  # NOTE(review): bare ``print`` is a Python-2 leftover; in Python 3
  # this expression is a no-op.
  print
  step = 0
  for quesId in quesIds:
   # Normalise the predicted answer string.
   resAns = res[quesId]['answer']
   resAns = resAns.replace('\n', ' ')
   resAns = resAns.replace('\t', ' ')
   resAns = resAns.strip()
   resAns = self.processPunctuation(resAns)
   resAns = self.processDigitArticle(resAns)
   gtAcc = []
   gtAnswers = [ans['answer'] for ans in gts[quesId]['answers']]
   # Official quirk: gold answers are punctuation-normalised only when
   # the humans disagreed.
   if (len(set(gtAnswers)) > 1):
    for ansDic in gts[quesId]['answers']:
     ansDic['answer'] = self.processPunctuation(ansDic['answer'])
   # Leave-one-out: each gold answer is held out in turn.
   for gtAnsDatum in gts[quesId]['answers']:
    otherGTAns = [item for item in gts[quesId]['answers'] if (item != gtAnsDatum)]
    matchingAns = [item for item in otherGTAns if (item['answer'] == resAns)]
    acc = min(1, (float(len(matchingAns)) / 3))
    gtAcc.append(acc)
   quesType = gts[quesId]['question_type']
   ansType = gts[quesId]['answer_type']
   avgGTAcc = (float(sum(gtAcc)) / len(gtAcc))
   accQA.append(avgGTAcc)
   if (quesType not in accQuesType):
    accQuesType[quesType] = []
   accQuesType[quesType].append(avgGTAcc)
   if (ansType not in accAnsType):
    accAnsType[ansType] = []
   accAnsType[ansType].append(avgGTAcc)
   self.setEvalQA(quesId, avgGTAcc)
   self.setEvalQuesType(quesId, quesType, avgGTAcc)
   self.setEvalAnsType(quesId, ansType, avgGTAcc)
   if ((step % 100) == 0):
    self.updateProgress((step / float(len(quesIds))))
   step = (step + 1)
  self.setAccuracy(accQA, accQuesType, accAnsType)
  print('Done computing accuracy')
 def processPunctuation(self, inText):
  """Strip/replace punctuation following the official VQA rules."""
  outText = inText
  for p in self.punct:
   # Delete punctuation adjacent to whitespace (or in numbers with
   # commas); otherwise replace it with a space.
   if ((((p + ' ') in inText) or ((' ' + p) in inText)) or (re.search(self.commaStrip, inText) is not None)):
    outText = outText.replace(p, '')
   else:
    outText = outText.replace(p, ' ')
  outText = self.periodStrip.sub('', outText, re.UNICODE)
  return outText
 def processDigitArticle(self, inText):
  """Map number words to digits, drop articles, expand contractions."""
  outText = []
  tempText = inText.lower().split()
  for word in tempText:
   word = self.manualMap.setdefault(word, word)
   if (word not in self.articles):
    outText.append(word)
   else:
    pass
  for (wordId, word) in enumerate(outText):
   if (word in self.contractions):
    outText[wordId] = self.contractions[word]
  outText = ' '.join(outText)
  return outText
 def setAccuracy(self, accQA, accQuesType, accAnsType):
  """Store overall and per-type accuracies (percent, rounded to n places)."""
  self.accuracy['overall'] = round(((100 * float(sum(accQA))) / len(accQA)), self.n)
  self.accuracy['perQuestionType'] = {quesType: round(((100 * float(sum(accQuesType[quesType]))) / len(accQuesType[quesType])), self.n) for quesType in accQuesType}
  self.accuracy['perAnswerType'] = {ansType: round(((100 * float(sum(accAnsType[ansType]))) / len(accAnsType[ansType])), self.n) for ansType in accAnsType}
 def setEvalQA(self, quesId, acc):
  """Record a single question's accuracy (percent)."""
  self.evalQA[quesId] = round((100 * acc), self.n)
 def setEvalQuesType(self, quesId, quesType, acc):
  """Record a question's accuracy under its question type."""
  if (quesType not in self.evalQuesType):
   self.evalQuesType[quesType] = {}
  self.evalQuesType[quesType][quesId] = round((100 * acc), self.n)
 def setEvalAnsType(self, quesId, ansType, acc):
  """Record a question's accuracy under its answer type."""
  if (ansType not in self.evalAnsType):
   self.evalAnsType[ansType] = {}
  self.evalAnsType[ansType][quesId] = round((100 * acc), self.n)
 def updateProgress(self, progress):
  """Render a textual progress bar to stdout for *progress* in [0, 1]."""
  barLength = 20
  status = ''
  if isinstance(progress, int):
   progress = float(progress)
  if (not isinstance(progress, float)):
   progress = 0
   status = 'error: progress var must be float\r\n'
  if (progress < 0):
   progress = 0
   status = 'Halt...\r\n'
  if (progress >= 1):
   progress = 1
   status = 'Done...\r\n'
  block = int(round((barLength * progress)))
  text = '\rFinshed Percent: [{0}] {1}% {2}'.format((('#' * block) + ('-' * (barLength - block))), int((progress * 100)), status)
  sys.stdout.write(text)
  sys.stdout.flush()
def gen_search_space(block_list, block_id):
 """Enumerate candidate student block strings for one teacher block.

 Returns a list of candidate lists (one per block-type group); each
 candidate is a constructor-style string such as
 ``'BlockType(in,out,stride,bottleneck,sublayers)'``.
 """
 the_block = block_list[block_id]
 student_blocks_list_list = []
 if isinstance(the_block, super_blocks.SuperConvKXBNRELU):
  # Plain conv block: only the output channel width is searched.
  candidates = []
  for out_ch in get_select_student_channels_list(the_block.out_channels):
   candidates.append(type(the_block).__name__ + '({},{},{},1)'.format(the_block.in_channels, out_ch, the_block.stride))
  candidates = list(set(candidates))
  assert len(candidates) >= 1
  student_blocks_list_list.append(candidates)
 else:
  for student_block_type_list in seach_space_block_type_list_list:
   candidates = []
   out_ch_options = get_select_student_channels_list(the_block.out_channels)
   sublayer_options = get_select_student_sublayers_list(sub_layers=the_block.sub_layers)
   bottleneck_options = get_select_student_channels_list(the_block.bottleneck_channels)
   for student_block_type in student_block_type_list:
    for (out_ch, sublayers, bottleneck_ch) in itertools.product(out_ch_options, sublayer_options, bottleneck_options):
     # Channels are rounded to the block type's base; candidates
     # falling below the minimum width are skipped.
     min_possible_channels = __block_type_round_channels_base_dict__[student_block_type]
     channel_round_base = __block_type_round_channels_base_dict__[student_block_type]
     out_ch = global_utils.smart_round(out_ch, channel_round_base)
     bottleneck_ch = global_utils.smart_round(bottleneck_ch, channel_round_base)
     if (out_ch < min_possible_channels) or (bottleneck_ch < min_possible_channels):
      continue
     if sublayers <= 0:
      continue
     candidates.append(student_block_type.__name__ + '({},{},{},{},{})'.format(the_block.in_channels, out_ch, the_block.stride, bottleneck_ch, sublayers))
   candidates = list(set(candidates))
   assert len(candidates) >= 1
   student_blocks_list_list.append(candidates)
 return student_blocks_list_list
class Enc(nn.Module):
 """Convolutional encoder: embedded token grid -> latent Gaussian parameters."""
 def __init__(self, latentDim):
  super(Enc, self).__init__()
  self.embedding = nn.Embedding(vocabSize, embeddingDim, padding_idx=0)
  # Downsampling stack: three 4x4/stride-2 convs, then two (1,4)
  # convs with stride (1,2); each conv is followed by BN + ReLU.
  layers = [
   nn.Conv2d(1, fBase, 4, 2, 1, bias=False),
   nn.BatchNorm2d(fBase), nn.ReLU(True),
   nn.Conv2d(fBase, fBase * 2, 4, 2, 1, bias=False),
   nn.BatchNorm2d(fBase * 2), nn.ReLU(True),
   nn.Conv2d(fBase * 2, fBase * 4, 4, 2, 1, bias=False),
   nn.BatchNorm2d(fBase * 4), nn.ReLU(True),
   nn.Conv2d(fBase * 4, fBase * 4, (1, 4), (1, 2), (0, 1), bias=False),
   nn.BatchNorm2d(fBase * 4), nn.ReLU(True),
   nn.Conv2d(fBase * 4, fBase * 4, (1, 4), (1, 2), (0, 1), bias=False),
   nn.BatchNorm2d(fBase * 4), nn.ReLU(True),
  ]
  self.enc = nn.Sequential(*layers)
  # Heads producing the Gaussian mean and (pre-softplus) scale.
  self.c1 = nn.Conv2d(fBase * 4, latentDim, 4, 1, 0, bias=False)
  self.c2 = nn.Conv2d(fBase * 4, latentDim, 4, 1, 0, bias=False)
 def forward(self, x):
  feats = self.enc(self.embedding(x.long()).unsqueeze(1))
  mu = self.c1(feats).squeeze()
  logvar = self.c2(feats).squeeze()
  # softplus keeps the scale positive; eta is a small stability floor.
  return (mu, F.softplus(logvar) + Constants.eta)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.