code stringlengths 101 5.91M |
|---|
def hed():
    """Build and compile the HED (Holistically-Nested Edge Detection) network.

    The backbone is the VGG16 convolutional stack (five blocks). After each
    block a side branch produces an edge map (upsampled back to the input
    resolution by ``side_branch``); the five side maps are concatenated and
    fused by a bias-free 1x1 convolution. All six outputs pass through a
    sigmoid and are trained with the class-balanced cross entropy loss.

    Returns:
        A compiled Keras ``Model`` mapping a 480x480x3 image to the outputs
        ``[o1, o2, o3, o4, o5, ofuse]``.
    """
    img_input = Input(shape=(480, 480, 3), name='input')

    # Block 1 (full resolution; side-branch upsample factor 1).
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    b1 = side_branch(x, 1)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block1_pool')(x)

    # Block 2 (1/2 resolution).
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    b2 = side_branch(x, 2)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block2_pool')(x)

    # Block 3 (1/4 resolution).
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    b3 = side_branch(x, 4)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block3_pool')(x)

    # Block 4 (1/8 resolution).
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    b4 = side_branch(x, 8)
    x = MaxPooling2D((2, 2), strides=(2, 2), padding='same', name='block4_pool')(x)

    # Block 5 (1/16 resolution); no pooling after the last block.
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    b5 = side_branch(x, 16)

    # Learned fusion of the five side outputs (bias-free 1x1 conv).
    fuse = Concatenate(axis=-1)([b1, b2, b3, b4, b5])
    fuse = Conv2D(1, (1, 1), padding='same', use_bias=False, activation=None)(fuse)

    o1 = Activation('sigmoid', name='o1')(b1)
    o2 = Activation('sigmoid', name='o2')(b2)
    o3 = Activation('sigmoid', name='o3')(b3)
    o4 = Activation('sigmoid', name='o4')(b4)
    o5 = Activation('sigmoid', name='o5')(b5)
    ofuse = Activation('sigmoid', name='ofuse')(fuse)

    model = Model(inputs=[img_input], outputs=[o1, o2, o3, o4, o5, ofuse])
    # NOTE(review): the original code assigned the weights path
    # './models/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5' to a local
    # variable that was never used; a model.load_weights(...) call was
    # presumably intended — confirm before relying on pretrained initialisation.
    model.compile(loss={'o1': cross_entropy_balanced,
                        'o2': cross_entropy_balanced,
                        'o3': cross_entropy_balanced,
                        'o4': cross_entropy_balanced,
                        'o5': cross_entropy_balanced,
                        'ofuse': cross_entropy_balanced},
                  metrics={'ofuse': ofuse_pixel_error},
                  optimizer='adam')
    return model
class Net(nn.Module):
    """LeNet-style convolutional classifier for 1x28x28 inputs, 10 classes."""

    def __init__(self):
        super(Net, self).__init__()
        # Two 5x5 stride-1 conv layers followed by a two-layer classifier head.
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4 * 4 * 50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        # Conv -> ReLU -> 2x2 max-pool, twice: 28x28 -> 12x12 -> 4x4 spatially.
        out = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
        # Flatten to (batch, 800) before the fully connected layers.
        out = out.view(-1, 4 * 4 * 50)
        out = F.relu(self.fc1(out))
        # Log-probabilities over the 10 classes.
        return F.log_softmax(self.fc2(out), dim=1)
class ModelEvaluator(Evaluator):
    """Evaluates a classifier over a dataset, reporting accuracy and log-loss.

    Iterates the dataset exactly once with a non-shuffling DataLoader;
    optionally runs the forward pass under CUDA automatic mixed precision.
    """

    def __init__(self, dataset: Dataset, batch_size: int, num_workers: int, mixed_precision: bool=True):
        self.dataset = dataset
        self.mixed_precision = mixed_precision
        # Deterministic full pass: no shuffling, keep the final partial batch.
        self.loader = DataLoader(dataset, batch_size, shuffle=False, num_workers=num_workers, drop_last=False)

    def num_batches(self):
        # Number of progress dicts one evaluate_iter() pass will yield.
        return len(self.loader)

    def evaluate(self, model: Classifier, device: Optional[Union[(torch.device, str)]]=None) -> Evaluator.Result:
        # Drain evaluate_iter() (discarding per-batch progress) and return
        # only the generator's final return value, the Result.
        return expand_generator(self.evaluate_iter(model, device), return_only=True)

    def evaluate_iter(self, model: Classifier, device: Optional[Union[(torch.device, str)]]=None) -> Generator[(dict, None, Evaluator.Result)]:
        """Yield dict(batch=i) per batch; the generator's *return value* is the Result.

        Accuracy and log-loss are accumulated as per-sample fractions of the
        whole dataset, so the totals are dataset-wide means regardless of
        batch size (the final partial batch is weighted correctly).
        """
        with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled=self.mixed_precision):
            mean_accuracy = 0.0
            mean_log_loss = 0.0
            for (i, (x, y)) in enumerate(self.loader):
                x = x.to(device)
                y = y.to(device)
                logits = model(x)
                # Count of correct top-1 predictions in this batch.
                correct = torch.sum((logits.argmax((- 1)) == y)).item()
                # Sum-reduced cross entropy, normalised below by dataset size.
                log_loss = F.cross_entropy(logits, y, reduction='sum').item()
                mean_accuracy += (correct / len(self.dataset))
                mean_log_loss += (log_loss / len(self.dataset))
                (yield dict(batch=i))
            return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)
def videodata_kwargs(cfg):
    """Collect the keyword arguments for building a video data manager from cfg."""
    kwargs = {
        # Dataset location and selection.
        'root': cfg.data.root,
        'root_targets': cfg.data.root_targets,
        'sources': cfg.data.sources,
        'targets': cfg.data.targets,
        # Preprocessing.
        'height': cfg.data.height,
        'width': cfg.data.width,
        'transforms': cfg.data.transforms,
        'norm_mean': cfg.data.norm_mean,
        'norm_std': cfg.data.norm_std,
        'use_gpu': cfg.use_gpu,
        'split_id': cfg.data.split_id,
        'combineall': cfg.data.combineall,
        # Loading.
        'batch_size_train': cfg.train.batch_size,
        'batch_size_test': cfg.test.batch_size,
        'workers': cfg.data.workers,
        # Sampling.
        'num_instances': cfg.sampler.num_instances,
        'num_cams': cfg.sampler.num_cams,
        'num_datasets': cfg.sampler.num_datasets,
        'train_sampler': cfg.sampler.train_sampler,
        # Video-specific options.
        'seq_len': cfg.video.seq_len,
        'sample_method': cfg.video.sample_method,
    }
    return kwargs
def make_vector_field(eval_func, x_bounds, y_bounds, *, resolution=10, info=None):
    """Sample eval_func on a resolution x resolution grid and wrap the result.

    ``eval_func(x, y)`` must return a ``(value, dx, dy)`` triple; the three
    components are collected into separate arrays indexed ``[x_index, y_index]``.
    """
    if info is None:
        info = {}
    xs = np.linspace(*x_bounds, num=resolution)
    ys = np.linspace(*y_bounds, num=resolution)
    grid_shape = (resolution, resolution)
    vals = np.zeros(grid_shape)
    dxs = np.zeros(grid_shape)
    dys = np.zeros(grid_shape)
    # Evaluate the field point by point (eval_func is scalar-valued).
    for i, xv in enumerate(xs):
        for j, yv in enumerate(ys):
            vals[i, j], dxs[i, j], dys[i, j] = eval_func(xv, yv)
    return VectorField(values=vals, dx_values=dxs, dy_values=dys,
                       x_values=xs, y_values=ys, info=info)
class Attention2d(torch.nn.Module):
    """Spatial attention module: adaptive max-pooling followed by a
    depthwise-separable convolution, batch norm and a sigmoid gate.

    ``forward`` pools the input to the requested spatial ``size`` first, so
    the returned map has shape (batch, out_channels, *size) with values in
    (0, 1).
    """

    def __init__(self, in_channels: int, out_channels: int, num_kernels: int, kernel_size: Tuple[(int, int)], padding_size: Tuple[(int, int)]):
        super(Attention2d, self).__init__()
        # Depthwise stage: each input channel expands into num_kernels maps.
        self.conv_depth = torch.nn.Conv2d(in_channels=in_channels,
                                          out_channels=(in_channels * num_kernels),
                                          kernel_size=kernel_size,
                                          padding=padding_size,
                                          groups=in_channels)
        # Pointwise 1x1 stage mixes the expanded maps down to out_channels.
        self.conv_point = torch.nn.Conv2d(in_channels=(in_channels * num_kernels),
                                          out_channels=out_channels,
                                          kernel_size=(1, 1))
        self.bn = torch.nn.BatchNorm2d(num_features=out_channels)
        self.activation = torch.nn.Sigmoid()

    def forward(self, x: torch.Tensor, size: torch.Size) -> torch.Tensor:
        # Resize first so the convolutions run at the target resolution.
        pooled = F.adaptive_max_pool2d(x, size)
        features = self.conv_point(self.conv_depth(pooled))
        return self.activation(self.bn(features))
class A000255(ExtremesOfPermanentsSequence):
    """OEIS A000255: a(n) = n*a(n-1) + (n-1)*a(n-2), with a(0) = a(1) = 1."""

    def __init__(self):
        # 0-indexed sequence; initialise the SloaneSequence machinery directly.
        SloaneSequence.__init__(self, offset=0)
        self._b = []  # cache of computed terms, filled by _precompute
        # Recurrence seed consumed by ExtremesOfPermanentsSequence —
        # presumably (a0, a1, d) = (1, 1, 1); confirm against the parent class.
        self._a0a1d = (1, 1, 1)
        self._precompute(2)

    def _repr_(self):
        return 'a(n) = n*a(n-1) + (n-1)*a(n-2), a(0) = 1, a(1) = 1.'
class BasicAggModel(nn.Module):
    """Attention-based aggregation producing per-rule scores.

    A single query vector attends (multi-head) over a set of key vectors;
    the attended representation is optionally refined by a position-wise
    feed-forward block and projected to ``n_rules`` scores.
    """

    def __init__(self, include_ff=True, include_res_ln=True, dropout=0.0, d_inner=2048, d_model=768, return_att_weights=False, n_head=8, d_k=96, n_rules=63, device='cpu', is_dense_bias=True):
        super(BasicAggModel, self).__init__()
        self.include_ff = include_ff
        self.include_res_ln = include_res_ln
        self.d_inner = d_inner
        self.d_model = d_model
        self.n_rules = n_rules
        self.device = device
        self.return_att_weights = return_att_weights
        # Multi-head attention with equal key/value dims; residual + layer
        # norm behaviour is delegated to the project's MultiHeadAttention.
        self.mha = MultiHeadAttention(n_head=n_head, d_k=d_k, d_v=d_k, dropout=dropout, return_att_weights=return_att_weights, include_res_ln=include_res_ln)
        self.ff = PositionwiseFeedForward(self.d_model, self.d_inner, dropout=dropout)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
        self.dropout = nn.Dropout(p=dropout)
        # One learnable embedding per rule (not read in forward() below).
        self.rule_encoder = nn.Embedding(self.n_rules, self.d_model)
        if is_dense_bias:
            self.dense_proj = nn.Linear(d_model, n_rules)
        else:
            self.dense_proj = nn.Linear(d_model, n_rules, bias=False)
        # Xavier-initialise every matrix-shaped parameter except the output
        # projection weight, which keeps PyTorch's default initialisation.
        for (name, p) in self.named_parameters():
            if ((name != 'dense_proj.weight') and (p.dim() > 1)):
                nn.init.xavier_uniform_(p)

    def forward(self, query, keys, mask):
        """Attend ``query`` over ``keys`` and return ``(scores, att_weights)``.

        ``query`` is unsqueezed to a length-1 sequence; ``keys`` double as the
        attention values. ``mask`` is summed over its last dimension before
        use — presumably producing a per-position validity mask expected by
        MultiHeadAttention; confirm against that class. ``att_weights`` is
        None unless ``return_att_weights`` was set at construction.
        """
        query = torch.unsqueeze(query, 1)
        bs = query.size(0)  # kept for parity with the original; unused below
        mask = torch.sum(mask, dim=(- 1))
        mask = mask.unsqueeze(1)
        values = keys
        # Normalise all three attention inputs with the shared LayerNorm.
        query = self.layer_norm(query)
        keys = self.layer_norm(keys)
        values = self.layer_norm(values)
        (pred, att_weights) = self.mha(query, keys, values, mask)
        if self.include_ff:
            pred = self.ff(pred)
        pred = self.dense_proj(pred)
        if self.return_att_weights:
            return (pred, att_weights)
        else:
            return (pred, None)
# BUG FIX(review): the decorator had been mangled to a bare `.skipif(...)`
# (a syntax error); restored the standard pytest marker.
@pytest.mark.skipif((not limits.can_set_time_limit()), reason='Cannot set time limits on this system')
def test_hard_time_limit():
    """Driver must accept a component limit within the hard wall-clock limit
    and reject one that exceeds it.
    """
    def preexec_fn():
        # Apply a 10s hard time limit to the child process before exec.
        limits.set_time_limit(10)

    driver = [sys.executable, 'fast-downward.py']
    # Component limit equal to the hard limit: must run fine.
    parameters = ['--translate', '--translate-time-limit', '10s', 'misc/tests/benchmarks/gripper/prob01.pddl']
    subprocess.check_call((driver + parameters), preexec_fn=preexec_fn, cwd=REPO_ROOT_DIR)
    # Component limit above the hard limit: the driver must refuse the input.
    parameters = ['--translate', '--translate-time-limit', '20s', 'misc/tests/benchmarks/gripper/prob01.pddl']
    with pytest.raises(subprocess.CalledProcessError) as exception_info:
        subprocess.check_call((driver + parameters), preexec_fn=preexec_fn, cwd=REPO_ROOT_DIR)
    assert (exception_info.value.returncode == returncodes.DRIVER_INPUT_ERROR)
class KR_type_Dn_twistedElement(KirillovReshetikhinGenericCrystalElement):
    """Element class for twisted type D_n Kirillov-Reshetikhin crystals.

    The affine crystal operators e_0/f_0 and the statistics epsilon_0/phi_0
    are computed by lifting to a classical highest weight vector for the
    index set {2, ..., n}, manipulating the associated +/- diagram, and
    retracting along the recorded lowering path.
    """

    def e0(self):
        """Return e_0 applied to self, or None if it annihilates self.

        The case analysis on (l1, l2, l3) against the KR parameter s mirrors
        the +/- diagram rules for the twisted node 0 action — presumably
        following the published KR-crystal combinatorics; confirm against the
        parent class's documentation.
        """
        n = (self.parent().cartan_type().rank() - 1)
        s = self.parent().s()
        # Lift to the {2,...,n} highest weight vector; l records the path.
        [b, l] = self.lift().to_highest_weight(index_set=list(range(2, (n + 1))))
        pm = self.parent().from_highest_weight_vector_to_pm_diagram(b)
        # Row n-1 entries and the first entry of row n-2 drive the cases.
        [l1, l2] = pm.pm_diagram[(n - 1)]
        l3 = pm.pm_diagram[(n - 2)][0]
        if ((((l1 + l2) + l3) == s) and (l1 == 0)):
            return None  # e_0 vanishes on this element
        if (((l1 + l2) + l3) < s):
            pm.pm_diagram[(n - 1)][1] = (l2 + 2)
            pm.pm_diagram[n][0] -= 2
        elif (l1 > 1):
            pm.pm_diagram[(n - 1)][0] = (l1 - 2)
            pm.pm_diagram[n][0] += 2
        elif (l1 == 1):
            pm.pm_diagram[(n - 1)][0] = 0
            pm.pm_diagram[(n - 1)][1] = (l2 + 1)
        pm = PMDiagram(pm.pm_diagram)
        b = self.parent().from_pm_diagram_to_highest_weight_vector(pm)
        # Undo the lift by replaying the recorded lowering path.
        b = b.f_string(reversed(l))
        return self.parent().retract(b)

    def f0(self):
        """Return f_0 applied to self, or None if it annihilates self.

        Mirror image of e0(): the roles of l1 and l2 are swapped in the
        case analysis.
        """
        n = (self.parent().cartan_type().rank() - 1)
        s = self.parent().s()
        [b, l] = self.lift().to_highest_weight(index_set=list(range(2, (n + 1))))
        pm = self.parent().from_highest_weight_vector_to_pm_diagram(b)
        [l1, l2] = pm.pm_diagram[(n - 1)]
        l3 = pm.pm_diagram[(n - 2)][0]
        if ((((l1 + l2) + l3) == s) and (l2 == 0)):
            return None  # f_0 vanishes on this element
        if (((l1 + l2) + l3) < s):
            pm.pm_diagram[(n - 1)][0] = (l1 + 2)
            pm.pm_diagram[n][0] -= 2
        elif (l2 > 1):
            pm.pm_diagram[(n - 1)][1] = (l2 - 2)
            pm.pm_diagram[n][0] += 2
        elif (l2 == 1):
            pm.pm_diagram[(n - 1)][1] = 0
            pm.pm_diagram[(n - 1)][0] = (l1 + 1)
        pm = PMDiagram(pm.pm_diagram)
        b = self.parent().from_pm_diagram_to_highest_weight_vector(pm)
        b = b.f_string(reversed(l))
        return self.parent().retract(b)

    def epsilon0(self):
        """Return epsilon_0(self), read off the +/- diagram of the lift."""
        n = (self.parent().cartan_type().rank() - 1)
        [b, l] = self.lift().to_highest_weight(index_set=list(range(2, (n + 1))))
        pm = self.parent().from_highest_weight_vector_to_pm_diagram(b)
        l1 = pm.pm_diagram[(n - 1)][0]
        l4 = pm.pm_diagram[n][0]
        return (l1 + (l4 // 2))

    def phi0(self):
        """Return phi_0(self), read off the +/- diagram of the lift."""
        n = (self.parent().cartan_type().rank() - 1)
        (b, l) = self.lift().to_highest_weight(index_set=list(range(2, (n + 1))))
        pm = self.parent().from_highest_weight_vector_to_pm_diagram(b)
        l2 = pm.pm_diagram[(n - 1)][1]
        l4 = pm.pm_diagram[n][0]
        return (l2 + (l4 // 2))
# BUG FIX(review): the decorators were mangled to bare `.parametrize(...)` /
# `_utils.test(...)` fragments (syntax errors); restored the standard pytest
# markers and the Taichi `test_utils.test` wrapper — confirm the module name
# matches this repo's test helper import.
@pytest.mark.parametrize('dtype', [ti.i32, ti.f32, ti.i64, ti.f64])
@pytest.mark.parametrize('shape', [(8,), (6, 12)])
@pytest.mark.parametrize('offset', [0, (- 4), 4])
@pytest.mark.parametrize('m, n', [(3, 4)])
@test_utils.test(arch=get_host_arch_list())
def test_matrix_to_numpy_with_offset(dtype, shape, offset, m, n):
    """A matrix field with an index offset must export to numpy with the
    plain (non-offset) shape plus trailing (n, m) matrix dims."""
    import numpy as np
    x = ti.Matrix.field(dtype=dtype, m=m, n=n, shape=shape, offset=([offset] * len(shape)))
    x.fill(1.0)
    numpy_dtypes = {ti.i32: np.int32, ti.f32: np.float32, ti.f64: np.float64, ti.i64: np.int64}
    # to_numpy() drops the offset: expected shape is field shape + (n, m).
    numpy_shape = (((shape,) if isinstance(shape, int) else shape) + (n, m))
    arr = x.to_numpy()
    assert np.allclose(arr, np.ones(numpy_shape, dtype=numpy_dtypes[dtype]))
def feature_column_json_hook(obj):
    """JSON ``object_hook`` that revives supported feature-column dicts.

    A dict whose 'type' entry names a supported concrete feature column is
    converted via ``FeatureColumn.from_dict_or_feature_column``; anything
    else (including unsupported dicts) is passed through unchanged.
    """
    if not isinstance(obj, dict):
        return obj
    if obj.get('type') in SUPPORTED_CONCRETE_FEATURE_COLUMNS:
        return FeatureColumn.from_dict_or_feature_column(obj)
    return obj
class DecoderClassifier(nn.Module):
    """Thin wrapper around BertOnlyMLMHead producing token-level scores."""

    def __init__(self, config, embedding_weights):
        super(DecoderClassifier, self).__init__()
        # The MLM head projects hidden states to vocabulary scores, tied to
        # the provided embedding weights.
        self.cls = BertOnlyMLMHead(config, embedding_weights)

    def forward(self, hidden_states):
        # Delegate entirely to the MLM head.
        return self.cls(hidden_states)
# BUG FIX(review): the decorator was mangled to a bare `_utils.test(...)`
# fragment (syntax error); restored as `@test_utils.test`. The inner kernel
# decorator was also missing — restored as `@ti.kernel`, which the debug
# bound-check assertion requires; confirm both against the original repo.
@test_utils.test(require=ti.extension.assertion, debug=True, gdb_trigger=False)
def test_not_out_of_bound_with_offset():
    """Touching the extreme valid indices of an offset field must not trip
    the debug out-of-bound assertion."""
    # Valid indices run from (-4, -8) to (3, 7) inclusive.
    x = ti.field(ti.i32, shape=(8, 16), offset=((- 4), (- 8)))

    @ti.kernel
    def func():
        x[((- 4), (- 8))] = 1
        x[(3, 7)] = 2

    func()
def read_frames_cv2_charades(video_path, num_frames, sample, start_sec=None, end_sec=None):
    """Decode ``num_frames`` RGB frames from a video with OpenCV.

    When neither ``start_sec`` nor ``end_sec`` is given, frame indices are
    drawn over the whole clip via ``sample_frames``; otherwise they come from
    the [start_sec, end_sec] window via ``sample_frames_start_end``.

    Returns:
        (frames, success_idxs): a float tensor of shape (T, 3, H, W) scaled
        to [0, 1], and the list of frame indices that decoded successfully.
    """
    cap = cv2.VideoCapture(video_path)
    assert cap.isOpened()
    vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # BUG FIX(review): was the bare magic number 5 — cv2.CAP_PROP_FPS == 5.
    fps = cap.get(cv2.CAP_PROP_FPS)
    # NOTE(review): this truthiness test treats start_sec=0 / end_sec=0 the
    # same as "not provided", and crashes if only one bound is given —
    # confirm callers always pass both bounds or neither.
    if ((not start_sec) and (not end_sec)):
        frame_idxs = sample_frames(num_frames, vlen, sample=sample)
    else:
        # Clamp the requested second window to the clip's valid frame range.
        start_f = max(0, int((start_sec * fps)))
        end_f = min(int((end_sec * fps)), vlen)
        frame_idxs = sample_frames_start_end(num_frames, start_f, end_f, sample=sample)
    frames = []
    success_idxs = []
    for index in frame_idxs:
        # CAP_PROP_POS_FRAMES is 0-based; the sampled indices are shifted by 1.
        cap.set(cv2.CAP_PROP_POS_FRAMES, (index - 1))
        (ret, frame) = cap.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # HWC uint8 image -> CHW tensor.
            frame = torch.from_numpy(frame).permute(2, 0, 1)
            frames.append(frame)
            success_idxs.append(index)
        # Failed reads are silently skipped; only successful indices returned.
    # Release the capture before stacking so the handle is not leaked if
    # stacking raises (torch.stack fails when no frame decoded at all).
    cap.release()
    frames = (torch.stack(frames).float() / 255)
    return (frames, success_idxs)
class ETagResponseMixin(object):
    """Adds entity-tag, cache-control and HTTP Range support to a response.

    Restored from a mangled copy: the ``@property`` decorators on
    ``cache_control`` / ``content_range``, the ``@content_range.setter``, and
    the right-hand side of the Date header assignment had been stripped.
    Without the property, ``self.content_range = ...`` in
    ``_process_range_request`` would have shadowed the accessor.
    """

    @property
    def cache_control(self):
        """The Cache-Control header parsed into a mutable ResponseCacheControl;
        mutating the returned object writes the header back."""
        def on_update(cache_control):
            if ((not cache_control) and ('cache-control' in self.headers)):
                del self.headers['cache-control']
            elif cache_control:
                self.headers['Cache-Control'] = cache_control.to_header()
        return parse_cache_control_header(self.headers.get('cache-control'), on_update, ResponseCacheControl)

    def _wrap_response(self, start, length):
        # Only wrap the body iterator when a 206 partial response was set up.
        if (self.status_code == 206):
            self.response = _RangeWrapper(self.response, start, length)

    def _is_range_request_processable(self, environ):
        # Honour Range unless If-Range indicates the resource has changed.
        return ((('HTTP_IF_RANGE' not in environ) or (not is_resource_modified(environ, self.headers.get('etag'), None, self.headers.get('last-modified'), ignore_if_range=False))) and ('HTTP_RANGE' in environ))

    def _process_range_request(self, environ, complete_length=None, accept_ranges=None):
        """Handle an HTTP Range request; return True if a 206 was prepared.

        Raises RequestedRangeNotSatisfiable (416) when the Range header is
        malformed or outside the resource.
        """
        from ..exceptions import RequestedRangeNotSatisfiable
        if ((accept_ranges is None) or (complete_length is None) or (not self._is_range_request_processable(environ))):
            return False
        parsed_range = parse_range_header(environ.get('HTTP_RANGE'))
        if (parsed_range is None):
            raise RequestedRangeNotSatisfiable(complete_length)
        range_tuple = parsed_range.range_for_length(complete_length)
        content_range_header = parsed_range.to_content_range_header(complete_length)
        if ((range_tuple is None) or (content_range_header is None)):
            raise RequestedRangeNotSatisfiable(complete_length)
        content_length = (range_tuple[1] - range_tuple[0])
        self.headers['Content-Length'] = content_length
        self.headers['Accept-Ranges'] = accept_ranges
        self.content_range = content_range_header
        self.status_code = 206
        self._wrap_response(range_tuple[0], content_length)
        return True

    def make_conditional(self, request_or_environ, accept_ranges=False, complete_length=None):
        """Make the response conditional on the request.

        Handles Range (206), If-Match (412) and not-modified (304) outcomes
        for GET/HEAD requests; always returns self for chaining.
        """
        environ = _get_environ(request_or_environ)
        if (environ['REQUEST_METHOD'] in ('GET', 'HEAD')):
            if ('date' not in self.headers):
                # BUG FIX(review): the right-hand side was missing entirely in
                # the source; restored stamping the current HTTP date via the
                # module's http_date helper (presumably imported at file top).
                self.headers['Date'] = http_date()
            accept_ranges = _clean_accept_ranges(accept_ranges)
            is206 = self._process_range_request(environ, complete_length, accept_ranges)
            if ((not is206) and (not is_resource_modified(environ, self.headers.get('etag'), None, self.headers.get('last-modified')))):
                if parse_etags(environ.get('HTTP_IF_MATCH')):
                    self.status_code = 412  # precondition failed on If-Match
                else:
                    self.status_code = 304  # not modified
            if (self.automatically_set_content_length and ('content-length' not in self.headers)):
                length = self.calculate_content_length()
                if (length is not None):
                    self.headers['Content-Length'] = length
        return self

    def add_etag(self, overwrite=False, weak=False):
        """Add an ETag computed from the body unless one is already set."""
        if (overwrite or ('etag' not in self.headers)):
            self.set_etag(generate_etag(self.get_data()), weak)

    def set_etag(self, etag, weak=False):
        """Set the response ETag (optionally a weak one)."""
        self.headers['ETag'] = quote_etag(etag, weak)

    def get_etag(self):
        """Return (etag, is_weak), or (None, None) when unset."""
        return unquote_etag(self.headers.get('ETag'))

    def freeze(self, no_etag=False):
        """Freeze the response for pickling; adds an ETag unless disabled."""
        if (not no_etag):
            self.add_etag()
        super(ETagResponseMixin, self).freeze()

    accept_ranges = header_property('Accept-Ranges', doc="The `Accept-Ranges` header. Even though the name would\n indicate that multiple values are supported, it must be one\n string token only.\n\n The values ``'bytes'`` and ``'none'`` are common.\n\n .. versionadded:: 0.7")

    @property
    def content_range(self):
        """The Content-Range header as a mutable ContentRange object."""
        def on_update(rng):
            if (not rng):
                del self.headers['content-range']
            else:
                self.headers['Content-Range'] = rng.to_header()
        rv = parse_content_range_header(self.headers.get('content-range'), on_update)
        if (rv is None):
            rv = ContentRange(None, None, None, on_update=on_update)
        return rv

    @content_range.setter
    def content_range(self, value):
        if (not value):
            del self.headers['content-range']
        elif isinstance(value, string_types):
            self.headers['Content-Range'] = value
        else:
            self.headers['Content-Range'] = value.to_header()
def write_file(file):
    """Write TXT followed by one Record line per word group from test_words()."""
    file.write(TXT)
    for group in test_words():
        rec = Record()
        for w in group:
            rec.add(w)
        # One record per group, newline-terminated.
        file.write(str(rec))
        file.write('\n')
class EntropyEstimator(BaseEstimator, ABC, metaclass=EntropyEstimatorType):
    """Base class for entropy estimators.

    Concrete subclasses implement ``fit``, which stores the entropy estimate
    in ``estimate_`` and its uncertainty in ``err_``.
    """

    def __init__(self):
        self.estimate_ = None       # fitted entropy estimate
        self.err_ = None            # error/uncertainty of the estimate, if any
        self.input_data_ndim = 1    # expected dimensionality of the counts input

    def __call__(self, nk, k=None, zk=None):
        """Fit on the counts and return the entropy estimate directly."""
        return self.fit(nk, k=k, zk=zk).estimate_

    def algorithm(self):
        """Name of the estimation algorithm (the concrete class name)."""
        return self.__class__.__name__

    # NOTE(review): restored as a @staticmethod — the original definition had
    # no `self` parameter, which would break any `self.check_alpha(...)` call.
    @staticmethod
    def check_alpha(a):
        """Validate a pseudocount parameter and return it as numpy.float64.

        Raises:
            AlphaError: if ``a`` is None, non-numeric, or not positive.
        """
        error_msg = ('alpha must be a positive number (got %r).' % a)
        if (a is None):
            raise AlphaError(error_msg)
        try:
            a = numpy.float64(a)
        except ValueError:
            raise AlphaError(error_msg)
        if (a <= 0):
            raise AlphaError(error_msg)
        return a

    def fit(self, nk, k=None, zk=None):
        """Fit the estimator on counts ``nk`` (optional support size ``k`` and
        multiplicities ``zk``); must set ``estimate_``/``err_`` and return self.

        NOTE(review): the body was truncated in the source; stubbed to raise
        so concrete subclasses are forced to override — confirm against the
        original implementation.
        """
        raise NotImplementedError
def test_vector_draw(verbose=False):
    """Smoke-test vectorised Polya-Gamma sampling: pgdrawv must fill the
    output buffer without raising. Returns True on success."""
    np.random.seed(0)
    ppg = pypolyagamma.PyPolyaGamma(np.random.randint((2 ** 16)))
    n = 5
    v2 = np.zeros(n)
    # BUG FIX(review): np.float was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin float is the documented drop-in (dtype float64).
    a = (14 * np.ones(n, dtype=float))
    b = (0 * np.ones(n, dtype=float))
    ppg.pgdrawv(a, b, v2)
    if verbose:
        print(v2)
    return True
class Parser(argparse.ArgumentParser, ABC):
    """Argument parser for the model-partitioning pipeline.

    Builds one argument group per concern (model, data, partitioning,
    heuristics, METIS, acyclic, bin-packing, mpipe, analysis), applies
    presets, and converts the parsed namespace into the per-algorithm
    option dicts the partitioners consume.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One argument group per concern; subclasses extend via the hooks.
        model_args = self.add_argument_group('model_args')
        self._add_model_args(model_args)
        data_args = self.add_argument_group('data_args')
        self._add_data_args(data_args)
        partitioning_args = self.add_argument_group('partitioning_args')
        self._add_partitioning_args(partitioning_args)
        heuristics_args = self.add_argument_group('heuristics_args')
        self._add_heurisitcs_args(heuristics_args)
        presets_args = self.add_argument_group('presets')
        self._add_presets_args(presets_args)
        METIS_args = self.add_argument_group('METIS_args')
        self._add_METIS_args(METIS_args)
        acyclic_args = self.add_argument_group('acyclic_args')
        self._add_acyclic_args(acyclic_args)
        binpack_args = self.add_argument_group('binpack_args')
        self._add_binpack_args(binpack_args)
        mpipe_args = self.add_argument_group('mpipe_args')
        self._add_mpipe_args(mpipe_args)
        analysis_args = self.add_argument_group('analysis_args')
        self._add_analysis_args(analysis_args)
        extra_args = self.add_argument_group('extra_args')
        extra_args.add_argument('--debug', action='store_true', default=False)
        self._extra(extra_args)
        self.set_defaults(**self._default_values())

    def _add_model_args(self, group):
        """Hook for subclasses to add model-selection arguments (no-op here)."""

    def _add_data_args(self, group):
        """Hook for subclasses to add dataset arguments (no-op here)."""

    def _add_analysis_args(self, group):
        """Options controlling the post-partition analysis pass."""
        analysis_mode = group.add_mutually_exclusive_group()
        analysis_mode.add_argument('--no_analysis', action='store_true', default=False, help='disable partition analysis')
        analysis_mode.add_argument('--analysis_only', action='store_true', default=False, help='run only analysis for partitioned model')
        group.add_argument('--analysis_batch_size', default=32, type=int, help='batch size to use during the post partition analysis')
        group.add_argument('--analysis_as_async_pipeline', default=False, action='store_true', help='Force analysis as async pipeline')

    def _add_heurisitcs_args(self, group):
        """Weight/penalty heuristics used when building the partition graph."""
        group.add_argument('--bw', type=float, default=12, help='data transfer rate between gpus in GBps (Gigabytes per second)')
        group.add_argument('--bwd_to_fwd_ratio', type=float, default=1, help='bwd to fwd ratio for heuristics')
        group.add_argument('--auto_infer_node_bwd_to_fwd_ratio', action='store_true', default=False, help='Automatically infer bwd to fwd ratio for nodes (computation). Expected Ratio for edges should be given `by bwd_to_fwd_ratio`')
        group.add_argument('--penalize_non_tensors', action='store_true', default=False, help='penalize edges with non tensor outputs by default no penalties are applied')
        group.add_argument('--weight_mult_factor', type=float, default=10000.0, help='a constant to multiply weights with (useful if weights are really small)')
        group.add_argument('--edge_penalty', type=float, default=.0, help='multiplicative penalty for edges if `penalize_non_tensors` is set')

    def _add_partitioning_args(self, group):
        """Core partitioning options: sizes, method, profiling and caching."""
        group.add_argument('-b', '--partitioning_batch_size', type=int, default=128)
        group.add_argument('-p', '--n_partitions', type=int, default=4)
        group.add_argument('-o', '--output_file', default='')
        group.add_argument('--disable_autogenerated_name', action='store_true', default=False)
        group.add_argument('--n_iter', type=int, default=10, help='number of iteration used in order to profile the network and run analysis')
        group.add_argument('--no_recomputation', action='store_true', default=False, help='whether to (not) use recomputation for the backward pass')
        group.add_argument('--depth', default=10000, type=int, help='the depth in which we will partition the model')
        group.add_argument('--basic_blocks', nargs='*', default=[])
        group.add_argument('--use_network_profiler', default=False, action='store_true', help='whether to use the old network_profiler instead of the newer graph based profiler')
        group.add_argument('--sanity_check', default=False, action='store_true', help='whether to use do sanity check after partitioning')
        group.add_argument('--disable_op_profiling', default=False, action='store_true', help='weheter to not profile ops when using the GraphProfiler')
        group.add_argument('--partitioning_method', '-m', choices=['acyclic', 'metis', '2dbin', 'mpipe', 'pipedream'], default='acyclic')
        group.add_argument('--generate_explicit_del', action='store_true', default=False, help='whether to generate del statements in partitioned code')
        group.add_argument('--no_activation_propagation', action='store_true', default=False, help='whether to not propgate activations in the pipeline, and having direct sends instead')
        group.add_argument('-a', '--async_pipeline', default=False, action='store_true', help='Do partitioning and analysis for async pipeline')
        group.add_argument('--dont_use_async_meta_alg', default=False, action='store_true', help='Explicitly avoid the async meta alg. (e.g when number of stages is big)')
        group.add_argument('--dot', default=False, action='store_true', help='Save and plot it using graphviz')
        group.add_argument('--save_memory_mode', default=False, action='store_true', help=('Save memory during profiling by storing everything on cpu,' + ' but sending each layer to GPU before the profiling.'))
        group.add_argument('--trace_on_gpu', default=False, action='store_true', help='Used together with save_memory_mode: if true, will trace the model on GPU despite swapping during profiling.')
        group.add_argument('--force_no_recomputation_scopes', nargs='*', default=[])
        group.add_argument('--cp', '--profiles_cache_name', default='', type=str, dest='profiles_cache_name', help='Profile cache to use in case of multiple runs')
        group.add_argument('--overwrite_profiles_cache', action='store_true', default=False, help='overwrite profile cache')
        group.add_argument('--ct', '--trace_cache_name', default='', type=str, dest='trace_cache_name', help='Trace cache to use in case of multiple runs')
        group.add_argument('--overwrite_trace_cache', action='store_true', default=False, help='overwrite trace cache')

    def _add_METIS_args(self, group):
        """Options forwarded to the METIS partitioning backend."""
        group.add_argument('--metis_attempts', type=int, default=10, help='number of attempts for running the METIS partitioning algorithm')
        group.add_argument('--metis_verbose_on_error', action='store_true', default=False, help='whether to print the cause of the error')
        group.add_argument('--metis_seed', required=False, type=int, help='Random seed for Metis algorithm')
        group.add_argument('--metis_compress', default=False, action='store_true', help='Compress')
        group.add_argument('--metis_niter', type=int, help='Specifies the number of iterations for the refinement algorithms at each stage of the uncoarsening process.Default is 10.')
        group.add_argument('--metis_nseps', type=int, help='Specifies the number of different separators that it will compute at each level of nested dissection.The final separator that is used is the smallest one. Default is 1.')
        group.add_argument('--metis_ncuts', type=int, help='Specifies the number of different partitionings that it will compute. The final partitioning is the one that achieves the best edgecut or communication volume.Default is 1.')
        group.add_argument('--metis_dbglvl', type=int, help='Metis debug level. Refer to the docs for explanation')
        group.add_argument('--metis_objtype', type=int, help='Extra objective type to miminize (0: edgecut, 1: vol, default: edgecut)')
        group.add_argument('--metis_contig', default=False, action='store_true', help='A boolean to create contigous partitions.')

    def _add_binpack_args(self, group):
        """Options for the 2D bin-packing partitioner."""
        group.add_argument('--n_clusters', default=2, type=int, help='number of clusters in the model')
        group.add_argument('--analyze_n_clusters', action='store_true', default=False, help='analyze number of clusters')
        group.add_argument('--reminder_policy', type=str, choices=list(ReminderPolicy._value2member_map_.keys()), default='last', help=f'Policy for reminder items in cluster, {ReminderPolicy._value2member_map_}')
        group.add_argument('--second_and_on_cluster_policy', type=str, choices=list(SecondAndOnClusterPolicy._value2member_map_.keys()), default='best_fit', help=f'Policy for 2nd and on cluster {SecondAndOnClusterPolicy._value2member_map_}')
        group.add_argument('--THRESHOLD', type=float, default=0, help='values <= threshold will be contagious with closest stage')

    def _add_mpipe_args(self, group):
        """Options for the mpipe partitioner."""
        group.add_argument('--special_blocks', type=str, nargs='*', default=[])
        group.add_argument('--L', nargs='*', type=int, default=[])

    def _add_acyclic_args(self, group):
        """Options for the acyclic partitioning algorithm."""
        group.add_argument('--epsilon', default=0.1, type=float, help='imbalance factor')
        group.add_argument('--rounds', default=10, type=int, help='number of optimization rounds default is 10')
        group.add_argument('--allocated_seconds', default=20, type=int, help='run time allocated to the partitioning algorithm default is 20 seconds')
        group.add_argument('--multilevel', action='store_true', default=False, help='whether to use multilevel partitioning algorithm')
        group.add_argument('--objective', choices=['edge_cut', 'stage_time'], default='stage_time', help='partitioning optimization objective')
        group.add_argument('--constraint', choices=['time', 'memory'], default='time', help='partitioning constraint')
        group.add_argument('--maximum_constraint_value', required=False, type=float, default=None, help='maximum constraint value a single stage can have,for example for memory constraint this is the maximum number of parameters a stage can have')

    def _add_presets_args(self, group):
        """Named presets that override individual flags after parsing."""
        group.add_argument('--preset', choices=['ftpipe', 'pipedream', 'gpipe'], required=False, help='set preset partitioning and analysis arguments')

    def parse_presets(self, args):
        """Apply the chosen preset's flag overrides in place and return args."""
        if (args.preset == 'ftpipe'):
            args.async_pipeline = True
            args.bwd_to_fwd_ratio = 1
        elif (args.preset == 'pipedream'):
            args.async_pipeline = False
            args.bwd_to_fwd_ratio = 1
            args.analysis_as_async_pipeline = True
        elif (args.preset == 'gpipe'):
            args.auto_infer_node_bwd_to_fwd_ratio = True
            args.async_pipeline = False
            args.bwd_to_fwd_ratio = (- 1)
            args.analysis_as_async_pipeline = False
        elif args.preset:
            # argparse choices should prevent this; guard against drift.
            raise NotImplementedError()
        return args

    def _extra(self, group):
        """Hook for subclasses to add extra arguments (no-op here)."""

    def _default_values(self):
        """Defaults merged into the parser via set_defaults; override in subclasses."""
        return dict()

    def _post_parse(self, args, argv):
        # Reject leftover unparsed arguments with argparse's standard message.
        if argv:
            msg = gettext('unrecognized arguments: %s')
            self.error((msg % ' '.join(argv)))
        return args

    def _auto_file_name(self, args) -> str:
        """Hook returning an auto-generated output file name; '' disables it."""
        return ''

    def parse_args(self, args=None, namespace=None) -> Namespace:
        """Parse, apply presets, and attach the per-algorithm option dicts."""
        (args, extra) = super().parse_known_args(args, namespace)
        self.parse_presets(args)
        args.acyclic_opt = self._acyclic_opts_dict_from_parsed_args(args)
        args.METIS_opt = self._metis_opts_dict_from_parsed_args(args)
        args.binpack_opt = self._binpack_opts_dict_from_parsed_args(args)
        args.mpipe_opt = self._mpipe_opts_dict_from_parsed_args(args)
        if ((not args.disable_autogenerated_name) or (not args.output_file)):
            args.output_file = self._auto_file_name(args)
        if args.output_file.endswith('.py'):
            args.output_file = args.output_file[:(- 3)]
        device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
        args.device = device
        args.force_no_recomputation_scopes_fn = (lambda scope: any(((s in scope) for s in args.force_no_recomputation_scopes)))
        return self._post_parse(args, extra)

    # BUG FIX(review): restored @staticmethod — defined without `self` but
    # called as self._acyclic_opts_dict_from_parsed_args(args); without the
    # decorator, `self` would have been bound to the `args` parameter.
    @staticmethod
    def _acyclic_opts_dict_from_parsed_args(args):
        """Translate parsed args into the acyclic partitioner's option dict."""
        if (args.objective == 'edge_cut'):
            objective = Objective.EDGE_CUT
        else:
            objective = Objective.STAGE_TIME
        if args.multilevel:
            meta_algorithm = META_ALGORITH.MULTI_LEVEL
        else:
            meta_algorithm = META_ALGORITH.SINGLE_LEVEL
        if (args.constraint == 'time'):
            constraint = Constraint.TIME
        else:
            constraint = Constraint.MEMORY
        return {'epsilon': args.epsilon, 'rounds': args.rounds, 'allocated_seconds': args.allocated_seconds, 'meta_algorithm': meta_algorithm, 'objective': objective, 'constraint': constraint, 'maximum_constraint_value': args.maximum_constraint_value}

    # BUG FIX(review): restored @staticmethod (same reason as above).
    @staticmethod
    def _metis_opts_dict_from_parsed_args(args):
        """Translate parsed args into the METIS option dict."""
        # BUG FIX(review): 'nseps' -> 'metis_nseps'; the argument is stored as
        # metis_nseps, so getattr(args, 'nseps', None) was always None.
        METIS_opt = {'verbose_on_error': getattr(args, 'metis_verbose_on_error', False), 'attempts': getattr(args, 'metis_attempts', 1000), 'seed': getattr(args, 'metis_seed', None), 'nseps': getattr(args, 'metis_nseps', None), 'niter': getattr(args, 'metis_niter', None), 'compress': getattr(args, 'metis_compress', None), 'ncuts': getattr(args, 'metis_ncuts', None), 'objtype': getattr(args, 'metis_objtype', None), 'contig': getattr(args, 'metis_contig', None), '_dbglvl': 1}
        return METIS_opt

    def _binpack_opts_dict_from_parsed_args(self, args):
        """Translate parsed args into the bin-packing option dict."""
        d = dict()
        d['n_clusters'] = args.n_clusters
        d['analyze_n_clusters'] = args.analyze_n_clusters
        if hasattr(args, 'second_and_on_cluster_policy'):
            d['second_and_on_cluster_policy'] = args.second_and_on_cluster_policy
        if hasattr(args, 'reminder_policy'):
            d['reminder_policy'] = args.reminder_policy
        d['THRESHOLD'] = args.THRESHOLD
        return d

    def _mpipe_opts_dict_from_parsed_args(self, args):
        """Translate parsed args into the mpipe option dict."""
        d = dict()
        d['depth'] = args.depth
        d['L_list'] = args.L
        return d
def add_to_ld_library_path(path):
    """Prepend path to LD_LIBRARY_PATH unless it is already listed."""
    current = os.environ.get('LD_LIBRARY_PATH', '')
    entries = current.split(':') if current else []
    if path in entries:
        return
    os.environ['LD_LIBRARY_PATH'] = ':'.join([path] + entries)
def _class_counter(data_dict):
counter = Counter()
for (data_id, data) in data_dict.items():
counter.update([data['class_name']])
return counter |
def inference(data):
    """Five-layer fully connected classifier (TF1-style graph construction).

    Weights are allocated as flat vectors and reshaped into matrices at use
    time; each hidden layer has 50 ReLU units, with activation summaries
    recorded per layer, and the output is a 10-way softmax.
    """
    with tf.variable_scope('inference') as scope:
        # Input layer: (IMAGE_SIZE*IMAGE_SIZE) -> 50.
        W_1 = utils.weight_variable([((IMAGE_SIZE * IMAGE_SIZE) * 50)], name='W_1')
        b_1 = utils.bias_variable([50], name='b_1')
        act = tf.nn.relu(tf.matmul(data, tf.reshape(W_1, [(IMAGE_SIZE * IMAGE_SIZE), 50])) + b_1, name='h_1')
        utils.add_activation_summary(act)
        # Hidden layer 2 (50 -> 50).
        W_2 = utils.weight_variable([(50 * 50)], name='W_2')
        b_2 = utils.bias_variable([50], name='b_2')
        act = tf.nn.relu(tf.matmul(act, tf.reshape(W_2, [50, 50])) + b_2, name='h_2')
        utils.add_activation_summary(act)
        # Hidden layer 3 (50 -> 50).
        W_3 = utils.weight_variable([(50 * 50)], name='W_3')
        b_3 = utils.bias_variable([50], name='b_3')
        act = tf.nn.relu(tf.matmul(act, tf.reshape(W_3, [50, 50])) + b_3, name='h_3')
        utils.add_activation_summary(act)
        # Hidden layer 4 (50 -> 50).
        W_4 = utils.weight_variable([(50 * 50)], name='W_4')
        b_4 = utils.bias_variable([50], name='b_4')
        act = tf.nn.relu(tf.matmul(act, tf.reshape(W_4, [50, 50])) + b_4, name='h_4')
        utils.add_activation_summary(act)
        # Output layer (50 -> 10) with softmax.
        W_final = utils.weight_variable([(50 * 10)], name='W_final')
        b_final = utils.bias_variable([10], name='b_final')
        return tf.nn.softmax(tf.matmul(act, tf.reshape(W_final, [50, 10])) + b_final, name='h_final')
class MSELoss(Loss):
    """Mean squared error averaged over all elements of the prediction."""

    def __init__(self):
        super().__init__('MSELoss')

    def compute_loss(self, y_true, output_model):
        """Validate the inputs, then return mean((output_model - y_true) ** 2).

        Raises TypeError for missing or non-tensor arguments and ValueError
        if the resulting loss falls outside [0, 1].
        """
        # Validation order is significant for which error a caller sees.
        if output_model is None:
            raise TypeError('Argument: output_model must be set.')
        if y_true is None:
            raise TypeError('Argument: y_true must be set.')
        if not isinstance(output_model, torch.Tensor):
            raise TypeError('Argument: output_model must be a pytorch tensor.')
        if not isinstance(y_true, torch.Tensor):
            raise TypeError('Argument: y_true must be a pytorch tensor.')
        self.__check_dim_pred_gt__(output_model, y_true)
        squared_error = (output_model - y_true) ** 2
        result = squared_error.sum() / output_model.data.nelement()
        if not (0 <= result <= 1):
            raise ValueError('The output of MSELoss.compute_loss must be in [0,1]')
        return result
def make_numpy_ndarray_fromstring(s, dtype, shape):
    """Deserialize raw bytes *s* into a writable ndarray of *dtype* and *shape*.

    Uses numpy.frombuffer + copy instead of the deprecated numpy.fromstring
    (whose binary mode is deprecated/removed in newer numpy); the .copy()
    keeps the returned array writable, matching fromstring's behavior.
    """
    return numpy.frombuffer(s, dtype=dtype).copy().reshape(shape)
def plot_recall(measures, eval_dir, plot_file):
    """Plot recall-vs-cutoff curves and save the figure to eval_dir/plot_file.

    `measures` maps a series name to a dict whose values are (cutoff, recall)
    pairs; one scatter+line series is drawn per name.  (The old unused
    `labels = measure.keys()` local has been removed.)
    """
    plt.figure(figsize=(10, 8))
    plt.xlabel('cut-off', fontsize=15)
    plt.ylabel('recall', fontsize=15)
    for name, measure in measures.items():
        xs, ys = zip(*measure.values())
        plt.scatter(xs, ys, marker='o')
        plt.plot(xs, ys, label=name)
    plt.legend(loc='lower right')
    plt.savefig(os.path.join(eval_dir, plot_file))
def check_cuda_kernel_launches():
    """Scan the torch source tree for CUDA kernel launches lacking error checks.

    Returns the total number of unchecked launches; offending files are
    reported on stderr.
    """
    # The torch root is three directory levels above this file.
    torch_dir = os.path.realpath(__file__)
    for _ in range(3):
        torch_dir = os.path.dirname(torch_dir)
    unchecked_total = 0
    offending_files = []
    skip_roots = (os.path.join(torch_dir, 'build'),
                  os.path.join(torch_dir, 'torch/include'))
    for root, dirnames, filenames in os.walk(torch_dir):
        if root in skip_roots:
            # Do not descend into generated/installed trees.
            dirnames[:] = []
            continue
        for entry in filenames:
            full_path = os.path.join(root, entry)
            count = check_file(full_path)
            if count > 0:
                unchecked_total += count
                offending_files.append(full_path)
    if unchecked_total > 0:
        summary = f"Found {unchecked_total} instances in {len(offending_files)} files where kernel launches didn't have checks."
        print(summary, file=sys.stderr)
        print('Files without checks:', file=sys.stderr)
        for entry in offending_files:
            print(f' {entry}', file=sys.stderr)
        # The summary is printed again after the file list, as before.
        print(summary, file=sys.stderr)
    return unchecked_total
class Aggregator(object):
    """Base class for neighborhood-aggregation layers (GNN-style).

    Subclasses implement `_call` to combine each node's own vectors with an
    aggregate of its neighbors' vectors.
    """

    def __init__(self, batch_size, dim, dropout, act, name):
        # Auto-generate a unique layer name ("<classname>_<id>") when none given.
        if (not name):
            layer = self.__class__.__name__.lower()
            name = ((layer + '_') + str(get_layer_id(layer)))
        self.name = name
        self.dropout = dropout
        self.act = act          # activation callable used by subclasses
        self.batch_size = batch_size
        self.dim = dim          # feature dimensionality

    def __call__(self, self_vectors, neighbor_vectors):
        outputs = self._call(self_vectors, neighbor_vectors)
        return outputs

    def _call(self, self_vectors, neighbor_vectors):
        # Implemented by subclasses.
        pass

    def _mix_neighbor_vectors(self, neighbor_vectors):
        """Average the non-padding neighbor vectors along the neighbor axis.

        A neighbor counts as "real" if the max over its feature axis is
        positive (zero-padded neighbors are thus excluded from the count).
        Assumes neighbor_vectors has shape (batch, entities, neighbors, dim)
        — TODO confirm against callers.
        """
        print(neighbor_vectors)
        # b: per-neighbor max feature; c: mask of neighbors with any positive entry.
        b = tf.reduce_max(neighbor_vectors, (- 1))
        c = (b > 0.0)
        # d: count of real neighbors; tiny bias avoids division by zero.
        d = tf.reduce_sum(tf.cast(c, tf.float32), (- 1), keepdims=True)
        d = tf.nn.bias_add(d, [1e-10])
        print('agg', b, c, d)
        # Broadcast the per-entity count across the feature dimension.
        e = tf.tile(d, [1, 1, self.dim])
        neighbors_aggregated = (tf.reduce_sum(neighbor_vectors, axis=2) / e)
        return neighbors_aggregated
@unittest.skipIf(not tf, 'no TF')  # NOTE(review): decorator reconstructed from the stray "((not tf), 'no TF')" residue — confirm against upstream
def test_demo_tf_task12ax_eval():
    """Train 2 epochs on Task12AX, verify eval reproduces the epoch-2 FER,
    then copy the epoch-2 checkpoint as epoch 3 and check eval behavior."""
    cfg_filename = 'demos/demo-tf-native-lstm.12ax.config'
    train_dataset_repr = '{"class": "Task12AXDataset", "num_seqs": 10}'
    dev_dataset_repr = '{"class": "Task12AXDataset", "num_seqs": 10}'
    # Train for 2 epochs and collect the frame error rate.
    fer1 = run_config_get_fer(cfg_filename, '++num_epochs', '2', '++train', train_dataset_repr, '++dev', dev_dataset_repr, print_stdout=True, post_cleanup=False)
    # Eval with the saved epoch-2 model must reproduce the same FER exactly.
    fer2 = run_config_get_fer(cfg_filename, '++task', 'eval', '++load_epoch', '2', '++train', 'None', '++dev', dev_dataset_repr, print_stdout=True, pre_cleanup=False, post_cleanup=False)
    assert fer1 == fer2
    model_filename = get_model_filename(cfg_filename)
    ep2_files = glob(model_filename + '.002.*')
    assert ep2_files, f'No model files found for epoch 2, {model_filename}'
    # Fake an epoch-3 checkpoint by copying the epoch-2 files.
    for fn in ep2_files:
        shutil.copy(fn, fn.replace('.002.', '.003.'))
    fer3 = run_config_get_fer(cfg_filename, '++task', 'eval', '++load_epoch', '3', '++train', 'None', '++dev', dev_dataset_repr, print_stdout=True, pre_cleanup=False)
    # NOTE(review): asserts a *different* FER for the copied checkpoint —
    # this relies on run_config_get_fer internals; confirm the intent.
    assert fer3 != fer2
def sample_queries(data_dir, db_name, out_dir):
    """Write 3 randomly chosen SQL queries from <data_dir>/<db_name>.json into *out_dir*."""
    in_json = os.path.join(data_dir, f'{db_name}.json')
    sqls = load_sqls(in_json, normalize_variables=True)
    num_samples = 3
    chosen = np.random.randint(0, len(sqls), num_samples)
    for count, idx in enumerate(chosen):
        out_txt = os.path.join(out_dir, f'{db_name}-{count}.txt')
        with open(out_txt, 'w') as o_f:
            o_f.write(sqls[idx])
        print(f'{db_name} SQL query saved to {out_txt}')
class SetupCallback(Callback):
    """Lightning callback that prepares log/checkpoint/config directories.

    On rank 0 it creates the output directories and dumps both the project
    and the lightning configs; on other ranks (when not resuming) it moves a
    pre-existing logdir aside into a child_runs/ sibling directory.
    """

    def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config):
        super().__init__()
        self.resume = resume    # whether this run resumes an earlier one
        self.now = now          # timestamp string used in saved config filenames
        self.logdir = logdir
        self.ckptdir = ckptdir
        self.cfgdir = cfgdir
        self.config = config    # project config (OmegaConf)
        self.lightning_config = lightning_config

    def on_keyboard_interrupt(self, trainer, pl_module):
        # On interrupt, save a final checkpoint (rank 0 only).
        if (trainer.global_rank == 0):
            print('Summoning checkpoint.')
            ckpt_path = os.path.join(self.ckptdir, 'last.ckpt')
            trainer.save_checkpoint(ckpt_path)

    def on_fit_start(self, trainer, pl_module):
        if (trainer.global_rank == 0):
            # Create all output directories and persist both configs.
            os.makedirs(self.logdir, exist_ok=True)
            os.makedirs(self.ckptdir, exist_ok=True)
            os.makedirs(self.cfgdir, exist_ok=True)
            if ('callbacks' in self.lightning_config):
                if ('metrics_over_trainsteps_checkpoint' in self.lightning_config['callbacks']):
                    os.makedirs(os.path.join(self.ckptdir, 'trainstep_checkpoints'), exist_ok=True)
            print('Project config')
            print(OmegaConf.to_yaml(self.config))
            OmegaConf.save(self.config, os.path.join(self.cfgdir, '{}-project.yaml'.format(self.now)))
            print('Lightning config')
            print(OmegaConf.to_yaml(self.lightning_config))
            OmegaConf.save(OmegaConf.create({'lightning': self.lightning_config}), os.path.join(self.cfgdir, '{}-lightning.yaml'.format(self.now)))
        elif ((not self.resume) and os.path.exists(self.logdir)):
            # Non-rank-0 worker found a stale, non-resumed logdir: move it
            # out of the way into child_runs/<name>.
            (dst, name) = os.path.split(self.logdir)
            dst = os.path.join(dst, 'child_runs', name)
            os.makedirs(os.path.split(dst)[0], exist_ok=True)
            try:
                os.rename(self.logdir, dst)
            except FileNotFoundError:
                # Another rank may have moved it first; that is fine.
                pass
class ManinSymbolList_group(ManinSymbolList):
    """List of Manin symbols (i, u, v) of weight k for a congruence-type group.

    Each symbol encodes the pair [X^i * Y^(k-2-i), (u, v)] with 0 <= i <= k-2
    and (u, v) running over ``syms``.  The apply_* methods implement the right
    action of the standard matrices S, I, T and T^2 on symbols.
    """

    def __init__(self, level, weight, syms):
        self.__level = level
        self.__syms = syms
        # All pairs: polynomial exponent i crossed with the (u, v) symbols.
        L = [(i, u, v) for i in range(((weight - 2) + 1)) for (u, v) in syms.list()]
        ManinSymbolList.__init__(self, weight, L)

    def level(self):
        """Return the level of this Manin symbol list."""
        return self.__level

    def apply_S(self, j):
        """Apply the matrix S to the j-th symbol; return (index, sign)."""
        (i, u, v) = self._symbol_list[j]
        # S sends (i, u, v) to (k-2-i, v, -u), picking up sign (-1)^i.
        k = self.index((((self._weight - 2) - i), v, (- u)))
        return (k, ((- 1) if (i % 2) else 1))

    def _apply_S_only_0pm1(self):
        """True: applying S only ever scales symbols by 0 or +-1."""
        return True

    def apply_I(self, j):
        """Apply the matrix I to the j-th symbol; return (index, sign)."""
        (i, u, v) = self._symbol_list[j]
        # I sends (i, u, v) to (i, -u, v) with sign (-1)^i.
        k = self.index((i, (- u), v))
        if ((i % 2) == 0):
            return (k, 1)
        else:
            return (k, (- 1))

    def apply_T(self, j):
        """Apply the matrix T to the j-th symbol.

        Returns a list of (index, coefficient) pairs: the binomial expansion
        of the transformed polynomial part over the basis symbols.
        """
        k = self._weight
        (i, u, v) = self._symbol_list[j]
        (u, v) = self.__syms.normalize(v, ((- u) - v))
        # Leading sign (-1)^(k-2); alternates through the expansion below.
        if (((k - 2) % 2) == 0):
            s = 1
        else:
            s = (- 1)
        z = []
        a = Integer(((k - 2) - i))
        for j in range((((k - 2) - i) + 1)):
            m = self.index((j, u, v))
            z.append((m, (s * a.binomial(j))))
            s *= (- 1)
        return z

    def apply_TT(self, j):
        """Apply the matrix T^2 to the j-th symbol.

        Returns a list of (index, coefficient) pairs, analogous to apply_T.
        """
        k = self._weight
        (i, u, v) = self._symbol_list[j]
        (u, v) = self.__syms.normalize(((- u) - v), u)
        # Leading sign (-1)^(k-2-i); alternates through the expansion below.
        if ((((k - 2) - i) % 2) == 0):
            s = 1
        else:
            s = (- 1)
        z = []
        a = Integer(i)
        for j in range((i + 1)):
            m = self.index(((((k - 2) - i) + j), u, v))
            z.append((m, (s * a.binomial(j))))
            s *= (- 1)
        return z

    def apply(self, j, m):
        """Apply an arbitrary 2x2 matrix m = [a,b;c,d] to the j-th symbol.

        Returns a list of (index, coefficient) pairs; empty if the
        transformed (u, v) symbol is not in this list.
        """
        (a, b, c, d) = (m[0], m[1], m[2], m[3])
        (i, u, v) = self._symbol_list[j]
        # P: coefficients of (aX+bY)^i (cX+dY)^(k-2-i) on the monomial basis.
        P = apply_to_monomial(i, (self._weight - 2), a, b, c, d)
        m = self.index((0, ((u * a) + (v * c)), ((u * b) + (v * d))))
        if (m == (- 1)):
            return []
        r = len(self.__syms)
        # Symbols are laid out in blocks of size r, one block per exponent k.
        return [((m + (r * k)), P[k]) for k in range(((self._weight - 2) + 1)) if (P[k] != 0)]

    def normalize(self, x):
        """Return the normalized form of the symbol triple ``x`` = (i, u, v)."""
        (u, v) = self.__syms.normalize(x[1], x[2])
        return (x[0], u, v)
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register ns3::CallbackImplBase constructors and methods on the PyBindGen wrapper.

    Generated-style binding code: each call declares one C++ constructor or
    method signature for the Python bindings.
    """
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    # Pure-virtual public interface.
    cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    # Protected static helpers, including GetCppTypeid template instantiations.
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::ObjectBase*'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'void'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::MobilityModel const> '])
    return
@pytest.mark.parametrize('env_name', ['cartpole-random', 'pendulum-random'])  # NOTE(review): '@pytest.mark' prefix reconstructed — the bare '.parametrize(...)' residue was a syntax error
def test_get_dataset(env_name: str) -> None:
    """get_dataset must return the matching gym environment for each dataset name."""
    (_, env) = get_dataset(env_name)
    if env_name == 'cartpole-random':
        assert env.unwrapped.spec.id == 'CartPole-v1'
    elif env_name == 'pendulum-random':
        assert env.unwrapped.spec.id == 'Pendulum-v1'
def check_damping_factor(damping_factor: float):
    """Validate that *damping_factor* lies in the half-open interval [0, 1).

    Raises ValueError otherwise.
    """
    out_of_range = damping_factor < 0 or damping_factor >= 1
    if out_of_range:
        raise ValueError('A damping factor must have a value in [0, 1[.')
class EmptyRecord():
    """Growable placeholder builder for a record/tuple array with no fields."""

    def __init__(self, is_tuple, parameters):
        self.length_ = 0
        self.is_tuple_ = is_tuple
        self.parameters_ = parameters
        self.set_id(Ref(0))

    def append(self):
        """Grow the record count by one."""
        self.length_ = self.length_ + 1

    def extend(self, size):
        """Grow the record count by *size*."""
        self.length_ = self.length_ + size

    def parameters(self):
        return self.parameters_

    def set_id(self, id: Ref(int)):
        # Consume the current id and remember it for the form key.
        self.id_ = id.value

    def clear(self):
        self.length_ = 0

    def length(self):
        return self.length_

    def is_valid(self, error: Ref(str)):
        # An empty record is always structurally valid.
        return True

    def buffer_nbytes(self, names_nbytes):
        # No buffers: nothing to account for.
        pass

    def to_buffers(self, buffers):
        # No buffers: nothing to emit.
        pass

    def form(self):
        """Return the JSON form string for this (empty) RecordArray."""
        params = '' if self.parameters_ == '' else f', parameters: {self.parameters_}'
        # Tuples serialize their contents as a list, records as a mapping.
        contents = '[]' if self.is_tuple_ else '{}'
        return f'{{"class": "RecordArray", "contents": {contents}, "form_key": "node{self.id_}"{params}}}'
def convert_to_list(python_input):
    """Wrap a lone tensor in a one-element list; otherwise coerce the input to a list."""
    if isinstance(python_input, torch.Tensor):
        return [python_input]
    return list(python_input)
def get_eps_scheduler(args, max_eps, train_data):
    """Instantiate the epsilon scheduler named by args.scheduler_name.

    The scheduler is constructed with (max_eps, args.scheduler_opts) and
    told how many batches make up one epoch of *train_data*.
    """
    # NOTE(review): eval() on a config-supplied name executes arbitrary code;
    # acceptable for trusted configs, but a whitelist/getattr lookup is safer.
    eps_scheduler = eval(args.scheduler_name)(max_eps, args.scheduler_opts)
    # Ceiling division in pure integers — the old float-based
    # int((a + b - 1) / b) could round incorrectly for very large datasets.
    epoch_length = (len(train_data.dataset) + train_data.batch_size - 1) // train_data.batch_size
    eps_scheduler.set_epoch_length(epoch_length)
    return eps_scheduler
class MonodepthOptions():
    """Command-line options for Monodepthv2-style training and evaluation.

    All options are declared in __init__; call parse() to obtain the
    populated argparse namespace.
    """

    def __init__(self):
        self.parser = argparse.ArgumentParser(description='Monodepthv2 options')
        # --- paths and experiment naming ---
        self.parser.add_argument('--data_path', type=str, help='path to the training data', default=os.path.join(file_dir, 'kitti'))
        self.parser.add_argument('--log_dir', type=str, help='log directory', default='./log')
        self.parser.add_argument('--model_name', type=str, help='the name of the folder to save the model in', default='mdp')
        # --- dataset / input format ---
        self.parser.add_argument('--split', type=str, help='which training split to use', choices=['eigen_zhou', 'eigen_full', 'eigen_full_left', 'odom', 'benchmark'], default='eigen_full_left')
        self.parser.add_argument('--num_layers', type=int, help='number of resnet layers', default=50, choices=[18, 34, 50, 101, 152])
        self.parser.add_argument('--dataset', type=str, help='dataset to train on', default='kitti', choices=['kitti', 'kitti_odom', 'kitti_depth', 'kitti_test'])
        self.parser.add_argument('--png', help='if set, trains from raw KITTI png files (instead of jpgs)', action='store_true')
        self.parser.add_argument('--height', type=int, help='input image height', default=192)
        self.parser.add_argument('--width', type=int, help='input image width', default=640)
        # --- loss weights and depth discretization ---
        self.parser.add_argument('--alpha_smooth', type=float, help='disparity smoothness weight', default=0.04)
        self.parser.add_argument('--self_distillation', type=float, help='self_distillation weight', default=0.0)
        self.parser.add_argument('--gamma_smooth', type=float, help='gamma of smooth loss', default=2)
        self.parser.add_argument('--alpha_pc', type=float, help='perceptual loss weight', default=0.1)
        self.parser.add_argument('--disp_min', type=float, help='minimum depth', default=2.0)
        self.parser.add_argument('--disp_max', type=float, help='maximum depth', default=300.0)
        self.parser.add_argument('--disp_levels', type=int, help='num levels of disp', default=49)
        self.parser.add_argument('--disp_layers', type=int, help='num layers of disp', default=2)
        # --- network architecture and warping ---
        self.parser.add_argument('--novel_frame_ids', nargs='+', type=int, help='frames to load', default=[])
        self.parser.add_argument('--net_type', type=str, help='train which network', default='ResNet', choices=['PladeNet', 'ResNet', 'FalNet'])
        self.parser.add_argument('--num_ep', type=int, help='train which stage', default=8)
        self.parser.add_argument('--warp_type', type=str, help='the type of warp', default='disp_warp', choices=['depth_warp', 'disp_warp', 'homography_warp'])
        self.parser.add_argument('--match_aug', action='store_true', help='if set, use color augmented data to compute loss')
        self.parser.add_argument('--use_denseaspp', action='store_true', help='use DenseAspp block in ResNet')
        self.parser.add_argument('--use_mom', action='store_true', help='use mirror occlusion mask')
        self.parser.add_argument('--flip_right', action='store_true', help='use fliped right image to train')
        self.parser.add_argument('--pc_net', type=str, help='the type of net to compute pc loss', default='vgg19', choices=['vgg19', 'resnet18'])
        self.parser.add_argument('--xz_levels', type=int, help='num levels of xz plane', default=14)
        self.parser.add_argument('--yz_levels', type=int, help='num levels of yz plane', default=0)
        self.parser.add_argument('--use_mixture_loss', action='store_true', help='use mixture loss')
        self.parser.add_argument('--alpha_self', type=float, help='perceptual loss weight', default=0.0)
        self.parser.add_argument('--depth_regression_space', type=str, help='how to compute regression depth', default='inv', choices=['inv', 'exp'])
        self.parser.add_argument('--render_probability', action='store_true', help='If set, render probability as NeRF')
        self.parser.add_argument('--plane_residual', action='store_true', help='If set, use residual plane based on init plane')
        self.parser.add_argument('--no_crop', action='store_true', help='if set, do not use resize crop data aug')
        self.parser.add_argument('--pe_type', type=str, help='the type of positional embedding', default='neural', choices=['neural', 'frequency'])
        self.parser.add_argument('--use_colmap', action='store_true', help='if set, use colmap instead of predicting pose by posenet')
        self.parser.add_argument('--colmap_path', type=str, help='path to the colmap data', default='./kitti_colmap')
        self.parser.add_argument('--no_stereo', action='store_true', help='if set, disable stereo supervised')
        # --- optimization ---
        self.parser.add_argument('--batch_size', type=int, help='batch size', default=8)
        self.parser.add_argument('--learning_rate', type=float, help='learning rate', default=0.0001)
        self.parser.add_argument('--beta_1', type=float, help='beta1 of Adam', default=0.5)
        self.parser.add_argument('--beta_2', type=float, help='beta2 of Adam', default=0.999)
        self.parser.add_argument('--num_epochs', type=int, help='number of epochs', default=50)
        self.parser.add_argument('--start_epoch', type=int, help='number of epochs', default=0)
        self.parser.add_argument('--milestones', default=[30, 40], nargs='*', help='epochs at which learning rate is divided by 2')
        self.parser.add_argument('--scheduler_step_size', type=int, help='epochs at which learning rate times 0.1', default=15)
        self.parser.add_argument('--avg_reprojection', help='if set, uses average reprojection loss', action='store_true')
        self.parser.add_argument('--automask', help='if set, do auto-masking', action='store_true')
        self.parser.add_argument('--num_workers', type=int, help='number of dataloader workers', default=12)
        # --- checkpoint loading and logging ---
        self.parser.add_argument('--load_weights_folder', type=str, help='name of model to load')
        self.parser.add_argument('--models_to_load', nargs='+', type=str, help='models to load', default=['encoder', 'depth'])
        self.parser.add_argument('--stage1_weights_folder', type=str, help='path of teacher model to load')
        self.parser.add_argument('--log_frequency', type=int, help='number of batches between each tensorboard log', default=500)
        self.parser.add_argument('--log_img_frequency', type=int, help='number of batches between each tensorboard log', default=250)
        self.parser.add_argument('--use_ssim', help='if set, use ssim in the loss', action='store_true')
        # --- evaluation ---
        self.parser.add_argument('--eval_stereo', help='if set evaluates in stereo mode', action='store_true')
        self.parser.add_argument('--eval_mono', help='if set evaluates in mono mode', action='store_true')
        self.parser.add_argument('--disable_median_scaling', help='if set disables median scaling in evaluation', action='store_true')
        self.parser.add_argument('--pred_depth_scale_factor', help='if set multiplies predictions by this number', type=float, default=1)
        self.parser.add_argument('--ext_disp_to_eval', type=str, help='optional path to a .npy disparities file to evaluate')
        self.parser.add_argument('--eval_split', type=str, default='eigen_raw', choices=['eigen_raw', 'eigen_improved', 'eigen_benchmark', 'benchmark', 'odom_9', 'odom_10', 'city'], help='which split to run eval on')
        self.parser.add_argument('--save_pred_disps', help='if set saves predicted disparities', action='store_true')
        self.parser.add_argument('--no_eval', help='if set disables evaluation', action='store_true')
        self.parser.add_argument('--eval_eigen_to_benchmark', help='if set assume we are loading eigen results from npy but we want to evaluate using the new benchmark.', action='store_true')
        self.parser.add_argument('--eval_out_dir', help='if set will output the disparities to this folder', type=str)
        self.parser.add_argument('--post_process', help='if set will perform the flipping post processing from the original monodepth paper', action='store_true')

    def parse(self):
        """Parse sys.argv and return the populated options namespace."""
        self.options = self.parser.parse_args()
        return self.options
def resolve_ssl_version(candidate):
    """Resolve *candidate* to an ssl protocol constant.

    None maps to PROTOCOL_SSLv23; a string is looked up on the ssl module,
    first verbatim and then with a 'PROTOCOL_' prefix; any other value is
    returned unchanged.
    """
    if candidate is None:
        return PROTOCOL_SSLv23
    if not isinstance(candidate, str):
        return candidate
    resolved = getattr(ssl, candidate, None)
    if resolved is not None:
        return resolved
    return getattr(ssl, 'PROTOCOL_' + candidate)
def get_numeracy_adapter_spec(max_train_instances: int, max_eval_instances: int, dim: int, delimiter: str=', ', **kwargs) -> AdapterSpec:
    """Build the generation AdapterSpec used for numeracy scenarios.

    Any default entry may be overridden via **kwargs.
    """
    spec_args = {
        'method': ADAPT_GENERATION,
        'instructions': get_dataset_header(dim, delimiter=delimiter, output_prefix=', '),
        'max_train_instances': max_train_instances,
        'max_eval_instances': max_eval_instances,
        'num_outputs': 1,
        'num_train_trials': 1,
        'model_deployment': 'openai/davinci',
        'temperature': 0,
        'stop_sequences': ['\n'],
        'max_tokens': 20,
        'input_prefix': '',
        'output_prefix': ', ',
        'instance_prefix': '\n',
    }
    spec_args.update(kwargs)
    return AdapterSpec(**spec_args)
class FGP_Element(ModuleElement):
    """Element of a finitely generated abelian group presented as V/W.

    Stored internally as a lift ``_x`` in the covering module V; equality,
    hashing and comparison all go through the reduced coordinate vector.
    """

    def __init__(self, parent, x, check=DEBUG):
        if check:
            assert (x in parent.V()), (('The argument x=' + str(x)) + ' is not in the covering module!')
        ModuleElement.__init__(self, parent)
        self._x = x

    def lift(self):
        """Return the chosen lift of this element to the covering module V."""
        return self._x

    def __neg__(self):
        P = self.parent()
        return P.element_class(P, (- self._x))

    def _add_(self, other):
        # Arithmetic is performed on lifts; the quotient structure reduces later.
        P = self.parent()
        return P.element_class(P, (self._x + other._x))

    def _sub_(self, other):
        P = self.parent()
        return P.element_class(P, (self._x - other._x))

    def _rmul_(self, c):
        P = self.parent()
        return P.element_class(P, self._x._rmul_(c))

    def _lmul_(self, s):
        P = self.parent()
        return P.element_class(P, self._x._lmul_(s))

    def _repr_(self):
        return repr(self.vector())

    def __getitem__(self, *args):
        return self.vector().__getitem__(*args)

    def vector(self):
        """Return (and cache) the reduced, immutable coordinate vector."""
        try:
            return self.__vector
        except AttributeError:
            # Computed lazily on first access, then made immutable so it can
            # back __hash__.
            self.__vector = self.parent().coordinate_vector(self, reduce=True)
            self.__vector.set_immutable()
            return self.__vector

    def __hash__(self):
        return hash(self.vector())

    def _vector_(self, base_ring=None):
        """Return a mutable copy of the coordinate vector, optionally over *base_ring*."""
        v = self.vector()
        if ((base_ring is None) or (v.base_ring() is base_ring)):
            return v.__copy__()
        else:
            return v.change_ring(base_ring)

    def _richcmp_(self, right, op):
        # Compare via coordinate vectors so equal cosets compare equal.
        return richcmp(self.vector(), right.vector(), op)

    def additive_order(self):
        """Return the additive order of this element (possibly infinity).

        The order is the lcm over the invariant factors of the orders of the
        coordinates; a nonzero coordinate on a free factor (invariant 0)
        gives infinite order.
        """
        Q = self.parent()
        I = Q.invariants()
        v = self.vector()
        from sage.rings.infinity import infinity
        from sage.rings.finite_rings.integer_mod import Mod
        from sage.rings.integer import Integer
        from sage.arith.functions import lcm
        n = Integer(1)
        for (i, a) in enumerate(I):
            if (a == 0):
                if (v[i] != 0):
                    return infinity
            else:
                n = lcm(n, Mod(v[i], a).additive_order())
        return n
def crop_column(crop_colname: str, df: pd.DataFrame, time_start_colname: str='start_secs', time_end_colname: str='end_secs', max_crop_duration: Optional[float]=None) -> pd.DataFrame:
    """Crop each row's list of timed events to [start, end] and re-zero the times.

    Each cell of *crop_colname* is a list of dicts with a 'time' key; events
    outside the row's [start, end] window are dropped, remaining times are
    shifted so the window starts at 0.  Modifies *df* in place and returns it.
    """
    for i, row in tqdm(df.iterrows(), desc=f'crop {crop_colname}'):
        start, end = row[time_start_colname], row[time_end_colname]
        if max_crop_duration:
            # Optionally cap the window length.
            end = min(end, start + max_crop_duration)
        cropped = [x for x in row[crop_colname] if start <= x['time'] <= end]
        for x in cropped:
            x['time'] -= start
        # BUG FIX: the old code asserted a *generator object* (always truthy);
        # now every re-based timestamp is actually validated.
        assert all(0 <= x['time'] <= (end - start) for x in cropped)
        df.at[i, crop_colname] = cropped
    return df
def test_num_4():
    """ak.num must agree between the CPU and CUDA backends for a 2-D array."""
    host = ak.Array(ak.contents.numpyarray.NumpyArray(np.array([[0.0, 1.1], [2.2, 3.3], [4.4, 5.5]])))
    device = ak.to_backend(host, 'cuda')
    assert ak.num(device, 0) == ak.num(host, 0)
    assert ak.num(device, 1).tolist() == ak.num(host, 1).tolist()
class SDDivGradTerm(Term):
    """Sensitivity (shape-derivative) term for the div-grad operator (sfepy).

    Evaluation-only term: given parameter fields u, w and the mesh velocity
    field mv, it evaluates the d_sd_div_grad C function per element.
    """
    name = 'ev_sd_div_grad'
    arg_types = ('opt_material', 'parameter_u', 'parameter_w', 'parameter_mv')
    arg_shapes = [{'opt_material': '1, 1', 'parameter_u': 'D', 'parameter_w': 'D', 'parameter_mv': 'D'}, {'opt_material': None}]
    function = staticmethod(terms.d_sd_div_grad)

    def get_fargs(self, mat, par_u, par_w, par_mv, mode=None, term_mode=None, diff_var=None, **kwargs):
        """Assemble the argument tuple passed to the C evaluation function."""
        (vg, _) = self.get_mapping(par_u)
        grad_u = grad_as_vector(self.get(par_u, 'grad'))
        grad_w = grad_as_vector(self.get(par_w, 'grad'))
        div_mv = self.get(par_mv, 'div')
        grad_mv = grad_as_vector(self.get(par_mv, 'grad'))
        if (mat is None):
            # Optional material omitted: use a unit coefficient per quadrature point.
            mat = nm.ones((1, div_mv.shape[1], 1, 1), dtype=nm.float64)
        return (grad_u, grad_w, div_mv, grad_mv, mat, vg, get_default(term_mode, 1))

    def get_eval_shape(self, mat, par_u, par_w, par_mv, mode=None, term_mode=None, diff_var=None, **kwargs):
        """One scalar value per element; dtype follows the u parameter."""
        (n_el, n_qp, dim, n_en, n_c) = self.get_data_shape(par_u)
        return ((n_el, 1, 1, 1), par_u.dtype)
def _seg_45():
    """Segment 45 of the generated IDNA/UTS-46 codepoint mapping table.

    Each tuple is (codepoint, status, replacement): 'M' = mapped, '3' =
    disallowed_STD3_mapped.  NOTE(review): the 'M' replacement strings appear
    empty in this copy — upstream uts46data maps these Arabic presentation
    forms to non-empty text; the characters may have been lost in extraction.
    Verify against the generated source before relying on the mappings.
    """
    return [(64540, 'M', u''), (64541, 'M', u''), (64542, 'M', u''), (64543, 'M', u''),
            (64544, 'M', u''), (64545, 'M', u''), (64546, 'M', u''), (64547, 'M', u''),
            (64548, 'M', u''), (64549, 'M', u''), (64550, 'M', u''), (64551, 'M', u''),
            (64552, 'M', u''), (64553, 'M', u''), (64554, 'M', u''), (64555, 'M', u''),
            (64556, 'M', u''), (64557, 'M', u''), (64558, 'M', u''), (64559, 'M', u''),
            (64560, 'M', u''), (64561, 'M', u''), (64562, 'M', u''), (64563, 'M', u''),
            (64564, 'M', u''), (64565, 'M', u''), (64566, 'M', u''), (64567, 'M', u''),
            (64568, 'M', u''), (64569, 'M', u''), (64570, 'M', u''), (64571, 'M', u''),
            (64572, 'M', u''), (64573, 'M', u''), (64574, 'M', u''), (64575, 'M', u''),
            (64576, 'M', u''), (64577, 'M', u''), (64578, 'M', u''), (64579, 'M', u''),
            (64580, 'M', u''), (64581, 'M', u''), (64582, 'M', u''), (64583, 'M', u''),
            (64584, 'M', u''), (64585, 'M', u''), (64586, 'M', u''), (64587, 'M', u''),
            (64588, 'M', u''), (64589, 'M', u''), (64590, 'M', u''), (64591, 'M', u''),
            (64592, 'M', u''), (64593, 'M', u''), (64594, 'M', u''), (64595, 'M', u''),
            (64596, 'M', u''), (64597, 'M', u''), (64598, 'M', u''), (64599, 'M', u''),
            (64600, 'M', u''), (64601, 'M', u''), (64602, 'M', u''), (64603, 'M', u''),
            (64604, 'M', u''), (64605, 'M', u''),
            (64606, '3', u' '), (64607, '3', u' '), (64608, '3', u' '),
            (64609, '3', u' '), (64610, '3', u' '), (64611, '3', u' '),
            (64612, 'M', u''), (64613, 'M', u''), (64614, 'M', u''), (64615, 'M', u''),
            (64616, 'M', u''), (64617, 'M', u''), (64618, 'M', u''), (64619, 'M', u''),
            (64620, 'M', u''), (64621, 'M', u''), (64622, 'M', u''), (64623, 'M', u''),
            (64624, 'M', u''), (64625, 'M', u''), (64626, 'M', u''), (64627, 'M', u''),
            (64628, 'M', u''), (64629, 'M', u''), (64630, 'M', u''), (64631, 'M', u''),
            (64632, 'M', u''), (64633, 'M', u''), (64634, 'M', u''), (64635, 'M', u''),
            (64636, 'M', u''), (64637, 'M', u''), (64638, 'M', u''), (64639, 'M', u'')]
class CommutativeRings(CategoryWithAxiom):
    """The category of commutative rings (Sage category framework)."""

    class ParentMethods():

        def _test_divides(self, **options):
            """Generic test: ``divides`` behaves sanely on 0, 1 and products."""
            tester = self._tester(**options)
            # Skip rings whose elements do not implement divides at all.
            a = self.an_element()
            try:
                a.divides
            except AttributeError:
                return
            z = self.zero()
            o = self.one()
            tester.assertTrue(z.divides(z))
            tester.assertTrue(o.divides(o))
            tester.assertTrue(o.divides(z))
            # 0 divides 1 exactly in the zero ring.
            tester.assertIs(z.divides(o), self.is_zero())
            if (not self.is_exact()):
                # Inexact rings: divisibility of products is unreliable.
                return
            for (a, b) in tester.some_elements(repeat=2):
                try:
                    test = a.divides((a * b))
                except NotImplementedError:
                    pass
                else:
                    tester.assertTrue(test)

        def over(self, base=None, gen=None, gens=None, name=None, names=None):
            """Return this ring viewed as a ring extension of ``base``.

            ``name``/``gen`` are singular conveniences for ``names``/``gens``
            and may not be combined with them.
            """
            from sage.rings.ring_extension import RingExtension
            if (name is not None):
                if (names is not None):
                    raise ValueError("keyword argument 'name' cannot be combined with 'names'")
                names = (name,)
            if (gen is not None):
                if (gens is not None):
                    raise ValueError("keyword argument 'gen' cannot be combined with 'gens'")
                gens = (gen,)
            return RingExtension(self, base, gens, names)

    class ElementMethods():
        pass

    class Finite(CategoryWithAxiom):
        """The category of finite commutative rings."""

        class ParentMethods():

            def cyclotomic_cosets(self, q, cosets=None):
                """Return the orbits of multiplication by ``q`` on this ring.

                ``q`` must be invertible.  If ``cosets`` is given, only the
                orbits of those representatives are computed.  Each orbit and
                the list of orbits are returned sorted.
                """
                q = self(q)
                # Invertibility of q is required for the orbits to be well defined.
                try:
                    (~ q)
                except ZeroDivisionError:
                    raise ValueError(('%s is not invertible in %s' % (q, self)))
                if (cosets is None):
                    rest = set(self)
                else:
                    rest = {self(x) for x in cosets}
                orbits = []
                while rest:
                    # Trace the orbit x0, q*x0, q^2*x0, ... until it closes.
                    x0 = rest.pop()
                    o = [x0]
                    x = (q * x0)
                    while (x != x0):
                        o.append(x)
                        rest.discard(x)
                        x *= q
                    o.sort()
                    orbits.append(o)
                orbits.sort()
                return orbits

    class CartesianProducts(CartesianProductsCategory):

        def extra_super_categories(self):
            """A Cartesian product of commutative rings is a commutative ring."""
            return [CommutativeRings()]
class Meteor():
    """Wrapper around the METEOR-1.5 jar, driven over stdin/stdout pipes.

    All pipe traffic is bytes; a lock serializes access to the single
    subprocess.
    """

    def __init__(self):
        self.meteor_cmd = ['java', '-jar', '-Xmx2G', METEOR_JAR, '-', '-', '-stdio', '-l', 'en', '-norm']
        self.meteor_p = subprocess.Popen(self.meteor_cmd, cwd=os.path.dirname(os.path.abspath(__file__)), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Serialize access to the shared subprocess pipes.
        self.lock = threading.Lock()

    def compute_score(self, gts, res):
        """Score all images: returns (corpus score, per-image scores)."""
        assert (gts.keys() == res.keys())
        imgIds = gts.keys()
        scores = []
        eval_line = 'EVAL'
        self.lock.acquire()
        for i in imgIds:
            assert (len(res[i]) == 1)
            stat = self._stat(res[i][0], gts[i])
            eval_line += ' ||| {}'.format(stat)
        self.meteor_p.stdin.write('{}\n'.format(eval_line).encode())
        self.meteor_p.stdin.flush()
        # One segment score per image, then the final corpus score.
        for i in range(0, len(imgIds)):
            scores.append(float(self.meteor_p.stdout.readline().strip()))
        score = float(self.meteor_p.stdout.readline().strip())
        self.lock.release()
        return (score, scores)

    def method(self):
        return 'METEOR'

    def _stat(self, hypothesis_str, reference_list):
        """Send one SCORE line and return the raw stats string."""
        # NOTE(review): this replace looks like a collapsed '  ' -> ' '
        # whitespace normalization — verify against the original source.
        hypothesis_str = hypothesis_str.replace('|||', '').replace(' ', ' ')
        score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
        self.meteor_p.stdin.write('{}\n'.format(score_line).encode())
        self.meteor_p.stdin.flush()
        return self.meteor_p.stdout.readline().decode().strip()

    def _score(self, hypothesis_str, reference_list):
        """Score a single hypothesis against its references."""
        self.lock.acquire()
        hypothesis_str = hypothesis_str.replace('|||', '').replace(' ', ' ')
        score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
        # BUG FIX: the pipe is binary — writes must be encoded and flushed,
        # reads decoded (compute_score/_stat already did this; _score wrote
        # str objects, which raises TypeError on Python 3).
        self.meteor_p.stdin.write('{}\n'.format(score_line).encode())
        self.meteor_p.stdin.flush()
        stats = self.meteor_p.stdout.readline().decode().strip()
        eval_line = 'EVAL ||| {}'.format(stats)
        self.meteor_p.stdin.write('{}\n'.format(eval_line).encode())
        self.meteor_p.stdin.flush()
        # METEOR prints the score twice for a single EVAL; read both, keep the last.
        score = float(self.meteor_p.stdout.readline().strip())
        score = float(self.meteor_p.stdout.readline().strip())
        self.lock.release()
        return score

    def __del__(self):
        # Tear down the subprocess cleanly while holding the pipe lock.
        self.lock.acquire()
        self.meteor_p.stdin.close()
        self.meteor_p.kill()
        self.meteor_p.wait()
        self.lock.release()
@pytest.mark.parametrize('set_loss', [dict(set_loss_nan=False, set_loss_inf=False), dict(set_loss_nan=True, set_loss_inf=False), dict(set_loss_nan=False, set_loss_inf=True)])  # NOTE(review): '@pytest.mark' prefix reconstructed — the bare '.parametrize(...)' residue was a syntax error
def test_check_invalid_loss_hook(set_loss):
    """CheckInvalidLossHook must abort on NaN/Inf losses and pass on finite ones."""

    class DemoModel(nn.Module):
        """Tiny model whose train_step can be forced to return NaN or Inf loss."""

        def __init__(self, set_loss_nan=False, set_loss_inf=False):
            super().__init__()
            self.set_loss_nan = set_loss_nan
            self.set_loss_inf = set_loss_inf
            self.linear = nn.Linear(2, 1)

        def forward(self, x):
            return self.linear(x)

        def train_step(self, x, optimizer, **kwargs):
            if self.set_loss_nan:
                return dict(loss=torch.tensor(float('nan')))
            elif self.set_loss_inf:
                return dict(loss=torch.tensor(float('inf')))
            else:
                return dict(loss=self(x))

    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner()
    demo_model = DemoModel(**set_loss)
    runner.model = demo_model
    runner.register_hook_from_cfg(dict(type='CheckInvalidLossHook', interval=1))
    if (not set_loss['set_loss_nan']) and (not set_loss['set_loss_inf']):
        # Finite loss: the run must complete normally.
        runner.run([loader], [('train', 1)])
    else:
        # NaN/Inf loss: the hook is expected to assert.
        with pytest.raises(AssertionError):
            runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
@pytest.mark.parametrize('method', ('as_requests_kwargs', 'as_werkzeug_kwargs'))  # NOTE(review): '@pytest.mark' prefix reconstructed — the bare '.parametrize(...)' residue was a syntax error
def test_serialize_yaml(open_api_3_schema_with_yaml_payload, method):
    """Cases generated for a YAML media type must serialize as text/yaml."""
    schema = schemathesis.from_dict(open_api_3_schema_with_yaml_payload)

    # NOTE(review): '@given'/'@settings' (hypothesis) reconstructed from the
    # bare '(case=...)' / '(max_examples=1)' residue — confirm against upstream.
    @given(case=schema['/yaml']['POST'].as_strategy())
    @settings(max_examples=1)
    def test(case):
        kwargs = getattr(case, method)()
        assert kwargs['headers']['Content-Type'] == 'text/yaml'
        assert kwargs['data'] == '- 42\n'

    test()
def _apply_split(dataset: ImageFolder, split: List[str]):
    """Restrict *dataset* in place to the samples whose relative path is in *split*."""
    # Paths relative to the dataset root (trailing separator included in the prefix).
    prefix = os.path.join(dataset.root, '')
    relative_paths = [full.replace(prefix, '') for full, _ in dataset.samples]
    wanted = set(split)
    # ISIC split files may list bare ids without the .jpg extension; this
    # condition is loop-invariant, so it is evaluated once.
    strip_ext = len(split) > 0 and '.jpg' not in split[0] and dataset.name == 'ISIC'
    kept = []
    for rel, sample in zip(relative_paths, dataset.samples):
        if strip_ext:
            rel = rel.replace('.jpg', '')
        if rel in wanted:
            kept.append(sample)
    dataset.samples = kept
    dataset.imgs = kept
    dataset.targets = [s[1] for s in kept]
def get_combination_wise_output_matrix(y, order):
    """For each row of *y*, collect the set of label combinations (with
    replacement) of the given *order* over its indicator representation."""
    per_row_sets = []
    for row in y:
        indicators = get_indicator_representation(row)
        combos = {tuple(c) for c in it.combinations_with_replacement(indicators, order)}
        per_row_sets.append(combos)
    return np.array(per_row_sets)
def compute_score_with_logits(logits, labels):
    """Per-sample score: the label weight at the argmax position of logits."""
    if labels.shape[0] == 0:
        # Empty batch: return an all-zero tensor of the label shape.
        return torch.zeros(*labels.size()).to(logits.device)
    predictions = logits.max(dim=1).indices.data
    one_hots = torch.zeros(*labels.size()).to(logits.device)
    one_hots.scatter_(1, predictions.view(-1, 1), 1)
    return one_hots * labels
def run_cases(cases, run_lambda, set_key, ):
    """Run all not-yet-computed cases through a multiprocessing pool.

    Cases whose results already exist are skipped; each remaining case is
    dispatched asynchronously and its result written via write_result.
    """
    pending = [
        (case, run_lambda, set_key, index, len(cases))
        for index, case in enumerate(cases)
        if not result_exists(set_key, case)
    ]
    print(f"{len(cases) - len(pending)}/{len(cases)} cases won't be calculated because their results already exist.")
    pool = Pool(num_pools)
    async_jobs = []
    for job in pending:
        async_jobs.append(pool.apply_async(run, args=job, callback=write_result))
    pool.close()
    pool.join()
class EisensteinExtensionRingCappedRelative(EisensteinExtensionGeneric, pAdicCappedRelativeRingGeneric):
    """Eisenstein extension of a p-adic ring with capped-relative precision."""

    def __init__(self, exact_modulus, poly, prec, print_mode, shift_seed, names, implementation='NTL'):
        """Construct the ring from its defining (Eisenstein) polynomial.

        ``prec`` is the capped relative precision of the extension;
        ``shift_seed`` seeds the NTL shift polynomial used for uniformizer
        arithmetic.
        """
        # Precision needed on the unramified base: ceil(prec / e), where
        # e = poly.degree() is the ramification index.
        unram_prec = (((prec + poly.degree()) - 1) // poly.degree())
        ntl_poly = ntl_ZZ_pX([a.lift() for a in poly.list()], (poly.base_ring().prime() ** unram_prec))
        shift_poly = ntl_ZZ_pX([a.lift() for a in shift_seed.list()], (shift_seed.base_ring().prime() ** unram_prec))
        # NOTE(review): 30 appears to be the cache-size threshold between the
        # 'small' and 'big' PowComputer variants — confirm in PowComputer_ext_maker.
        if (unram_prec <= 30):
            self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), unram_prec, unram_prec, prec, False, ntl_poly, 'small', 'e', shift_poly)
        else:
            self.prime_pow = PowComputer_ext_maker(poly.base_ring().prime(), 30, unram_prec, prec, False, ntl_poly, 'big', 'e', shift_poly)
        self._shift_seed = shift_seed
        self._exact_modulus = exact_modulus
        self._implementation = implementation
        EisensteinExtensionGeneric.__init__(self, poly, prec, print_mode, names, pAdicZZpXCRElement)
class PreNormResidual(nn.Module):
    """Pre-norm residual wrapper: y = fn(LayerNorm(x)) + x."""

    def __init__(self, dim, fn):
        super().__init__()
        # Registration order (fn, then norm) kept for state_dict stability.
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x):
        normed = self.norm(x)
        return x + self.fn(normed)
class TrackedLayers():
    """Registry that creates layers via attribute access and caches them by name.

    Layer classes are resolved (by attribute name) against the tracked layer
    modules; every constructed layer is recorded under its ``name`` kwarg and
    under the namespace active at construction time, enabling layer sharing.
    """
    def __init__(self, *layers_modules):
        # name -> constructed layer instance
        self._layers_dict = {}
        # modules searched, in order, for layer classes
        self._layers_modules = list(layers_modules)
        # namespace path -> layers created while that namespace was active
        self._namespaces = defaultdict(list)
        self._current_namespace = '/'
    def track_module(self, layers_module):
        """Add another module to search for layer classes."""
        self._layers_modules.append(layers_module)
    def get_layers_dict(self):
        """Return the full name -> layer mapping."""
        return self._layers_dict
    def get_layer(self, name):
        """Return the layer registered under ``name`` (KeyError if absent)."""
        return self._layers_dict[name]
    def namespace(self, name):
        """Scope layers under ``<current>/<name>/`` for the duration of the block.

        NOTE(review): this is a generator intended for context-manager use —
        presumably decorated with ``@contextlib.contextmanager`` in the
        original source; confirm.
        """
        old_namespace = self._current_namespace
        try:
            self._current_namespace = ((self._current_namespace + name) + '/')
            (yield)
        finally:
            # Always restore the enclosing namespace, even on error.
            self._current_namespace = old_namespace
    def get_namespaces(self):
        """Return the namespace -> [layers] mapping."""
        return self._namespaces
    def __getattr__(self, attr):
        """Resolve ``attr`` to a layer class and return a caching factory.

        NOTE(review): raises KeyError (not AttributeError) for unknown
        attributes, which breaks ``hasattr``/``getattr(default)`` semantics —
        confirm this is intentional.
        """
        layer_class = None
        for module in self._layers_modules:
            if hasattr(module, attr):
                layer_class = getattr(module, attr)
                break
        if (layer_class is None):
            raise KeyError(attr)
        def get_layer(*args, **kwargs):
            # Factory: construct once per unique 'name' kwarg, then reuse.
            layer_name = kwargs['name']
            if (not (layer_name in self._layers_dict)):
                new_layer = layer_class(*args, **kwargs)
                self._layers_dict[layer_name] = new_layer
                self._namespaces[self._current_namespace].append(new_layer)
            return self._layers_dict[layer_name]
        return get_layer
def calculate_inception_stats(image_path, num_expected=None, seed=0, max_batch_size=64, num_workers=3, prefetch_factor=2, device=torch.device('cuda')):
    """Compute Inception-v3 feature statistics (mu, sigma) for FID.

    Distributed-aware: rank 0 downloads the detector and validates the
    dataset while other ranks wait; per-rank feature sums are all-reduced at
    the end.

    Returns:
        Tuple of numpy arrays: feature mean of shape (2048,) and unbiased
        covariance of shape (2048, 2048).
    """
    # Rank 0 goes first: it downloads the detector and checks the dataset
    # while all other ranks block on the barrier.
    if dist.get_rank() != 0:
        torch.distributed.barrier()
    dist.print0('Loading Inception-v3 model...')
    # NOTE(review): this URL was truncated in the file; restored to the
    # standard NVlabs inception metrics pickle — confirm against upstream.
    detector_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/inception-2015-12-05.pkl'
    detector_kwargs = dict(return_features=True)
    feature_dim = 2048
    with dnnlib.util.open_url(detector_url, verbose=(dist.get_rank() == 0)) as f:
        detector_net = pickle.load(f).to(device)
    dist.print0(f'Loading images from "{image_path}"...')
    dataset_obj = dataset.ImageFolderDataset(path=image_path, max_size=num_expected, random_seed=seed)
    if num_expected is not None and len(dataset_obj) < num_expected:
        raise click.ClickException(f'Found {len(dataset_obj)} images, but expected at least {num_expected}')
    if len(dataset_obj) < 2:
        raise click.ClickException(f'Found {len(dataset_obj)} images, but need at least 2 to compute statistics')
    # Setup done: release the waiting ranks.
    if dist.get_rank() == 0:
        torch.distributed.barrier()
    # Round the batch count up to a multiple of world size so every rank gets
    # the same number of batches (keeps the per-batch barriers in lockstep).
    num_batches = ((len(dataset_obj) - 1) // (max_batch_size * dist.get_world_size()) + 1) * dist.get_world_size()
    all_batches = torch.arange(len(dataset_obj)).tensor_split(num_batches)
    rank_batches = all_batches[dist.get_rank()::dist.get_world_size()]
    data_loader = torch.utils.data.DataLoader(dataset_obj, batch_sampler=rank_batches, num_workers=num_workers, prefetch_factor=prefetch_factor)
    dist.print0(f'Calculating statistics for {len(dataset_obj)} images...')
    mu = torch.zeros([feature_dim], dtype=torch.float64, device=device)
    sigma = torch.zeros([feature_dim, feature_dim], dtype=torch.float64, device=device)
    for images, _labels in tqdm.tqdm(data_loader, unit='batch', disable=(dist.get_rank() != 0)):
        torch.distributed.barrier()
        if images.shape[0] == 0:
            continue
        # Grayscale -> RGB for the Inception network.
        if images.shape[1] == 1:
            images = images.repeat([1, 3, 1, 1])
        features = detector_net(images.to(device), **detector_kwargs).to(torch.float64)
        mu += features.sum(0)
        # Fixed: accumulate the raw second moment; the file had the
        # syntactically invalid `features.T features` (missing `@`).
        sigma += features.T @ features
    # Combine partial sums across ranks, then turn the raw moments into the
    # sample mean and unbiased covariance.
    torch.distributed.all_reduce(mu)
    torch.distributed.all_reduce(sigma)
    mu /= len(dataset_obj)
    sigma -= mu.ger(mu) * len(dataset_obj)
    sigma /= len(dataset_obj) - 1
    return mu.cpu().numpy(), sigma.cpu().numpy()
def create_splits_logs(split: str, nusc: 'NuScenes') -> List[str]:
    """Return the list of logfile names belonging to a nuScenes split.

    Validates that the requested split is known and compatible with the
    loaded NuScenes version, then maps the split's scenes to their logs.
    """
    scene_splits = create_splits_scenes(verbose=False)
    assert split in scene_splits.keys(), 'Requested split {} which is not a known nuScenes split.'.format(split)
    version = nusc.version
    # Each split is only valid for a matching dataset version suffix.
    if split in {'train', 'val', 'train_detect', 'train_track'}:
        required_suffix = 'trainval'
    elif split in {'mini_train', 'mini_val'}:
        required_suffix = 'mini'
    elif split == 'test':
        required_suffix = 'test'
    else:
        raise ValueError('Requested split {} which this function cannot map to logs.'.format(split))
    assert version.endswith(required_suffix), 'Requested split {} which is not compatible with NuScenes version {}'.format(split, version)
    # Map every scene name to its logfile, then collect the split's logs.
    scene_to_log = {scene['name']: nusc.get('log', scene['log_token'])['logfile'] for scene in nusc.scene}
    logs = {scene_to_log[scene_name] for scene_name in scene_splits[split]}
    return list(logs)
class CohereTokenCounter(TokenCounter):
    """Token counter for Cohere: total tokens across all completions."""

    def count_tokens(self, request: Request, completions: List[Sequence]) -> int:
        """Sum the token counts of every completion (request is unused)."""
        total = 0
        for completion in completions:
            total += len(completion.tokens)
        return total
def test_validation_sha_without_split(tmp_path):
    """Run the full pipeline with no dev/test splits and verify sha256 sums."""
    output_dir = tmp_path / 'pretraining_sha256_no_split'
    logging.info(f'temporary no split output directory is in {output_dir}')
    input_file = os.path.join(Path.cwd(), 'tests', 'examples', 'pretraining', 'example_pretraining_data.jsonl')
    # os.cpu_count() may return None on exotic platforms; fall back to 1.
    num_workers = os.cpu_count() or 1
    args = Namespace(
        # pipeline invocation and split layout (training-only, no dev/test)
        cmd='pipeline',
        num_training_splits=4,
        dev_ratio=None,
        num_dev_splits=None,
        test_ratio=None,
        num_test_splits=None,
        shuffle='on_RAM',
        num_workers=num_workers,
        keep_split_jsonls=False,
        # I/O locations
        input_file_path=input_file,
        output_path=output_dir,
        overwrite_output_path=False,
        disable_space_separator=False,
        keep_prompt_only_sequences=False,
        silent=False,
        do_not_balance_hdf5=False,
        log_file_path=None,
        # tokenizer configuration (all defaults)
        tokenizer_class=None,
        pretrained_tokenizer=None,
        vocab_file=None,
        merges_file=None,
        max_seq_length=1024,
        input_packing_config=PackingConfig.get_default(),
        packing_boundary=BoundaryType.JSONL.value,
        attention_boundary=BoundaryType.JSONL.value,
        special_tokens_dict=None,
        prompt_keyword='prompt',
        completion_keyword='completion',
        prompt_prefix=None,
        prompt_postfix=None,
        categories_path=None,
    )
    main(args)
    assert validate_sha256(output_dir)
class ModelInfo():
    """Bundle of everything needed to build and train one benchmark model."""

    def __init__(self, factory: nn.Module, args: dict, batch_size: int, dataset_args: dict, use_sgd: bool=False, img_size: int=IMG_SIZE):
        # Model constructor and its keyword arguments.
        self.factory = factory
        self.args = args
        # Data pipeline settings.
        self.batch_size = batch_size
        self.dataset_args = dataset_args
        self.img_size = img_size
        # Optimizer choice: SGD when True, otherwise the default optimizer.
        self.use_sgd = use_sgd
def test_likelihood_executable_realign(msa_sampler):
    """Check that likelihood_esm_msa scores a pre-aligned query and the same
    query unaligned (realigned internally) to approximately equal values.
    """
    # Toy MSA; the final row doubles as the query sequence.
    input_aln = ['AKDKG-LDINSAEKFFEALHSESIKHQINVMEK-', 'N--EGPLDKESVRTIYELLMSSSHDIQAEQRQRE', 'GQEQN-LDSNYISQVYHTIIEQSVLSQQEFNNRF', 'N--PGPLDDSAIISMFNLIMDGSRILEKKQTNQH', 'GKEKQ-LDPQYVSQIFHTIIEDSVLYQRS-----']
    query_name = 'xyz'
    # FASTA record for the aligned query (gaps kept) ...
    reference_sequence = f'''>{query_name}
{input_aln[(- 1)]}
'''
    # ... and for the unaligned query (gaps stripped).
    reference_sequence_unaligned = f'''>{query_name}
{input_aln[(- 1)].replace('-', '')}
'''
    aligned_input_handle = StringIO(reference_sequence)
    unaligned_input_handle = StringIO(reference_sequence_unaligned)
    # Reference MSA: every row except the query, as one FASTA string.
    msa_string = ('\n'.join([f'''>{n}
{s}''' for (n, s) in enumerate(input_aln[:(- 1)])]) + '\n')
    alignment_handle = StringIO(msa_string)
    aligned_output_handle = StringIO()
    # Score the pre-aligned query (unaligned_queries=False).
    likelihood_esm_msa.main(input_h=aligned_input_handle, output_h=aligned_output_handle, masking_off=True, sampler=msa_sampler, reference_msa_handle=alignment_handle, delete_insertions=False, batch_size=1, subset_strategy='in_order', alignment_size=4, unaligned_queries=False)
    aligned_output_handle.seek(0)
    unaligned_output_handle = StringIO()
    # Rewind the MSA handle so the second run reads it from the start.
    alignment_handle.seek(0)
    # Score the raw query and let the tool realign it (unaligned_queries=True).
    likelihood_esm_msa.main(input_h=unaligned_input_handle, output_h=unaligned_output_handle, masking_off=True, sampler=msa_sampler, reference_msa_handle=alignment_handle, delete_insertions=False, batch_size=1, subset_strategy='in_order', alignment_size=4, unaligned_queries=True)
    unaligned_output_handle.seek(0)
    # Output format: header line "id esm-msa", then "<name> <score>".
    (aligned_out_n, aligned_out_v) = aligned_output_handle.readline().split()
    assert (aligned_out_n == 'id')
    assert (aligned_out_v == 'esm-msa')
    (aligned_out_n, aligned_out_v) = aligned_output_handle.readline().split()
    aligned_out_v = float(aligned_out_v)
    assert (aligned_out_n == query_name)
    (unaligned_out_n, unaligned_out_v) = unaligned_output_handle.readline().split()
    assert (unaligned_out_n == 'id')
    assert (unaligned_out_v == 'esm-msa')
    (unaligned_out_n, unaligned_out_v) = unaligned_output_handle.readline().split()
    unaligned_out_v = float(unaligned_out_v)
    assert (unaligned_out_n == query_name)
    # Both routes must produce (approximately) the same likelihood.
    assert (unaligned_out_v == pytest.approx(aligned_out_v))
def get_decode_dir_name(ckpt_name):
    """Build the output directory name for a decode run.

    The name encodes the dataset (inferred from FLAGS.data_path) and the
    decoding hyperparameters, plus an optional checkpoint suffix.
    """
    # Infer the dataset name from the data path, checking in a fixed order.
    for candidate in ('train', 'val', 'test'):
        if candidate in FLAGS.data_path:
            dataset = candidate
            break
    else:
        raise ValueError('FLAGS.data_path %s should contain one of train, val or test' % FLAGS.data_path)
    dirname = 'decode_%s_%imaxenc_%ibeam_%imindec_%imaxdec' % (dataset, FLAGS.max_enc_steps, FLAGS.beam_size, FLAGS.min_dec_steps, FLAGS.max_dec_steps)
    if ckpt_name is not None:
        dirname = '%s_%s' % (dirname, ckpt_name)
    return dirname
# NOTE(review): the bare expression below looks like a stripped decorator —
# presumably `@jitclass(numba_geometry_spec)` in the original; confirm.
(numba_geometry_spec)
class NumbaRadial1DGeometry(object):
    """Container for 1-D radial shell geometry: inner/outer radii and velocities."""
    def __init__(self, r_inner, r_outer, v_inner, v_outer):
        # Per-shell boundary arrays — presumably numpy arrays matching
        # numba_geometry_spec; TODO confirm dtypes against the spec.
        self.r_inner = r_inner
        self.r_outer = r_outer
        self.v_inner = v_inner
        self.v_outer = v_outer
def get_third_party():
    """Collect third-party package specs from all requirements/*.txt files.

    Scans every .txt file under ./requirements (recursively, relative to the
    current working directory) and returns the first whitespace-delimited
    token of each non-empty, non-comment line.

    Returns:
        list[str]: package requirement strings, in file order (may repeat).
    """
    package_list = []
    # rglob is already an iterable; no need to materialize a list first.
    for txt_file in Path('./requirements').rglob('*.txt'):
        with open(txt_file, 'r') as fp:
            for line in fp:
                line = line.strip()
                # Skip blank lines and comment lines — both are common in
                # requirements files and are not package specs.
                if not line or line.startswith('#'):
                    continue
                # split() (any whitespace) is more robust than split(' ')
                # against tabs and multiple spaces before trailing comments.
                package_list.append(line.split()[0])
    return package_list
# NOTE(review): the decorator line below appears truncated — presumably
# `@numba.extending.overload_method(UnionType, 'append_content')`; confirm.
.overload_method(UnionType, 'append_content')
def Union_append_content(builder, tag):
    """Numba typing-time overload for ``UnionType.append_content``.

    Returns the runtime implementation only when the argument types match;
    returning None (implicitly) lets numba try other overloads.
    """
    if (isinstance(builder, UnionType) and isinstance(tag, numba.types.Integer)):
        def append_content(builder, tag):
            # numba.literally forces `tag` to be a compile-time literal so the
            # content builder can be selected statically.
            content = builder._contents[numba.literally(tag)]
            builder._tags.append(tag)
            # Record where the new content item will land in its builder.
            builder._index.append(len(content))
            return content
        return append_content
class TriggerPool():
    """Population of candidate triggers with lazily-computed test results."""

    def __init__(self):
        self.triggers = []   # all candidate triggers, in insertion order
        self.results = []    # results[i] is the test result for triggers[i]

    def add(self, trigger):
        """Register a new candidate trigger."""
        self.triggers.append(trigger)

    def test(self, model, data):
        """Evaluate every trigger that does not have a result yet."""
        for idx in range(len(self.results), len(self.triggers)):
            self.results.append(model.test(data, 0.1, self.triggers[idx]))

    def expand(self, num=1):
        """Spawn ``num`` Gaussian-noised copies of the best-scoring trigger."""
        accuracies = [result.accuracy() for result in self.results]
        best = self.triggers[accuracies.index(max(accuracies))]
        for _ in range(num):
            noisy = best.duplicate().add_noise(type_='Gaussian', args={'std': 0.1})
            self.add(noisy)

    def success_triggers(self, threshold=90):
        """Return the tested triggers whose accuracy meets ``threshold``."""
        return [trigger for trigger, result in zip(self.triggers, self.results)
                if result.accuracy() >= threshold]
def transform_proposals_seg(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0):
    """Apply image transforms to precomputed proposals and superpixels in place.

    Transforms the proposal boxes, clips them to the image, drops boxes
    smaller than ``min_box_size``, keeps at most ``proposal_topk`` of them,
    and re-applies the geometric transforms to the superpixel map. Results
    are written back into ``dataset_dict``.
    """
    raw = dataset_dict['proposals']
    # Transform boxes in numpy space, then wrap them back into Boxes.
    boxes = Boxes(transforms.apply_box(raw.proposal_boxes.tensor.cpu().numpy()))
    objectness_logits = raw.objectness_logits
    oh_labels = raw.oh_labels
    superpixels = dataset_dict['superpixels'].cpu().numpy()
    boxes.clip(image_shape)
    # Keep only boxes with both sides above the minimum size.
    keep = boxes.nonempty(threshold=min_box_size)
    proposals = Instances(image_shape)
    proposals.proposal_boxes = boxes[keep][:proposal_topk]
    proposals.objectness_logits = objectness_logits[keep][:proposal_topk]
    proposals.oh_labels = oh_labels[keep][:proposal_topk]
    dataset_dict['proposals'] = proposals
    # Segmentation transforms expect float input; restore int32 afterwards.
    warped = transforms.apply_segmentation(superpixels.astype('float32'))
    dataset_dict['superpixels'] = torch.as_tensor(np.ascontiguousarray(warped.astype('int32')))
class Fpr(Critic):
    """Critic computing the false-positive rate at a fixed recall level
    (FPR@N, the standard OOD-detection metric).

    In ``evaluate`` normal samples are labeled 1 (positive) and anomalies 0,
    so the result is the fraction of anomalies accepted when ``recall_level``
    of normal samples are accepted.
    """
    def __init__(self, recall_level=0.95):
        super().__init__()
        # Fraction of positives that must be recalled at the chosen threshold.
        self.recall_level = recall_level
    def get_name(self):
        # e.g. 'FPR(95.0)'
        return (('FPR(' + str((self.recall_level * 100))) + ')')
    def stable_cumsum(self, arr, rtol=1e-05, atol=1e-08):
        """np.cumsum in float64, verifying the final element matches np.sum."""
        out = np.cumsum(arr, dtype=np.float64)
        expected = np.sum(arr, dtype=np.float64)
        if (not np.allclose(out[(- 1)], expected, rtol=rtol, atol=atol)):
            raise RuntimeError('cumsum was found to be unstable: its last element does not correspond to sum')
        return out
    def fpr_and_fdr_at_recall(self, y_true, y_score, recall_level, pos_label=None):
        """Return the FPR at the threshold whose recall is closest to
        ``recall_level``.

        Despite the name, only the FPR is returned. Adapted from
        sklearn-style ranking code.
        """
        classes = np.unique(y_true)
        # Require binary labels unless the positive label is given explicitly.
        if ((pos_label is None) and (not (np.array_equal(classes, [0, 1]) or np.array_equal(classes, [(- 1), 1]) or np.array_equal(classes, [0]) or np.array_equal(classes, [(- 1)]) or np.array_equal(classes, [1])))):
            raise ValueError('Data is not binary and pos_label is not specified')
        elif (pos_label is None):
            pos_label = 1.0
        # Binarize: True marks the positive class.
        y_true = (y_true == pos_label)
        # Sort by score descending (stable mergesort preserves tie order).
        desc_score_indices = np.argsort(y_score, kind='mergesort')[::(- 1)]
        y_score = y_score[desc_score_indices]
        y_true = y_true[desc_score_indices]
        # Evaluate only where the score value changes (plus the final index).
        distinct_value_indices = np.where(np.diff(y_score))[0]
        threshold_idxs = np.r_[(distinct_value_indices, (y_true.size - 1))]
        # Cumulative true/false positive counts at each candidate threshold.
        tps = self.stable_cumsum(y_true)[threshold_idxs]
        fps = ((1 + threshold_idxs) - tps)
        thresholds = y_score[threshold_idxs]
        recall = (tps / tps[(- 1)])
        # Drop the flat tail where recall is already maximal, reverse the
        # arrays, and append the (recall=1, fps=0, tps=0) endpoint.
        last_ind = tps.searchsorted(tps[(- 1)])
        sl = slice(last_ind, None, (- 1))
        (recall, fps, tps, thresholds) = (np.r_[(recall[sl], 1)], np.r_[(fps[sl], 0)], np.r_[(tps[sl], 0)], thresholds[sl])
        # Threshold whose recall is closest to the requested level.
        cutoff = np.argmin(np.abs((recall - recall_level)))
        # FPR = FP / (number of negatives) at that threshold.
        return (fps[cutoff] / np.sum(np.logical_not(y_true)))
    def evaluate(self, normal_scores, anomaly_scores):
        """Label normals 1 and anomalies 0, then compute FPR at recall_level.

        NOTE(review): `normal_scores + anomaly_scores` assumes both are
        Python lists (concatenation) — confirm callers never pass arrays.
        """
        all_scores = (normal_scores + anomaly_scores)
        all_labels = ([1 for _ in range(len(normal_scores))] + [0 for _ in range(len(anomaly_scores))])
        return self.fpr_and_fdr_at_recall(np.array(all_labels), np.array(all_scores), self.recall_level)
class cuFFTPlanCache(object):
    """Per-device handle on PyTorch's cuFFT plan cache.

    ``size`` and ``max_size`` are class-level context-aware properties built
    from torch's private cuFFT accessors (``cuFFTPlanCacheAttrContextProp``
    is defined elsewhere in this file).
    """
    def __init__(self, device_index):
        # CUDA device whose plan cache this object controls.
        self.device_index = device_index
    # Read-only property: the second argument is the error message used when
    # someone attempts to set it.
    size = cuFFTPlanCacheAttrContextProp(torch._cufft_get_plan_cache_size, '.size is a read-only property showing the number of plans currently in the cache. To change the cache capacity, set cufft_plan_cache.max_size.')
    # Read/write property backed by the getter/setter pair.
    max_size = cuFFTPlanCacheAttrContextProp(torch._cufft_get_plan_cache_max_size, torch._cufft_set_plan_cache_max_size)
    def clear(self):
        """Empty this device's cuFFT plan cache."""
        return torch._cufft_clear_plan_cache(self.device_index)
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq FSMT checkpoint into a HuggingFace Transformers dir.

    Writes vocab-src.json / vocab-tgt.json, the BPE merges file, config.json,
    the tokenizer config and the PyTorch weights into
    ``pytorch_dump_folder_path`` (created if missing).

    Args:
        fsmt_checkpoint_path: path to the fairseq checkpoint file.
        pytorch_dump_folder_path: output model directory.
    """
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'Writing results to {pytorch_dump_folder_path}')
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
    data_name_or_path = ''
    print(f'using checkpoint {checkpoint_file}')
    # Load the fairseq checkpoint (args + models) via the hub helper.
    chkpt = hub_utils.from_pretrained(fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs)
    args = dict(vars(chkpt['args']))
    src_lang = args['source_lang']
    tgt_lang = args['target_lang']
    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)
    # Convert the fairseq source/target dictionaries into HF vocab files.
    src_dict_file = os.path.join(fsmt_folder_path, f'dict.{src_lang}.txt')
    tgt_dict_file = os.path.join(fsmt_folder_path, f'dict.{tgt_lang}.txt')
    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-src.json')
    print(f'Generating {src_vocab_file}')
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-tgt.json')
    print(f'Generating {tgt_vocab_file}')
    with open(tgt_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
    # The merges file: strip fairseq's trailing frequency column from bpecodes.
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    fsmt_merges_file = os.path.join(fsmt_folder_path, 'bpecodes')
    with open(fsmt_merges_file, encoding='utf-8') as fin:
        merges = fin.read()
    merges = re.sub(' \\d+$', '', merges, 0, re.M)
    print(f'Generating {merges_file}')
    with open(merges_file, 'w', encoding='utf-8') as fout:
        fout.write(merges)
    # Model config: only fastbpe + moses are handled by the FSMT tokenizer.
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')
    assert (args['bpe'] == 'fastbpe'), f"need to extend tokenizer to support bpe={args['bpe']}"
    # Fixed: the message previously said "bpe=" while reporting the tokenizer value.
    assert (args['tokenizer'] == 'moses'), f"need to extend tokenizer to support tokenizer={args['tokenizer']}"
    model_conf = {'architectures': ['FSMTForConditionalGeneration'], 'model_type': 'fsmt', 'activation_dropout': args['activation_dropout'], 'activation_function': 'relu', 'attention_dropout': args['attention_dropout'], 'd_model': args['decoder_embed_dim'], 'dropout': args['dropout'], 'init_std': 0.02, 'max_position_embeddings': args['max_source_positions'], 'num_hidden_layers': args['encoder_layers'], 'src_vocab_size': src_vocab_size, 'tgt_vocab_size': tgt_vocab_size, 'langs': [src_lang, tgt_lang], 'encoder_attention_heads': args['encoder_attention_heads'], 'encoder_ffn_dim': args['encoder_ffn_embed_dim'], 'encoder_layerdrop': args['encoder_layerdrop'], 'encoder_layers': args['encoder_layers'], 'decoder_attention_heads': args['decoder_attention_heads'], 'decoder_ffn_dim': args['decoder_ffn_embed_dim'], 'decoder_layerdrop': args['decoder_layerdrop'], 'decoder_layers': args['decoder_layers'], 'bos_token_id': 0, 'pad_token_id': 1, 'eos_token_id': 2, 'is_encoder_decoder': True, 'scale_embedding': (not args['no_scale_embedding']), 'tie_word_embeddings': args['share_all_embeddings']}
    # Generation defaults; per-model tuned length_penalty when available.
    model_conf['num_beams'] = 5
    model_conf['early_stopping'] = False
    if ((model_dir in best_score_hparams) and ('length_penalty' in best_score_hparams[model_dir])):
        model_conf['length_penalty'] = best_score_hparams[model_dir]['length_penalty']
    else:
        model_conf['length_penalty'] = 1.0
    print(f'Generating {fsmt_model_config_file}')
    with open(fsmt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # Tokenizer config.
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {'langs': [src_lang, tgt_lang], 'model_max_length': 1024}
    print(f'Generating {fsmt_tokenizer_config_file}')
    with open(fsmt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # Convert the state dict: prefix keys with 'model.' and drop fairseq-only keys.
    model = chkpt['models'][0]
    model_state_dict = model.state_dict()
    model_state_dict = OrderedDict(((('model.' + k), v) for (k, v) in model_state_dict.items()))
    ignore_keys = ['model.model', 'model.encoder.version', 'model.decoder.version', 'model.encoder_embed_tokens.weight', 'model.decoder_embed_tokens.weight', 'model.encoder.embed_positions._float_tensor', 'model.decoder.embed_positions._float_tensor']
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    # Sanity-check the converted weights load into the HF architecture.
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)
    model_new.load_state_dict(model_state_dict, strict=False)
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f'Generating {pytorch_weights_dump_path}')
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print('Conversion is done!')
    print('\nLast step is to upload the files to s3')
    print(f'cd {data_root}')
    print(f'transformers-cli upload {model_dir}')
    print('Note: CDN caches files for up to 24h, so either use a local model path or use `from_pretrained(mname, use_cdn=False)` to use the non-cached version.')
class MMFDatasetBuilder(BaseDatasetBuilder):
    """Generic MMF dataset builder: downloads zoo resources for a dataset and
    loads annotation-backed datasets, optionally carving dev/test splits out
    of the training data.

    NOTE(review): several decorators appear stripped in this file —
    ``dataset_class``, ``zoo_variation`` and ``zoo_config_path`` read like
    ``@property`` getters, and the bare ``_class.setter`` /
    ``_variation.setter`` / ``_config_path.setter`` lines look like mangled
    ``@<name>.setter`` decorators. Confirm against the original source.
    """
    # Subclasses may pin these; zoo_config_path falls back to global config.
    ZOO_CONFIG_PATH = None
    ZOO_VARIATION = None
    def __init__(self, dataset_name, dataset_class=None, zoo_variation='defaults', *args, **kwargs):
        super().__init__(dataset_name)
        self.dataset_class = dataset_class
        self.zoo_type = 'datasets'
        self.zoo_variation = zoo_variation
    def dataset_class(self):
        # Getter (presumably @property — see class note).
        return self._dataset_class
    _class.setter
    def dataset_class(self, dataset_class):
        self._dataset_class = dataset_class
    def zoo_variation(self):
        # Getter (presumably @property — see class note).
        return self._zoo_variation
    _variation.setter
    def zoo_variation(self, zoo_variation):
        self._zoo_variation = zoo_variation
    def zoo_config_path(self):
        # Lazily resolve the zoo config path from the global env config.
        if (self.ZOO_CONFIG_PATH is None):
            self.ZOO_CONFIG_PATH = get_global_config('env.dataset_zoo')
        return self.ZOO_CONFIG_PATH
    _config_path.setter
    def zoo_config_path(self, zoo_config_path):
        self.ZOO_CONFIG_PATH = zoo_config_path
    def set_dataset_class(self, dataset_cls):
        """Explicit setter alternative for the dataset class."""
        self.dataset_class = dataset_cls
    def build(self, config, dataset_type='train', *args, **kwargs):
        """Download all zoo requirements for this dataset.

        If the config lists no ``zoo_requirements``, the dataset's own
        name/variation is used as the single requirement.
        """
        self.config = config
        requirements = config.get('zoo_requirements', [])
        if (len(requirements) == 0):
            self._download_requirement(config, self.dataset_name, self.zoo_variation)
        else:
            for requirement in requirements:
                self._download_requirement(config, requirement)
    def _download_requirement(self, config, requirement_key, requirement_variation='defaults'):
        """Download one zoo requirement (``name`` or ``name.variation``)."""
        (version, resources) = get_zoo_config(requirement_key, requirement_variation, self.zoo_config_path, self.zoo_type)
        if (resources is None):
            return
        # 'name.variation' keys embed the variation in the requirement itself.
        requirement_split = requirement_key.split('.')
        dataset_name = requirement_split[0]
        if (len(requirement_split) >= 2):
            dataset_variation = requirement_split[1]
        else:
            dataset_variation = requirement_variation
        download_path = os.path.join(get_mmf_env('data_dir'), 'datasets', dataset_name, dataset_variation)
        download_path = get_absolute_path(download_path)
        if (not isinstance(resources, collections.abc.Mapping)):
            # Flat resource list: download everything directly.
            self._download_resources(resources, download_path, version)
        else:
            # Structured resources: fetch only what the config asks for.
            use_features = config.get('use_features', False)
            use_images = config.get('use_images', False)
            if use_features:
                self._download_based_on_attribute(resources, download_path, version, 'features')
            if use_images:
                self._download_based_on_attribute(resources, download_path, version, 'images')
            self._download_based_on_attribute(resources, download_path, version, 'annotations')
            self._download_resources(resources.get('extras', []), download_path, version)
    def load(self, config, dataset_type, *args, **kwargs):
        """Build the concatenated dataset for ``dataset_type``.

        Returns None when no annotations are configured for this type.
        """
        self.config = config
        split_dataset_from_train = self.config.get('split_train', False)
        if split_dataset_from_train:
            config = self._modify_dataset_config_for_split(config)
        annotations = self._read_annotations(config, dataset_type)
        if (annotations is None):
            return None
        # One sub-dataset per annotation (imdb) file, concatenated together.
        datasets = []
        for imdb_idx in range(len(annotations)):
            dataset_class = self.dataset_class
            dataset = dataset_class(config, dataset_type, imdb_idx)
            datasets.append(dataset)
        dataset = MMFConcatDataset(datasets)
        if split_dataset_from_train:
            dataset = self._split_dataset_from_train(dataset, dataset_type)
        self.dataset = dataset
        return self.dataset
    def _split_dataset_from_train(self, dataset, dataset_type):
        """Take this type's slice of the (permuted) training data."""
        if ((dataset_type in self.config.split_train.keys()) or (dataset_type == 'train')):
            (start, end) = self._calculate_split_for_dataset_type(dataset_type)
            dataset_length = len(dataset)
            # Convert fractional boundaries into index boundaries.
            (start, end) = (round((start * dataset_length)), round((end * dataset_length)))
            if (start > end):
                raise ValueError(f'Train split ratio for {dataset_type} must be positive.')
            indices = self._generate_permuted_indexes(dataset_length)[start:end]
            dataset = MMFSubset(dataset, indices)
            print(f'Dataset type: {dataset_type} length: {len(dataset)} total: {dataset_length}')
        return dataset
    def _generate_permuted_indexes(self, dataset_length):
        # Seeded permutation so every dataset_type slices the same shuffle.
        generator = torch.Generator()
        generator.manual_seed(self.config.get('split_train.seed', 123456))
        return torch.randperm(dataset_length, generator=generator)
    def _modify_dataset_config_for_split(self, config):
        """Point every split-train type at the train images/features/annotations."""
        with open_dict(config):
            for data_type in config.split_train:
                if (data_type == 'seed'):
                    continue
                if config.use_images:
                    config.images[data_type] = deepcopy(config.images.train)
                if config.use_features:
                    config.features[data_type] = deepcopy(config.features.train)
                config.annotations[data_type] = deepcopy(config.annotations.train)
        return config
    def _read_annotations(self, config, dataset_type):
        """Return the annotation path list for ``dataset_type`` (or None)."""
        annotations = config.get('annotations', {}).get(dataset_type, [])
        if isinstance(annotations, str):
            annotations = [annotations]
        if (len(annotations) == 0):
            # NOTE(review): .format(dataset_type) only applies to the final
            # literal, which has no placeholder — the dataset type never
            # appears in this warning. Likely meant to format the whole
            # message (or use an f-string); confirm against upstream.
            warnings.warn(((('Dataset type {} is not present or empty in ' + 'annotations of dataset config or either annotations ') + 'key is not present. Returning None. ') + "This dataset won't be used.".format(dataset_type)))
            return None
        return annotations
    def _calculate_split_for_dataset_type(self, dataset_type):
        """Return the (start, end) fraction of the train set for this type.

        NOTE(review): implicitly returns None for a non-train type that is
        missing from split_train — callers would crash on unpacking; confirm
        that case cannot occur.
        """
        start = 0.0
        for data_type in self.config.split_train:
            if (data_type == 'seed'):
                continue
            if (dataset_type == data_type):
                return (start, (start + self.config.split_train[data_type]))
            start += self.config.split_train[data_type]
        if (start > 1.0):
            raise ValueError(('Ratios of val plus test should not exceed 100%.' + ' Need to leave some percentage for training.'))
        elif (start == 1.0):
            warnings.warn('All data in training set is used for val and/or test.')
        # Train gets whatever fraction remains after val/test.
        if (dataset_type == 'train'):
            return (start, 1.0)
    def _download_based_on_attribute(self, resources, download_path, version, attribute):
        # Downloads resources[attribute] into its own subdirectory.
        path = os.path.join(download_path, attribute)
        self._download_resources(resources.get(attribute, []), path, version)
    def _download_resources(self, resources, path, version):
        download.download_resources(resources, path, version)
class MPNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) MPNet tokenizer.

    NOTE(review): ``mask_token`` reads like an ``@property`` getter and the
    bare ``_token.setter`` line looks like a mangled ``@mask_token.setter``
    decorator; confirm against the original transformers source.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MPNetTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, tokenizer_file=None, do_lower_case=True, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='[UNK]', pad_token='<pad>', mask_token='<mask>', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        # Rebuild the backend normalizer when the serialized tokenizer state
        # disagrees with the requested lower-casing / accent-stripping flags.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if ((pre_tok_state.get('do_lower_case', do_lower_case) != do_lower_case) or (pre_tok_state.get('strip_accents', strip_accents) != strip_accents)):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop('type'))
            pre_tok_state['do_lower_case'] = do_lower_case
            pre_tok_state['strip_accents'] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case
    def mask_token(self) -> str:
        # Getter (presumably @property — see class note). Logs an error and
        # returns None when the mask token was never set.
        if ((self._mask_token is None) and self.verbose):
            logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)
    _token.setter
    def mask_token(self, value):
        # Make the mask token eat the space before it (lstrip=True) so
        # "<mask>" behaves like a normal word preceded by whitespace.
        value = (AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value)
        self._mask_token = value
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap sequences with special tokens:
        single: <s> A </s>; pair: <s> A </s> </s> B </s>."""
        output = (([self.bos_token_id] + token_ids_0) + [self.eos_token_id])
        if (token_ids_1 is None):
            return output
        return (((output + [self.eos_token_id]) + token_ids_1) + [self.eos_token_id])
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Return an all-zeros token-type mask — MPNet does not use segment ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Save the backend tokenizer model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
def main():
    """Compute a mean feature vector for every class folder in feature_dir."""
    args = get_args()
    for class_index in os.listdir(args.feature_dir):
        print('Class index ', class_index)
        compute_mean_vector(class_index, args.save_path, args.feature_dir)
class AlignTextConfig(PretrainedConfig):
    """Configuration for the text tower of an ALIGN model.

    Stores standard BERT-style hyperparameters verbatim as attributes.
    """
    model_type = 'align_text_model'
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    def from_pretrained(cls, pretrained_model_name_or_path: Union[(str, os.PathLike)], **kwargs) -> 'PretrainedConfig':
        """Load the text config, unwrapping it from a full ALIGN config if needed.

        NOTE(review): first parameter is ``cls`` — presumably decorated with
        ``@classmethod`` in the original source; confirm.
        """
        (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # When given a composite ALIGN config, use its text sub-config.
        if (config_dict.get('model_type') == 'align'):
            config_dict = config_dict['text_config']
        if (('model_type' in config_dict) and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type)):
            logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
def strip_artist(s):
    """Normalize an artist string.

    Lowercases, removes every occurrence of 'the ', and truncates at the
    earliest collaboration/separator marker (e.g. 'feat', '&', ' - ').
    """
    s = s.lower().replace('the ', '')
    separators = [' - ', '/', ' ft', 'feat', 'featuring', ' and ', ' with ', '_', ' vs', '&', ';', '+']
    # Cut at the earliest separator occurrence, if any is present.
    hits = [pos for pos in (s.find(sep) for sep in separators) if pos != -1]
    if hits:
        s = s[:min(hits)]
    return s
class HessianProblem():
    def __init__(self, db: database.Database, form_handler: _forms.ControlFormHandler, adjoint_form_handler: _forms.AdjointFormHandler, gradient_problem: control_gradient_problem.ControlGradientProblem, box_constraints: boxc.BoxConstraints) -> None:
        """Set up the (truncated-Newton) Hessian problem for optimal control.

        Reads inner-Newton and Picard solver settings from the config,
        allocates all work vectors in the control spaces, and prepares tensors
        and KSP options for the sensitivity solves.
        """
        self.db = db
        self.form_handler = form_handler
        self.adjoint_form_handler = adjoint_form_handler
        self.gradient_problem = gradient_problem
        self.box_constraints = box_constraints
        # Inner (truncated) Newton solver settings.
        self.config = self.db.config
        self.inner_newton = self.config.get('AlgoTNM', 'inner_newton')
        self.max_it_inner_newton = self.config.getint('AlgoTNM', 'max_it_inner_newton')
        self.inner_newton_rtol = self.config.getfloat('AlgoTNM', 'inner_newton_rtol')
        self.inner_newton_atol = self.config.getfloat('AlgoTNM', 'inner_newton_atol')
        self.test_directions = self.form_handler.hessian_form_handler.test_directions
        # Work vectors in the control spaces for the Krylov iteration
        # (residual, step, and CG/Newton auxiliary directions).
        self.residual = _utils.create_function_list(self.db.function_db.control_spaces)
        self.delta_control = _utils.create_function_list(self.db.function_db.control_spaces)
        self.temp1 = _utils.create_function_list(self.db.function_db.control_spaces)
        self.temp2 = _utils.create_function_list(self.db.function_db.control_spaces)
        self.p = _utils.create_function_list(self.db.function_db.control_spaces)
        self.p_prev = _utils.create_function_list(self.db.function_db.control_spaces)
        self.p_pprev = _utils.create_function_list(self.db.function_db.control_spaces)
        self.s = _utils.create_function_list(self.db.function_db.control_spaces)
        self.s_prev = _utils.create_function_list(self.db.function_db.control_spaces)
        self.s_pprev = _utils.create_function_list(self.db.function_db.control_spaces)
        self.q = _utils.create_function_list(self.db.function_db.control_spaces)
        self.q_prev = _utils.create_function_list(self.db.function_db.control_spaces)
        self.hessian_actions = _utils.create_function_list(self.db.function_db.control_spaces)
        # Active/inactive decompositions for box-constraint handling.
        self.inactive_part = _utils.create_function_list(self.db.function_db.control_spaces)
        self.active_part = _utils.create_function_list(self.db.function_db.control_spaces)
        # Pre-allocated PETSc tensors for the state/adjoint sensitivity systems.
        self.state_A_tensors = [fenics.PETScMatrix() for _ in range(self.db.parameter_db.state_dim)]
        self.state_b_tensors = [fenics.PETScVector() for _ in range(self.db.parameter_db.state_dim)]
        self.adjoint_A_tensors = [fenics.PETScMatrix() for _ in range(self.db.parameter_db.state_dim)]
        self.adjoint_b_tensors = [fenics.PETScVector() for _ in range(self.db.parameter_db.state_dim)]
        self.state_dim = self.db.parameter_db.state_dim
        # Picard iteration settings for coupled state systems.
        self.picard_rtol = self.config.getfloat('StateSystem', 'picard_rtol')
        self.picard_atol = self.config.getfloat('StateSystem', 'picard_atol')
        self.picard_max_iter = self.config.getint('StateSystem', 'picard_iter')
        self.picard_verbose = self.config.getboolean('StateSystem', 'picard_verbose')
        self.no_sensitivity_solves = 0
        self.bcs_list_ad = self.adjoint_form_handler.bcs_list_ad
        # CG + BoomerAMG options for the Riesz projections, shared per control.
        option: _typing.KspOption = {'ksp_type': 'cg', 'pc_type': 'hypre', 'pc_hypre_type': 'boomeramg', 'pc_hypre_boomeramg_strong_threshold': 0.7, 'ksp_rtol': 1e-16, 'ksp_atol': 1e-50, 'ksp_max_it': 100}
        self.riesz_ksp_options: List[_typing.KspOption] = []
        for _ in range(len(self.db.function_db.controls)):
            self.riesz_ksp_options.append(option)
    def hessian_application(self, h: List[fenics.Function], out: List[fenics.Function]) -> None:
        """Apply the Hessian of the cost functional to the direction ``h``.

        The result is written into ``out``. This performs one linearized state
        (sensitivity) solve and one adjoint sensitivity solve, so
        ``no_sensitivity_solves`` is incremented by 2 at the end.

        Args:
            h: Direction the Hessian is applied to.
            out: Functions receiving the Riesz-projected Hessian action.
        """
        # aypx(0.0, x) overwrites the vector with x, i.e. copy h into the
        # test directions used by the Hessian forms.
        for i in range(len(self.test_directions)):
            self.test_directions[i].vector().vec().aypx(0.0, h[i].vector().vec())
            self.test_directions[i].vector().apply('')
        self.bcs_list_ad = self.adjoint_form_handler.bcs_list_ad
        # Sequential solves are used unless a Picard iteration is requested
        # for a genuinely coupled (state_dim > 1) system.
        if ((not self.config.getboolean('StateSystem', 'picard_iteration')) or (self.state_dim == 1)):
            # Forward sensitivity equations, solved in ascending order.
            for i in range(self.state_dim):
                _utils.assemble_and_solve_linear(self.form_handler.hessian_form_handler.sensitivity_eqs_lhs[i], self.form_handler.hessian_form_handler.sensitivity_eqs_rhs[i], self.bcs_list_ad[i], fun=self.db.function_db.states_prime[i], ksp_options=self.db.parameter_db.state_ksp_options[i], comm=self.db.geometry_db.mpi_comm, preconditioner_form=self.db.form_db.preconditioner_forms[i])
            # Adjoint sensitivity equations, solved in reverse order
            # (index -1 - i walks the lists from the back).
            for i in range(self.state_dim):
                _utils.assemble_and_solve_linear(self.form_handler.hessian_form_handler.adjoint_sensitivity_eqs_lhs[((- 1) - i)], self.form_handler.hessian_form_handler.w_1[((- 1) - i)], self.bcs_list_ad[((- 1) - i)], fun=self.db.function_db.adjoints_prime[((- 1) - i)], ksp_options=self.db.parameter_db.adjoint_ksp_options[((- 1) - i)], comm=self.db.geometry_db.mpi_comm, preconditioner_form=self.db.form_db.preconditioner_forms[((- 1) - i)])
        else:
            # Coupled systems: fixed-point (Picard) iteration over all state /
            # adjoint sensitivity equations.
            nonlinear_solvers.picard_iteration(self.form_handler.hessian_form_handler.sensitivity_eqs_picard, self.db.function_db.states_prime, self.adjoint_form_handler.bcs_list_ad, max_iter=self.picard_max_iter, rtol=self.picard_rtol, atol=self.picard_atol, verbose=self.picard_verbose, inner_damped=False, inner_inexact=False, inner_verbose=False, inner_max_iter=2, ksp_options=self.db.parameter_db.state_ksp_options, A_tensors=self.state_A_tensors, b_tensors=self.state_b_tensors, inner_is_linear=True, preconditioner_forms=self.db.form_db.preconditioner_forms)
            nonlinear_solvers.picard_iteration(self.form_handler.hessian_form_handler.adjoint_sensitivity_eqs_picard, self.db.function_db.adjoints_prime, self.adjoint_form_handler.bcs_list_ad, max_iter=self.picard_max_iter, rtol=self.picard_rtol, atol=self.picard_atol, verbose=self.picard_verbose, inner_damped=False, inner_inexact=False, inner_verbose=False, inner_max_iter=2, ksp_options=self.db.parameter_db.adjoint_ksp_options, A_tensors=self.adjoint_A_tensors, b_tensors=self.adjoint_b_tensors, inner_is_linear=True, preconditioner_forms=self.db.form_db.preconditioner_forms)
        # Riesz projection: map the assembled Hessian action (a dual-space
        # vector) back into the control function spaces.
        for i in range(len(out)):
            b = fenics.as_backend_type(fenics.assemble(self.form_handler.hessian_form_handler.hessian_rhs[i])).vec()
            _utils.solve_linear_problem(A=self.form_handler.riesz_projection_matrices[i], b=b, fun=out[i], ksp_options=self.riesz_ksp_options[i], comm=self.db.geometry_db.mpi_comm)
        self.no_sensitivity_solves += 2
    def reduced_hessian_application(self, h: List[fenics.Function], out: List[fenics.Function]) -> None:
        """Apply the reduced Hessian (restricted by the active set) to ``h``.

        On the active set the reduced Hessian acts as the identity; on the
        inactive set it acts as the true Hessian, projected back onto the
        inactive set. The result is written into ``out``.
        """
        for j in range(len(out)):
            out[j].vector().vec().set(0.0)
            out[j].vector().apply('')
        # Hessian acts only on the inactive part of the direction ...
        self.box_constraints.restrictor.restrict_to_inactive_set(h, self.inactive_part)
        self.hessian_application(self.inactive_part, self.hessian_actions)
        # ... and its action is projected back onto the inactive set.
        self.box_constraints.restrictor.restrict_to_inactive_set(self.hessian_actions, self.inactive_part)
        # Identity on the active set.
        self.box_constraints.restrictor.restrict_to_active_set(h, self.active_part)
        for j in range(len(out)):
            # aypx(0.0, x) assigns: out = active + inactive contributions.
            out[j].vector().vec().aypx(0.0, (self.active_part[j].vector().vec() + self.inactive_part[j].vector().vec()))
            out[j].vector().apply('')
    def newton_solve(self) -> List[fenics.Function]:
        """Compute the Newton increment for the controls.

        Computes the current gradient and active sets, then solves the
        Newton system with the configured inner Krylov method ('cg' or 'cr').

        Returns:
            The Newton increment ``delta_control``.

        NOTE(review): if ``inner_newton`` is neither 'cg' nor 'cr', the zero
        increment is returned silently -- confirm values are validated upstream.
        """
        self.gradient_problem.solve()
        self.box_constraints.restrictor.compute_active_sets()
        # Start the inner iteration from the zero increment.
        for fun in self.delta_control:
            fun.vector().vec().set(0.0)
            fun.vector().apply('')
        if (self.inner_newton.casefold() == 'cg'):
            self.cg()
        elif (self.inner_newton.casefold() == 'cr'):
            self.cr()
        return self.delta_control
    def cg(self) -> None:
        """Truncated conjugate gradient method for the Newton system.

        Solves ``H_reduced * delta_control = -gradient`` iteratively, where
        the scalar products are split into active- and inactive-set parts.
        Terminates on the relative/absolute tolerance or after
        ``max_it_inner_newton`` iterations.
        """
        # Initialize residual r = -gradient and search direction p = r
        # (aypx(0.0, x) assigns x).
        for j in range(len(self.residual)):
            self.residual[j].vector().vec().aypx(0.0, (- self.db.function_db.gradient[j].vector().vec()))
            self.residual[j].vector().apply('')
            self.p[j].vector().vec().aypx(0.0, self.residual[j].vector().vec())
            self.p[j].vector().apply('')
        rsold = self.form_handler.scalar_product(self.residual, self.residual)
        # Initial residual norm, used for the relative stopping criterion.
        eps_0 = np.sqrt(rsold)
        for _ in range(self.max_it_inner_newton):
            self.reduced_hessian_application(self.p, self.q)
            # Curvature term p^T H_reduced p, assembled from the active part
            # (identity) and the inactive part (true Hessian action q).
            self.box_constraints.restrictor.restrict_to_active_set(self.p, self.temp1)
            sp_val1 = self.form_handler.scalar_product(self.temp1, self.temp1)
            self.box_constraints.restrictor.restrict_to_inactive_set(self.p, self.temp1)
            self.box_constraints.restrictor.restrict_to_inactive_set(self.q, self.temp2)
            sp_val2 = self.form_handler.scalar_product(self.temp1, self.temp2)
            sp_val = (sp_val1 + sp_val2)
            alpha = (rsold / sp_val)
            # Standard CG updates: x += alpha*p, r -= alpha*q.
            for j in range(len(self.delta_control)):
                self.delta_control[j].vector().vec().axpy(alpha, self.p[j].vector().vec())
                self.delta_control[j].vector().apply('')
                self.residual[j].vector().vec().axpy((- alpha), self.q[j].vector().vec())
                self.residual[j].vector().apply('')
            rsnew = self.form_handler.scalar_product(self.residual, self.residual)
            eps = np.sqrt(rsnew)
            _loggers.debug(f'Residual of the CG method: {(eps / eps_0):.3e} (relative)')
            if (eps < (self.inner_newton_atol + (self.inner_newton_rtol * eps_0))):
                break
            beta = (rsnew / rsold)
            # p = r + beta*p (aypx scales p by beta, then adds r).
            for j in range(len(self.p)):
                self.p[j].vector().vec().aypx(beta, self.residual[j].vector().vec())
                self.p[j].vector().apply('')
            rsold = rsnew
    def cr(self) -> None:
        """Truncated conjugate residual method for the Newton system.

        Like :meth:`cg`, but minimizes the residual norm; requires the extra
        Hessian applications ``s = H r`` and keeps ``q = H p`` updated via the
        same recurrence as ``p``. Scalar products are again split into
        active- and inactive-set contributions.
        """
        # r = -gradient, p = r (aypx(0.0, x) assigns x).
        for j in range(len(self.residual)):
            self.residual[j].vector().vec().aypx(0.0, (- self.db.function_db.gradient[j].vector().vec()))
            self.residual[j].vector().apply('')
            self.p[j].vector().vec().aypx(0.0, self.residual[j].vector().vec())
            self.p[j].vector().apply('')
        eps_0 = np.sqrt(self.form_handler.scalar_product(self.residual, self.residual))
        # s = H_reduced r and, initially, q = s (since p = r).
        self.reduced_hessian_application(self.residual, self.s)
        for j in range(len(self.q)):
            self.q[j].vector().vec().aypx(0.0, self.s[j].vector().vec())
            self.q[j].vector().apply('')
        # rar = r^T H r, split into active and inactive parts.
        self.box_constraints.restrictor.restrict_to_active_set(self.residual, self.temp1)
        self.box_constraints.restrictor.restrict_to_active_set(self.s, self.temp2)
        sp_val1 = self.form_handler.scalar_product(self.temp1, self.temp2)
        self.box_constraints.restrictor.restrict_to_inactive_set(self.residual, self.temp1)
        self.box_constraints.restrictor.restrict_to_inactive_set(self.s, self.temp2)
        sp_val2 = self.form_handler.scalar_product(self.temp1, self.temp2)
        rar = (sp_val1 + sp_val2)
        for i in range(self.max_it_inner_newton):
            # denominator = ||q||^2 in the split inner product.
            self.box_constraints.restrictor.restrict_to_active_set(self.q, self.temp1)
            self.box_constraints.restrictor.restrict_to_inactive_set(self.q, self.temp2)
            denom1 = self.form_handler.scalar_product(self.temp1, self.temp1)
            denom2 = self.form_handler.scalar_product(self.temp2, self.temp2)
            denominator = (denom1 + denom2)
            alpha = (rar / denominator)
            # x += alpha*p, r -= alpha*q.
            for j in range(len(self.delta_control)):
                self.delta_control[j].vector().vec().axpy(alpha, self.p[j].vector().vec())
                self.delta_control[j].vector().apply('')
                self.residual[j].vector().vec().axpy((- alpha), self.q[j].vector().vec())
                self.residual[j].vector().apply('')
            eps = np.sqrt(self.form_handler.scalar_product(self.residual, self.residual))
            _loggers.debug(f'Residual of the CR method: {(eps / eps_0):.3e} (relative)')
            if ((eps < (self.inner_newton_atol + (self.inner_newton_rtol * eps_0))) or (i == (self.max_it_inner_newton - 1))):
                break
            # Recompute s = H_reduced r and the split product rar_new = r^T H r.
            self.reduced_hessian_application(self.residual, self.s)
            self.box_constraints.restrictor.restrict_to_active_set(self.residual, self.temp1)
            self.box_constraints.restrictor.restrict_to_active_set(self.s, self.temp2)
            sp_val1 = self.form_handler.scalar_product(self.temp1, self.temp2)
            self.box_constraints.restrictor.restrict_to_inactive_set(self.residual, self.temp1)
            self.box_constraints.restrictor.restrict_to_inactive_set(self.s, self.temp2)
            sp_val2 = self.form_handler.scalar_product(self.temp1, self.temp2)
            rar_new = (sp_val1 + sp_val2)
            beta = (rar_new / rar)
            # p = r + beta*p and q = s + beta*q (same recurrence for both).
            for j in range(len(self.p)):
                self.p[j].vector().vec().aypx(beta, self.residual[j].vector().vec())
                self.p[j].vector().apply('')
                self.q[j].vector().vec().aypx(beta, self.s[j].vector().vec())
                self.q[j].vector().apply('')
            rar = rar_new
def count_letter_ngram(sentence, n=3):
    """Return the set of character n-grams of `sentence` (whitespace-stripped).

    If the stripped sentence is shorter than ``n``, the set of its individual
    characters is returned instead.

    Bug fix: the original computed the loop bound from ``sentence.strip()``
    but sliced the unstripped ``sentence``, silently dropping trailing
    n-grams whenever the input had surrounding whitespace. The text is now
    stripped once and used consistently.

    Args:
        sentence: Input string.
        n: n-gram length (default 3).

    Returns:
        A set of strings of length ``n`` (or single characters when the
        stripped input is shorter than ``n``).
    """
    text = sentence.strip()
    if len(text) < n:
        return set(text)
    return {text[k:(k + n)] for k in range(len(text) - n + 1)}
class network_29layers_Custom(network_29layers):
    """Variant of ``network_29layers`` whose forward pass can optionally skip
    the final L2 normalization of the fc features."""

    def forward(self, x, nrm=True):
        """Run the backbone and return (normalized) fc features.

        Args:
            x: Input batch tensor.
            nrm: When exactly ``False``, return the raw fc features;
                otherwise return their L2-normalized form.
        """
        # Backbone stages applied in their fixed order.
        for stage_name in ('conv1', 'pool1', 'block1', 'group1', 'pool2',
                           'block2', 'group2', 'pool3', 'block3', 'group3',
                           'block4', 'group4', 'pool4'):
            x = getattr(self, stage_name)(x)
        # Flatten, project, and apply dropout (active only while training).
        features = self.fc(x.view(x.size(0), (- 1)))
        features = F.dropout(features, training=self.training)
        # Deliberately an identity check: only an explicit False skips
        # normalization (matches the original `nrm is False`).
        if nrm is False:
            return features
        return F.normalize(features, p=2, dim=1)
def length(node, indices):
    """Return a Taichi expression evaluating the run-time length of the given
    SNode along the dynamic axis indexed by ``indices``.

    Args:
        node: SNode wrapper exposing a ``_snode.ptr`` handle.
        indices: Index expression(s) selecting the cell whose length is queried.
    """
    ast_builder = impl.get_runtime().compiling_callable.ast_builder()
    length_expr = ast_builder.expr_snode_length(node._snode.ptr, expr.make_expr_group(indices))
    debug_info = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info())
    return expr.Expr(length_expr, dbg_info=debug_info)
def GenerateSM90_TensorOp_1684_complex_gaussian(manifest, cuda_version):
    """Register SM90 f64 complex-gaussian tensor-op GEMM kernels.

    No-op unless the CUDA toolkit is at least 11.8.
    """
    if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
        return
    # All row/column-major combinations for A and B; C is always column-major.
    # (Comprehension order matches the original literal list: B varies fastest.)
    layouts = [
        (layout_a, layout_b, LayoutType.ColumnMajor)
        for layout_a in (LayoutType.ColumnMajor, LayoutType.RowMajor)
        for layout_b in (LayoutType.ColumnMajor, LayoutType.RowMajor)
    ]
    math_inst = MathInstruction([16, 8, 4], DataType.f64, DataType.f64, DataType.f64, OpcodeClass.TensorOp, MathOperation.multiply_add_complex_gaussian)
    min_cc = 90
    max_cc = 1024
    alignment_constraints = [1]
    # (threadblock shape, stage count, warp layout) per tile.
    tile_configs = [
        ([64, 64, 8], 3, [4, 2, 1]),
        ([64, 32, 8], 4, [2, 2, 1]),
        ([32, 64, 8], 4, [2, 2, 1]),
        ([32, 32, 8], 4, [2, 2, 1]),
        ([16, 32, 8], 4, [1, 2, 1]),
        ([32, 16, 8], 4, [2, 1, 1]),
    ]
    tile_descriptions = [
        TileDescription(shape, stages, warps, math_inst, min_cc, max_cc)
        for (shape, stages, warps) in tile_configs
    ]
    data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
    # All conjugation combinations; A's transform varies fastest, matching the
    # original ordering (none,none), (conj,none), (none,conj), (conj,conj).
    complex_transforms = [
        (transform_a, transform_b)
        for transform_b in (ComplexTransform.none, ComplexTransform.conj)
        for transform_a in (ComplexTransform.none, ComplexTransform.conj)
    ]
    CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints, complex_transforms)
class AssistQVideoPath(Dataset):
    """Dataset whose items are per-GPU shards of (video paths, save dirs).

    Index ``i`` yields the slice of videos/save directories assigned to GPU
    rank ``i``; ``len()`` is the number of GPUs, not the number of videos.
    """

    def __init__(self, root, split, save_dir, num_gpus):
        super().__init__()
        videos, save_dirs = get_assistq_videos(root, split, save_dir)
        # Pair first so that, like zip(), any length mismatch truncates both
        # lists to the shorter one.
        paired = list(zip(videos, save_dirs))
        self.videos = [video for (video, _) in paired]
        self.save_dirs = [path for (_, path) in paired]
        print(f'total video data size: {len(self.videos)}')
        # Shard size: ceil-ish split so every rank gets at most `shard` items.
        shard = (len(self.videos) // num_gpus) + 1
        self.videos = [self.videos[(shard * rank):(shard * (rank + 1))] for rank in range(num_gpus)]
        self.save_dirs = [self.save_dirs[(shard * rank):(shard * (rank + 1))] for rank in range(num_gpus)]
        self.num_gpus = num_gpus

    def __getitem__(self, index):
        """Return the (videos, save_dirs) shard for GPU rank `index`."""
        return (self.videos[index], self.save_dirs[index])

    def __len__(self):
        """One item per GPU."""
        return self.num_gpus
def save(papers, authors, edges):
    """Persist the paper-author bipartite graph to s2_2_bipartite_graph/.

    Builds the sparse paper x author biadjacency matrix from the edge list,
    then writes the papers table, the edges table, and the matrix to disk.

    Side effects: drops the ``paper_node_id`` / ``author_node_id`` columns
    from all three DataFrames in place.

    NOTE(review): the ``authors`` table is modified but never written here --
    confirm it is saved elsewhere.

    Args:
        papers: DataFrame with a ``paper_node_id`` column.
        authors: DataFrame with an ``author_node_id`` column.
        edges: DataFrame with ``paper_node_id`` and ``author_node_id`` columns.
    """
    # Build the matrix before the id columns are dropped below.
    # dtype=bool: the np.bool alias was removed in NumPy 1.24; the builtin
    # bool is the supported spelling of the same dtype.
    biadjacency = sparse.coo_matrix((np.ones(len(edges), dtype=bool), (edges['paper_node_id'], edges['author_node_id'])))
    papers.drop('paper_node_id', axis=1, inplace=True)
    authors.drop('author_node_id', axis=1, inplace=True)
    edges.drop('paper_node_id', axis=1, inplace=True)
    edges.drop('author_node_id', axis=1, inplace=True)
    print('saving:')
    print(' paper table: {:,} papers, {:,} features'.format(*papers.shape))
    papers.to_csv('s2_2_bipartite_graph/papers.csv')
    print(' edges table: {:,} edges'.format(edges.shape[0]))
    edges.to_csv('s2_2_bipartite_graph/paper_author_edges.csv', index=False)
    print(' biadjacency matrix: {:,} papers, {:,} authors, {:,} edges'.format(*biadjacency.shape, biadjacency.nnz))
    sparse.save_npz('s2_2_bipartite_graph/paper_author_biadjacency.npz', biadjacency)
class FullTokenizer(object):
    """End-to-end tokenizer: basic tokenization followed by wordpiece."""

    def __init__(self, vocab_file, do_lower_case=True):
        self.vocab = load_vocab(vocab_file)
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        """Split `text` into basic tokens, then each token into wordpieces."""
        return [
            piece
            for token in self.basic_tokenizer.tokenize(text)
            for piece in self.wordpiece_tokenizer.tokenize(token)
        ]

    def convert_tokens_to_ids(self, tokens):
        """Map tokens to vocabulary ids via the module-level helper."""
        return convert_tokens_to_ids(self.vocab, tokens)
class Parse_Vuln():
    """Merge per-tool vulnerability findings into a majority-vote JSON report.

    NOTE(review): ``json_data``, ``filename`` and ``tools_info_obj`` are
    class-level (shared, mutable) attributes; concurrent or repeated use of
    multiple instances would share state -- confirm this is intended.
    """
    json_data = dict()
    filename = 'vulnerabilities.json'
    tools_info_obj = tools_info.Tools_Info()

    def ParseArgs(self):
        """Parse --src (input result file) and --dst (output JSON file)."""
        Args = argparse.ArgumentParser(description='Parser to parse vulnerability result file into JSON')
        Args.add_argument('--src', required=True, help='result file absolute path to parse')
        Args.add_argument('--dst', required=True, help='output file absolute path to generate JSON file from result')
        return Args.parse_args()

    def read_file_content(self, args):
        """Read tool results from args.src, apply per-vulnerability vote
        thresholds, and write the aggregated report to args.dst.

        Input layout (as read below): data[filename][vuln][tool_name] is a
        list of line numbers, except the special 'LE' vulnerability which is
        counted once per reporting tool.
        """
        inpFile = args.src
        output_File = args.dst
        file_obj = open(inpFile)
        data = json.load(file_obj)
        # Populates tools_info_obj.vuln_threshold used below.
        self.tools_info_obj.get_threshold()
        for filename in data:
            final_result = {key: {} for key in self.tools_info_obj.vuln_threshold.keys()}
            majority_result = {}
            # First pass: initialize a zero counter per reported line
            # ('LE' is keyed on the constant line 1).
            for vuln in data[filename].keys():
                if (vuln != 'LE'):
                    for tool_name in data[filename][vuln].keys():
                        for lineno in data[filename][vuln][tool_name]:
                            final_result[vuln][lineno] = 0
                if (vuln == 'LE'):
                    for tool_name in data[filename][vuln]:
                        final_result[vuln][1] = 0
            # Second pass: count how many tools reported each line.
            for vuln in data[filename].keys():
                if (vuln != 'LE'):
                    for tool_name in data[filename][vuln].keys():
                        for lineno in data[filename][vuln][tool_name]:
                            final_result[vuln][lineno] += 1
                if (vuln == 'LE'):
                    for tool_name in data[filename][vuln]:
                        final_result[vuln][1] += 1
            # Drop vulnerabilities with no reported lines.
            final_result = dict([(k, v) for (k, v) in final_result.items() if (len(v) > 0)])
            for vuln in final_result:
                majority_result[vuln] = []
            print_vuln = []
            # Keep only lines whose vote count reaches the per-vuln threshold.
            for vuln in final_result:
                for lineno in final_result[vuln]:
                    if (final_result[vuln][lineno] >= self.tools_info_obj.vuln_threshold[vuln]):
                        if (vuln == 'LE'):
                            majority_result[vuln].append('Yes')
                        elif (vuln != 'LE'):
                            majority_result[vuln].append(lineno)
            # Keep non-empty entries (the int check guards a legacy shape).
            majority_result = dict([(k, v) for (k, v) in majority_result.items() if ((type(v) == int) or (len(v) > 0))])
            self.json_data[filename] = majority_result
        # Drop files that ended up with no majority findings at all.
        self.json_data = dict([(k, v) for (k, v) in self.json_data.items() if (len(v) > 0)])
        file_obj.close()
        with open(output_File, 'w') as f:
            json.dump(self.json_data, f, sort_keys=True, indent=4)
class JointPPO():
    """PPO updater that trains a single actor-critic from several rollouts.

    Advantages are normalized per rollout; minibatches are drawn jointly from
    all rollouts by ``magent_feed_forward_generator``.
    """

    def __init__(self, actor_critic, clip_param, ppo_epoch, num_mini_batch, value_loss_coef, entropy_coef, lr=None, eps=None, max_grad_norm=None, use_clipped_value_loss=False):
        # NOTE(review): `eps` is accepted but never forwarded to the Adam
        # optimizer -- confirm whether optim.Adam(..., eps=eps) was intended.
        self.actor_critic = actor_critic
        self.clip_param = clip_param
        self.ppo_epoch = ppo_epoch
        self.num_mini_batch = num_mini_batch
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.max_grad_norm = max_grad_norm
        self.use_clipped_value_loss = use_clipped_value_loss
        self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr)

    def update(self, rollouts_list):
        """Run ``ppo_epoch`` epochs of clipped-surrogate PPO updates.

        Args:
            rollouts_list: Rollout storages providing returns, value
                predictions, observations, actions and log-probs.

        Returns:
            Tuple of per-update averages:
            (value_loss_epoch, action_loss_epoch, dist_entropy_epoch).
        """
        # Per-rollout advantage estimates, normalized to zero mean / unit std
        # (the 1e-05 guards against division by zero).
        advantages_list = []
        for rollout in rollouts_list:
            advantages = (rollout.returns[:(- 1)] - rollout.value_preds[:(- 1)])
            advantages = ((advantages - advantages.mean()) / (advantages.std() + 1e-05))
            advantages_list.append(advantages)
        value_loss_epoch = 0
        action_loss_epoch = 0
        dist_entropy_epoch = 0
        for e in range(self.ppo_epoch):
            if self.actor_critic.is_recurrent:
                raise NotImplementedError('sampler not implemented for recurrent policies')
            else:
                data_generator = magent_feed_forward_generator(rollouts_list, advantages_list, self.num_mini_batch)
            for sample in data_generator:
                (obs_batch, recurrent_hidden_states_batch, actions_batch, value_preds_batch, return_batch, masks_batch, old_action_log_probs_batch, adv_targ) = sample
                (values, action_log_probs, dist_entropy, _) = self.actor_critic.evaluate_actions(obs_batch, recurrent_hidden_states_batch, masks_batch, actions_batch)
                # PPO clipped surrogate objective: ratio of new to old policy,
                # clipped to [1 - clip_param, 1 + clip_param].
                ratio = torch.exp((action_log_probs - old_action_log_probs_batch))
                surr1 = (ratio * adv_targ)
                surr2 = (torch.clamp(ratio, (1.0 - self.clip_param), (1.0 + self.clip_param)) * adv_targ)
                action_loss = (- torch.min(surr1, surr2).mean())
                if self.use_clipped_value_loss:
                    # Clip the value update around the old predictions and
                    # take the pessimistic (larger) of the two losses.
                    value_pred_clipped = (value_preds_batch + (values - value_preds_batch).clamp((- self.clip_param), self.clip_param))
                    value_losses = (values - return_batch).pow(2)
                    value_losses_clipped = (value_pred_clipped - return_batch).pow(2)
                    value_loss = (0.5 * torch.max(value_losses, value_losses_clipped).mean())
                else:
                    value_loss = (0.5 * F.mse_loss(return_batch, values))
                self.optimizer.zero_grad()
                # Combined loss: value + policy - entropy bonus.
                (((value_loss * self.value_loss_coef) + action_loss) - (dist_entropy * self.entropy_coef)).backward()
                nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
                self.optimizer.step()
                value_loss_epoch += value_loss.item()
                action_loss_epoch += action_loss.item()
                dist_entropy_epoch += dist_entropy.item()
        num_updates = (self.ppo_epoch * self.num_mini_batch)
        value_loss_epoch /= num_updates
        action_loss_epoch /= num_updates
        dist_entropy_epoch /= num_updates
        return (value_loss_epoch, action_loss_epoch, dist_entropy_epoch)
def markinnerspaces(line):
    """Replace spaces inside quoted substrings of `line` with underscores.

    Both single- and double-quoted strings are handled, and backslash-escaped
    quote/backslash characters never open or close a string.

    Bug fix: the original tracked quoting with a counter that only ever
    incremented (its decrement branch was an unreachable duplicate of the
    increment condition), so only the *first* quoted string on a line had its
    spaces marked. A proper open/closed quote state fixes every string.

    Args:
        line: Input text, possibly containing quoted substrings.

    Returns:
        The text with each in-string space replaced by '_'.
    """
    out = ''
    quote_char = ''  # active quote character; empty when outside a string
    prev = ''
    for c in line:
        # An escaped backslash or quote is copied verbatim and never toggles
        # the quoting state (same escape rule as the original).
        if (prev == '\\') and (c in ('\\', "'", '"')):
            out += c
            prev = c
            continue
        if not quote_char:
            if c in ("'", '"'):
                quote_char = c  # opening quote
        elif c == quote_char:
            quote_char = ''  # closing quote
        elif c == ' ':
            out += '_'
            prev = c
            continue
        out += c
        prev = c
    return out
class MixSpec(ComplexitySpec):
    """Complexity specification mixing several dialogue phenomena.

    Pure configuration data; the values look like probabilities / weighted
    choices for the corresponding phenomena (presumably interpreted by
    ComplexitySpec consumers -- confirm against the base class contract).
    """
    # Simulated ASR accuracy and its spread.
    environment = {'asr_acc': 0.7, 'asr_std': 0.15}
    # Content of user propositions: yes/no questions, rejection style,
    # number of slots / goals per utterance, and "don't care" answers.
    proposition = {'yn_question': 0.4, 'reject_style': {'reject': 0.5, 'reject+inform': 0.5}, 'multi_slots': {1: 0.7, 2: 0.3}, 'dont_care': 0.1, 'multi_goals': {1: 0.6, 2: 0.4}}
    # Disfluency phenomena.
    interaction = {'hesitation': 0.4, 'self_restart': 0.1, 'self_correct': 0.2}
    # Social behaviours are disabled (None) in this mix.
    social = {'self_disclosure': None, 'ref_shared': None, 'violation_sn': None}
class IBMVPCInstance():
    """Lifecycle and SSH helper for a single IBM VPC virtual server instance.

    Wraps creation, start/stop/delete, floating-IP attachment and SSH
    readiness probing on top of an ``ibm_vpc`` client.

    NOTE(review): several methods below are preceded by bare tuples such as
    ``('ibm_cloud_sdk_core', pip_extra='ibmcloud')`` and receive module
    objects as their first parameters; these look like lazy-import decorators
    whose ``@name`` was stripped -- as written the tuples are not valid
    decorators. Confirm against the original source.
    """

    def __init__(self, name, ibm_vpc_config, ibm_vpc_client=None, public=False):
        # Instance names are normalized to lowercase.
        self.name = name.lower()
        self.config = ibm_vpc_config
        self.delete_on_dismantle = self.config['delete_on_dismantle']
        self.profile_name = self.config['worker_profile_name']
        self.vpc_cli = None
        # Use the injected client if given, otherwise build one lazily.
        self.vpc_cli = (ibm_vpc_client or self._create_vpc_client())
        self.public = public
        self.ssh_client = None
        self.instance_id = None
        self.instance_data = None
        self.private_ip = None
        self.public_ip = None
        self.floating_ip_id = None
        self.home_dir = '/root'
        self.ssh_credentials = {'username': self.config['ssh_username'], 'password': self.config['ssh_password'], 'key_filename': self.config.get('ssh_key_filename', '~/.ssh/id_rsa')}
        # Set once the local private key is matched against the VM's keys.
        self.validated = False

    def __str__(self):
        ip = (self.public_ip if self.public else self.private_ip)
        if ((ip is None) or (ip == '0.0.0.0')):
            return f'VM instance {self.name}'
        else:
            return f'VM instance {self.name} ({ip})'

    # NOTE(review): likely a stripped lazy-import decorator (see class docstring).
    ('ibm_vpc', 'ibm_cloud_sdk_core', pip_extra='ibmcloud')
    def _create_vpc_client(ibm_vpc, ibm_cloud_sdk_core, self):
        """Build an authenticated VPC client pointed at the configured endpoint."""
        authenticator = ibm_cloud_sdk_core.authenticators.IAMAuthenticator(self.config.get('iam_api_key'), url=self.config.get('iam_endpoint'))
        ibm_vpc_client = ibm_vpc.VpcV1(VPC_API_VERSION, authenticator=authenticator)
        ibm_vpc_client.set_service_url((self.config['endpoint'] + '/v1'))
        # Wrap client calls with retry behaviour on transient API errors.
        decorate_instance(self.vpc_cli, vpc_retry_on_except)
        return ibm_vpc_client

    def get_ssh_client(self):
        """Return (creating if needed) an SSH client for this instance.

        For public instances, first verifies that the local private key is a
        pair of one of the instance's registered public keys.
        """
        if ((not self.validated) and self.public and self.instance_id):
            key_filename = self.ssh_credentials['key_filename']
            key_filename = os.path.abspath(os.path.expanduser(key_filename))
            if (not os.path.exists(key_filename)):
                raise Exception(f"Private key file {key_filename} doesn't exist")
            initialization_data = self.vpc_cli.get_instance_initialization(self.instance_id).get_result()
            # Compare the local key's public part with each registered key.
            private_res = paramiko.RSAKey(filename=key_filename).get_base64()
            names = []
            for k in initialization_data['keys']:
                public_res = self.vpc_cli.get_key(k['id']).get_result()['public_key'].split(' ')[1]
                if (public_res == private_res):
                    self.validated = True
                    break
                else:
                    names.append(k['name'])
            if (not self.validated):
                raise Exception(f'No public key from keys: {names} on master {self} not a pair for private ssh key {key_filename}')
        if (self.private_ip or self.public_ip):
            if (not self.ssh_client):
                # Prefer the public IP when available.
                self.ssh_client = SSHClient((self.public_ip or self.private_ip), self.ssh_credentials)
        return self.ssh_client

    def del_ssh_client(self):
        """Close and forget the cached SSH client (best effort)."""
        if self.ssh_client:
            try:
                self.ssh_client.close()
            except Exception:
                pass
            self.ssh_client = None

    def is_ready(self):
        """Return True if a trivial remote command succeeds over SSH."""
        login_type = ('password' if (('password' in self.ssh_credentials) and (not self.public)) else 'publickey')
        try:
            self.get_ssh_client().run_remote_command('id')
        except Exception as err:
            logger.debug(f'SSH to {(self.public_ip if self.public else self.private_ip)} failed ({login_type}): {err}')
            # Drop the failed client so the next attempt reconnects cleanly.
            self.del_ssh_client()
            return False
        return True

    def wait_ready(self, timeout=INSTANCE_START_TIMEOUT):
        """Poll every 5s until SSH is ready; raise TimeoutError on expiry."""
        logger.debug(f'Waiting {self} to become ready')
        start = time.time()
        # Resolving the IP may itself block until the instance reports one.
        if self.public:
            self.get_public_ip()
        else:
            self.get_private_ip()
        while ((time.time() - start) < timeout):
            if self.is_ready():
                start_time = round((time.time() - start), 2)
                logger.debug(f'{self} ready in {start_time} seconds')
                return True
            time.sleep(5)
        raise TimeoutError(f'Readiness probe expired on {self}')

    # NOTE(review): likely a stripped lazy-import decorator (see class docstring).
    ('ibm_cloud_sdk_core', pip_extra='ibmcloud')
    def _create_instance(ibm_cloud_sdk_core, self, user_data):
        """Create the VM from config; tolerate 'already exists' responses.

        Returns the instance description dict from the API.
        """
        logger.debug('Creating new VM instance {}'.format(self.name))
        security_group_identity_model = {'id': self.config['security_group_id']}
        subnet_identity_model = {'id': self.config['subnet_id']}
        primary_network_interface = {'name': 'eth0', 'subnet': subnet_identity_model, 'security_groups': [security_group_identity_model]}
        # Boot volume gets a short random suffix to keep names unique.
        boot_volume_data = {'capacity': self.config['boot_volume_capacity'], 'name': f'{self.name}-{str(uuid.uuid4())[:4]}-boot', 'profile': {'name': self.config['boot_volume_profile']}}
        boot_volume_attachment = {'delete_volume_on_instance_delete': True, 'volume': boot_volume_data}
        key_identity_model = {'id': self.config['ssh_key_id']}
        instance_prototype = {}
        instance_prototype['name'] = self.name
        instance_prototype['keys'] = [key_identity_model]
        instance_prototype['profile'] = {'name': self.profile_name}
        instance_prototype['resource_group'] = {'id': self.config['resource_group_id']}
        instance_prototype['vpc'] = {'id': self.config['vpc_id']}
        instance_prototype['image'] = {'id': self.config['image_id']}
        instance_prototype['zone'] = {'name': self.config['zone_name']}
        instance_prototype['boot_volume_attachment'] = boot_volume_attachment
        instance_prototype['primary_network_interface'] = primary_network_interface
        if user_data:
            instance_prototype['user_data'] = user_data
        try:
            resp = self.vpc_cli.create_instance(instance_prototype)
        except ibm_cloud_sdk_core.ApiException as err:
            if ((err.code == 400) and ('already exists' in err.message)):
                # Race with a concurrent create: reuse the existing instance.
                return self.get_instance_data()
            elif ((err.code == 400) and ('over quota' in err.message)):
                logger.debug(f'Create VM instance {self.name} failed due to quota limit')
            else:
                logger.debug('Create VM instance {} failed with status code {}: {}'.format(self.name, str(err.code), err.message))
            raise err
        logger.debug('VM instance {} created successfully '.format(self.name))
        return resp.result

    def _attach_floating_ip(self, fip, fip_id, instance):
        """Attach the floating IP to eth0 unless it is already attached."""
        instance_primary_ni = instance['primary_network_interface']
        if (instance_primary_ni['primary_ipv4_address'] and (instance_primary_ni['id'] == fip_id)):
            logger.debug('Floating IP {} already attached to eth0'.format(fip))
        else:
            self.vpc_cli.add_instance_network_interface_floating_ip(instance['id'], instance['network_interfaces'][0]['id'], fip_id)

    def _delete_floating_ip(self, fip_id):
        """Release the floating IP (response intentionally ignored)."""
        response = self.vpc_cli.delete_floating_ip(id=fip_id)

    def get_instance_data(self):
        """Refresh and return the API description of this instance by name."""
        instances_data = self.vpc_cli.list_instances(name=self.name).get_result()
        if (len(instances_data['instances']) > 0):
            self.instance_data = instances_data['instances'][0]
        return self.instance_data

    def get_instance_id(self):
        """Return the cached instance id, resolving it by name if needed."""
        if (not self.instance_id):
            instance_data = self.get_instance_data()
            if instance_data:
                self.instance_id = instance_data['id']
            else:
                logger.debug(f'VM instance {self.name} does not exists')
        return self.instance_id

    def get_private_ip(self):
        """Block (polling once per second) until the private IP is assigned."""
        while ((not self.private_ip) or (self.private_ip == '0.0.0.0')):
            time.sleep(1)
            instance_data = self.get_instance_data()
            self.private_ip = instance_data['primary_network_interface']['primary_ipv4_address']
        return self.private_ip

    def get_public_ip(self):
        """Return the cached public IP (set externally)."""
        return self.public_ip

    def create(self, check_if_exists=False, user_data=None):
        """Ensure the VM exists (creating or starting it) and return its id."""
        instance = None
        vsi_exists = (True if self.instance_id else False)
        if (check_if_exists and (not vsi_exists)):
            logger.debug('Checking if VM instance {} already exists'.format(self.name))
            instances_data = self.get_instance_data()
            if instances_data:
                logger.debug('VM instance {} already exists'.format(self.name))
                vsi_exists = True
                self.instance_id = instances_data['id']
        if (not vsi_exists):
            instance = self._create_instance(user_data=user_data)
            self.instance_id = instance['id']
        else:
            # Existing instances are started rather than recreated.
            self.start()
        if (self.public and instance):
            self._attach_floating_ip(self.public_ip, self.floating_ip_id, instance)
        return self.instance_id

    # NOTE(review): likely a stripped lazy-import decorator (see class docstring).
    ('ibm_cloud_sdk_core', pip_extra='ibmcloud')
    def start(ibm_cloud_sdk_core, self):
        """Start the VM; a 404 (already gone) is deliberately ignored."""
        logger.debug('Starting VM instance {}'.format(self.name))
        try:
            self.vpc_cli.create_instance_action(self.instance_id, 'start')
        except ibm_cloud_sdk_core.ApiException as err:
            if (err.code == 404):
                pass
            else:
                raise err
        logger.debug('VM instance {} started successfully'.format(self.name))

    # NOTE(review): likely a stripped lazy-import decorator (see class docstring).
    ('ibm_cloud_sdk_core', pip_extra='ibmcloud')
    def _delete_instance(ibm_cloud_sdk_core, self):
        """Delete the VM (404 ignored) and clear cached connection state."""
        logger.debug('Deleting VM instance {}'.format(self.name))
        try:
            self.vpc_cli.delete_instance(self.instance_id)
        except ibm_cloud_sdk_core.ApiException as err:
            if (err.code == 404):
                pass
            else:
                raise err
        self.instance_id = None
        self.private_ip = None
        self.del_ssh_client()

    # NOTE(review): likely a stripped lazy-import decorator (see class docstring).
    ('ibm_cloud_sdk_core', pip_extra='ibmcloud')
    def _stop_instance(ibm_cloud_sdk_core, self):
        """Stop the VM; a 404 (already gone) is deliberately ignored."""
        logger.debug('Stopping VM instance {}'.format(self.name))
        try:
            self.vpc_cli.create_instance_action(self.instance_id, 'stop')
        except ibm_cloud_sdk_core.ApiException as err:
            if (err.code == 404):
                pass
            else:
                raise err

    def stop(self):
        """Dismantle: delete or merely stop, per `delete_on_dismantle`."""
        if self.delete_on_dismantle:
            self._delete_instance()
        else:
            self._stop_instance()

    def delete(self):
        """Delete the VM and release its floating IP."""
        self._delete_instance()
        self._delete_floating_ip(self.floating_ip_id)

    def validate_capabilities(self):
        """If 'singlesocket' is configured, verify the VM exposes one CPU socket."""
        if self.config.get('singlesocket'):
            cmd = "lscpu -p=socket|grep -v '#'"
            res = self.get_ssh_client().run_remote_command(cmd)
            # The command prints one socket id per line; collect distinct ids.
            sockets = set()
            for char in res:
                if (char != '\n'):
                    sockets.add(char)
            if (len(sockets) != 1):
                raise Exception(f'Not using single CPU socket as specified, using {len(sockets)} sockets instead')
class PinyinTestCase(unittest.TestCase):
    """Checks that the module-level corrector `m` leaves valid pinyin alone."""

    def _assert_uncorrected(self, words):
        # Run the corrector over each word, keeping the reported error lists.
        error_lists = []
        for word in words:
            corrected, errors = m.correct(word)
            print(corrected, errors)
            error_lists.append(errors)
        # Valid pinyin must yield no corrections.
        for errors in error_lists:
            self.assertEqual(errors, [])

    def test_single_pinyin(self):
        self._assert_uncorrected(['zhuan', 'zuo'])

    def test_full_pinyin(self):
        self._assert_uncorrected(['xingfu', 'pingguo'])
class IntFormat(object):
    """Fortran-style integer format descriptor, e.g. ``(3I10.5)``.

    Attributes:
        width: Field width of each integer.
        min: Optional minimum number of digits (zero-padded).
        repeat: Optional repeat count per line.
    """

    # Fix: the original omitted @classmethod although the first parameter is
    # named `cls` and the method is meant as an alternate constructor.
    @classmethod
    def from_number(cls, n, min=None):
        """Build a format wide enough for `n`, repeated to fill an 80-col line.

        Relies on the module-level ``number_digits`` helper.
        """
        # +1 leaves a separating blank column; negatives need one more for '-'.
        width = (number_digits(n) + 1)
        if (n < 0):
            width += 1
        repeat = (80 // width)
        return cls(width, min, repeat=repeat)

    def __init__(self, width, min=None, repeat=None):
        self.width = width
        self.repeat = repeat
        self.min = min

    def _descriptor(self):
        """Core edit descriptor shared by __repr__ and fortran_format, e.g. '3I10.5'."""
        r = ''
        if self.repeat:
            r += ('%d' % self.repeat)
        r += ('I%d' % self.width)
        if self.min:
            r += ('.%d' % self.min)
        return r

    def __repr__(self):
        return ('IntFormat(' + self._descriptor()) + ')'

    def fortran_format(self):
        """Return the full Fortran format string, e.g. '(3I10.5)'."""
        return ('(' + self._descriptor()) + ')'

    def python_format(self):
        """Return the equivalent printf-style Python format, e.g. '%10d'."""
        return (('%' + str(self.width)) + 'd')
class Vocab(object):
    """Word/entity vocabulary backed by two trie-like dictionaries.

    The dictionaries are expected to expose ``key_id``/``restore_key``
    (e.g. ``marisa_trie.Trie``); size/iteration methods work with any mapping.
    """
    __slots__ = ('_word_dict', '_entity_dict')

    def __init__(self, word_dict, entity_dict):
        self._word_dict = word_dict
        self._entity_dict = entity_dict

    def word_size(self):
        """Number of words in the vocabulary."""
        return len(self._word_dict)

    def entity_size(self):
        """Number of entities in the vocabulary."""
        return len(self._entity_dict)

    def words(self):
        """Iterator over all words."""
        return iter(self._word_dict)

    def entities(self):
        """Iterator over all entities."""
        return iter(self._entity_dict)

    def get_word_index(self, word, default=None):
        """Return the index of `word`, or `default` if it is absent."""
        try:
            return self._word_dict.key_id(word)
        except KeyError:
            return default

    def get_entity_index(self, entity, default=None):
        """Return the index of `entity`, or `default` if it is absent."""
        try:
            return self._entity_dict.key_id(entity)
        except KeyError:
            return default

    def get_word_by_index(self, index):
        """Return the word stored at `index`."""
        return self._word_dict.restore_key(index)

    def get_entity_by_index(self, index):
        """Return the entity stored at `index`."""
        return self._entity_dict.restore_key(index)

    # @staticmethod restored: the method takes no self/cls. On Python 3
    # Vocab.build(...) also worked undecorated, so this is backward compatible.
    @staticmethod
    def build(db, entity_db, min_word_count, min_entity_count):
        """Build a Vocab by counting words and resolved entity links in `db`.

        Words/entities below the given minimum counts are dropped.
        """
        word_counter = Counter()
        entity_counter = Counter()
        tokenizer = RegexpTokenizer()
        with click.progressbar(db.keys()) as bar:
            for title in bar:
                obj = db[title]
                text = obj['text']
                tokens = tokenizer.tokenize(text)
                word_counter.update((t.text.lower() for t in tokens))
                for (_, title, _) in obj['links']:
                    title = entity_db.resolve_redirect(title)
                    entity_counter[title] += 1
        # Fix: Counter.iteritems() is Python 2 only; items() is the
        # Python 3 API (this file is Python 3 -- it uses f-strings).
        word_dict = Trie([w for (w, c) in word_counter.items() if (c >= min_word_count)])
        entity_dict = Trie([e for (e, c) in entity_counter.items() if (c >= min_entity_count)])
        return Vocab(word_dict, entity_dict)

    def save(self, out_file):
        """Persist both tries next to `out_file` (suffixes _word/_entity.trie)."""
        self._word_dict.save((out_file + '_word.trie'))
        self._entity_dict.save((out_file + '_entity.trie'))

    @staticmethod
    def load(in_file, mmap=True):
        """Load a Vocab saved by :meth:`save`.

        NOTE(review): `mmap` is accepted for API compatibility but the tries
        are always memory-mapped here.
        """
        word_dict = Trie()
        entity_dict = Trie()
        word_dict.mmap((in_file + '_word.trie'))
        entity_dict.mmap((in_file + '_entity.trie'))
        return Vocab(word_dict, entity_dict)

    def __reduce__(self):
        # Pickle by reconstructing from the two dictionaries.
        return (self.__class__, (self._word_dict, self._entity_dict))
def click_play_button_off():
    """Locate the 'play off' button on screen by template image, double-click
    it, then park the cursor out of the way at (1000, 1000)."""
    target_image = 'screenshots/play_button_off.PNG'
    pyautogui.doubleClick(target_image)
    pyautogui.moveTo(1000, 1000, duration=0)
def compute_metrics_for_regression(y_test, y_test_pred):
    """Compute the per-dimension RMSE of multi-output regression predictions.

    Args:
        y_test: Sequence of true label vectors, one value per DIMENSIONS entry.
        y_test_pred: Sequence of predicted label vectors, same layout.

    Returns:
        Dict mapping 'rmse_<task>' to the RMSE for that dimension.
    """
    metrics = {}
    # enumerate() replaces the original's repeated DIMENSIONS.index(task)
    # lookups (redundant O(n*m) work, and wrong if DIMENSIONS ever contained
    # a duplicate name).
    for dim_idx, task in enumerate(DIMENSIONS):
        targets_task = [row[dim_idx] for row in y_test]
        pred_task = [row[dim_idx] for row in y_test_pred]
        # squared=False yields the root of the MSE, i.e. the RMSE.
        # NOTE(review): sklearn deprecated the `squared` kwarg in 1.4
        # (root_mean_squared_error is the replacement) -- keep while the
        # project pins an older sklearn.
        metrics[f'rmse_{task}'] = mean_squared_error(targets_task, pred_task, squared=False)
    return metrics
class DatasetSampler(Dataset):
    """Minimal dataset wrapper serving rows of `x` cast to float32."""

    def __init__(self, x):
        # Stored as-is; casting happens lazily per item.
        self.x = x

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        sample = self.x[idx]
        return sample.astype('float32')
def check_build_sdist(hooks, build_sys_requires):
    """Exercise a PEP 517 backend's sdist path end to end.

    Installs static and hook-reported build dependencies into a fresh build
    environment, builds an sdist into a temp dir, and sanity-checks the
    produced file. Returns True on success, False on any failure (each
    failure is logged).
    """
    with BuildEnvironment() as env:
        # Static (pyproject-declared) build requirements.
        try:
            env.pip_install(build_sys_requires)
            log.info('Installed static build dependencies')
        except CalledProcessError:
            log.error('Failed to install static build dependencies')
            return False

        # Dynamic requirements reported by the backend hook.
        try:
            reqs = hooks.get_requires_for_build_sdist({})
            log.info('Got build requires: %s', reqs)
        except Exception:
            log.error('Failure in get_requires_for_build_sdist', exc_info=True)
            return False

        try:
            env.pip_install(reqs)
            log.info('Installed dynamic build dependencies')
        except CalledProcessError:
            log.error('Failed to install dynamic build dependencies')
            return False

        tmpdir = mkdtemp()
        log.info('Trying to build sdist in %s', tmpdir)
        try:
            try:
                filename = hooks.build_sdist(tmpdir, {})
                log.info('build_sdist returned %r', filename)
            except Exception:
                log.info('Failure in build_sdist', exc_info=True)
                return False

            # Guard clauses over if/else: each check bails out with a log.
            if not filename.endswith('.tar.gz'):
                log.error("Filename %s doesn't have .tar.gz extension", filename)
                return False

            path = pjoin(tmpdir, filename)
            if not isfile(path):
                log.error('Output file %s does not exist', path)
                return False
            log.info('Output file %s exists', path)

            if not tarfile.is_tarfile(path):
                log.error('Output file is not a tar file')
                return False
            log.info('Output file is a tar file')
        finally:
            # Always clean up the temp build directory.
            shutil.rmtree(tmpdir)

        return True
# Fix: the decorator had been corrupted to a bare `.parametrize(...)` (a
# syntax error); restore the conventional pytest marker. sorted() makes the
# parametrization order deterministic (bare set iteration varies per run,
# which breaks pytest-xdist test collection).
@pytest.mark.parametrize('task_name', sorted(all_tasks - julia_tasks))
def test_obtain_prior_from_task(task_name):
    """Every non-Julia task must expose a (non-None) prior via get_prior()."""
    task = get_task(task_name)
    prior = task.get_prior()
    assert (prior is not None)
def standard_confusion_matrix(y_test, y_test_pred):
    """Return the confusion matrix rearranged as [[TP, FP], [FN, TN]].

    The upstream ``confusion_matrix`` is assumed to return the
    [[TN, FP], [FN, TP]] layout (as the unpacking below encodes); this
    flips it into the conventional layout with true positives first.
    """
    cm = confusion_matrix(y_test, y_test_pred)
    [[tn, fp], [fn, tp]] = cm
    return np.array([[tp, fp], [fn, tn]])
class ASPP_Efficientnetv2(nn.Module):
    """Atrous Spatial Pyramid Pooling (ASPP) decoder head for an
    EfficientNetV2-style encoder.

    The pyramid has four parallel 512->256 branches (a 1x1 conv plus 3x3
    atrous convs at dilations 6, 12 and 18) and a global-average-pool image
    branch.  Their concatenation (5 * 256 = 1280 channels) is projected back
    to 256 channels, upsampled twice (optionally fusing a mid-level encoder
    feature ``l3``), concatenated with a low-level feature ``l2`` and mapped
    to ``num_classes`` logits.
    """

    def __init__(self, num_classes, concat=True, output_kernel_size=1):
        """
        Args:
            num_classes: number of output channels of the final classifier.
            concat: if True, fuse the mid-level feature ``l3`` after the
                first upsample stage (skip connection); otherwise the two
                upsample stages run back to back.
            output_kernel_size: kernel size (1, 2 or 3) of the final
                classifier conv; padding is chosen so spatial size is kept.
        """
        super(ASPP_Efficientnetv2, self).__init__()
        self.concat = concat
        # --- ASPP branches: each maps the 512-channel input to 256 channels.
        self.conv_1x1_1 = nn.Conv2d(512, 256, kernel_size=1)
        self.bn_conv_1x1_1 = nn.BatchNorm2d(256)
        # Atrous 3x3 convs; padding == dilation keeps the spatial size.
        self.conv_3x3_1 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=6, dilation=6)
        self.bn_conv_3x3_1 = nn.BatchNorm2d(256)
        self.conv_3x3_2 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=12, dilation=12)
        self.bn_conv_3x3_2 = nn.BatchNorm2d(256)
        self.conv_3x3_3 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=18, dilation=18)
        self.bn_conv_3x3_3 = nn.BatchNorm2d(256)
        # Image-level branch: global average pool then 1x1 projection.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_1x1_2 = nn.Conv2d(512, 256, kernel_size=1)
        self.bn_conv_1x1_2 = nn.BatchNorm2d(256)
        # Projects the 5-branch concatenation (5 * 256 = 1280) back to 256.
        self.conv_1x1_3 = nn.Conv2d(1280, 256, kernel_size=1)
        self.bn_conv_1x1_3 = nn.BatchNorm2d(256)
        # Transpose-conv geometry for exact 2x upsampling at stride 2:
        # k=3 needs padding=1/output_padding=1; k=2 would need padding=0.
        kernel_size = 3
        padding = 1
        output_padding = 0
        if (kernel_size == 3):
            output_padding = 1
        elif (kernel_size == 2):
            padding = 0
        if self.concat:
            self.upsample_1 = self.upsample(256, 256, 3, padding, output_padding)
            # Second stage consumes the l3 skip connection (assumes l3 has
            # 48 channels — TODO confirm against the encoder).
            self.upsample_2 = self.upsample((256 + 48), 256, 3, padding, output_padding)
        else:
            self.upsample_1 = self.upsample(256, 256, 3, padding, output_padding)
            self.upsample_2 = self.upsample(256, 256, 3, padding, output_padding)
        # Padding for the final classifier so spatial size is preserved.
        if (output_kernel_size == 3):
            padding = 1
        elif (output_kernel_size == 2):
            padding = 0
        elif (output_kernel_size == 1):
            padding = 0
        # Final classifier consumes x concatenated with the low-level l2
        # feature (assumes l2 has 24 channels — TODO confirm).
        self.conv_1x1_4 = nn.Conv2d((256 + 24), num_classes, kernel_size=output_kernel_size, padding=padding)

    def upsample(self, in_channels, num_filters, kernel_size, padding, output_padding):
        """Build a 2x upsampling stage: one stride-2 transpose conv followed
        by two 3x3 conv-BN-ReLU refinement layers."""
        upsample_layer = nn.Sequential(nn.ConvTranspose2d(in_channels, num_filters, kernel_size=kernel_size, stride=2, padding=padding, output_padding=output_padding, bias=False), nn.BatchNorm2d(num_filters), nn.ReLU(inplace=True), nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(num_filters), nn.ReLU(inplace=True), nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(num_filters), nn.ReLU(inplace=True))
        return upsample_layer

    def forward(self, x_high_feature, l3=None, l2=None):
        """Run the ASPP pyramid and decoder.

        Args:
            x_high_feature: high-level encoder feature, 512 channels.
            l3: mid-level skip feature; used only when ``self.concat``.
            l2: low-level skip feature; NOTE(review): concatenated
                unconditionally before the final conv, so it must not be
                None even when ``concat`` is False — confirm with callers.

        Returns:
            Per-pixel logits with ``num_classes`` channels.
        """
        feature_map_h = x_high_feature.size()[2]
        feature_map_w = x_high_feature.size()[3]
        # Four parallel conv branches over the same input.
        out_1x1 = F.relu(self.bn_conv_1x1_1(self.conv_1x1_1(x_high_feature)))
        out_3x3_1 = F.relu(self.bn_conv_3x3_1(self.conv_3x3_1(x_high_feature)))
        out_3x3_2 = F.relu(self.bn_conv_3x3_2(self.conv_3x3_2(x_high_feature)))
        out_3x3_3 = F.relu(self.bn_conv_3x3_3(self.conv_3x3_3(x_high_feature)))
        # Image-level branch: pool to 1x1, project, then resize back up.
        out_img = self.avg_pool(x_high_feature)
        out_img = F.relu(self.bn_conv_1x1_2(self.conv_1x1_2(out_img)))
        out_img = F.interpolate(out_img, size=(feature_map_h, feature_map_w), mode='bilinear')
        # Fuse all five branches along the channel axis (1280 channels).
        out = torch.cat([out_1x1, out_3x3_1, out_3x3_2, out_3x3_3, out_img], 1)
        out = F.relu(self.bn_conv_1x1_3(self.conv_1x1_3(out)))
        if self.concat:
            x = self.upsample_1(out)
            # Inject the l3 skip connection between the two upsample stages.
            x = torch.cat([x, l3], 1)
            x = self.upsample_2(x)
        else:
            x = self.upsample_1(out)
            x = self.upsample_2(x)
        # Shared tail: fuse l2 and classify (conv_1x1_4 expects 256 + 24).
        x = self.conv_1x1_4(torch.cat([x, l2], 1))
        return x
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.