code stringlengths 101 5.91M |
|---|
def per_class_iu(hist):
    """Compute per-class intersection-over-union from a confusion matrix.

    hist[i, j] counts samples of true class i predicted as class j.
    Returns a 1-D array with one IoU value per class.
    """
    intersection = np.diag(hist)
    union = hist.sum(axis=1) + hist.sum(axis=0) - intersection
    return intersection / union
def stirling_series(N):
    """Return the first N coefficients B_{2n} / (2n * (2n - 1)) of Stirling's
    asymptotic series, computed at 100 decimal digits of working precision."""
    with mpmath.workdps(100):
        return [mpmath.bernoulli(2 * n) / (2 * n * (2 * n - 1))
                for n in range(1, N + 1)]
def test_unequal_union():
    """Two union arrays that differ only in the trailing value must not compare equal."""
    left = ak.from_iter([1, None, {'x': 2}, 3], highlevel=False)
    right = ak.from_iter([1, None, {'x': 2}, 2], highlevel=False)
    assert not left.is_equal_to(right)
class AutoModel(object):
    """Factory that instantiates the concrete model class matching a config
    object or a pretrained checkpoint identifier.

    This class cannot be instantiated directly; use the `from_config` or
    `from_pretrained` class methods instead.
    """

    def __init__(self):
        raise EnvironmentError('AutoModel is designed to be instantiated using the `AutoModel.from_pretrained(pretrained_model_name_or_path)` or `AutoModel.from_config(config)` methods.')

    # BUGFIX: both factories take `cls` but were missing @classmethod, so
    # `AutoModel.from_config(cfg)` bound cfg to `cls` and failed.
    @classmethod
    def from_config(cls, config):
        """Instantiate (with fresh weights) the model class matching `config`.

        Raises:
            ValueError: if the configuration class is not recognized.
        """
        if isinstance(config, DistilBertConfig):
            return DistilBertModel(config)
        elif isinstance(config, RobertaConfig):
            return RobertaModel(config)
        elif isinstance(config, BertConfig):
            return BertModel(config)
        elif isinstance(config, OpenAIGPTConfig):
            return OpenAIGPTModel(config)
        elif isinstance(config, GPT2Config):
            return GPT2Model(config)
        elif isinstance(config, TransfoXLConfig):
            return TransfoXLModel(config)
        elif isinstance(config, XLNetConfig):
            return XLNetModel(config)
        elif isinstance(config, XLMConfig):
            return XLMModel(config)
        elif isinstance(config, CTRLConfig):
            return CTRLModel(config)
        elif isinstance(config, AlbertConfig):
            return AlbertModel(config)
        elif isinstance(config, CamembertConfig):
            return CamembertModel(config)
        elif isinstance(config, XLMRobertaConfig):
            return XLMRobertaModel(config)
        raise ValueError('Unrecognized configuration class {}'.format(config))

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Load a pretrained model, dispatching on substrings of the name.

        The order of the checks matters: more specific substrings
        ('distilbert', 'xlm-roberta', 'camembert', ...) are tested before
        the generic ones they contain ('roberta', 'bert', 'xlm').

        Raises:
            ValueError: if no known model type substring is found.
        """
        if 't5' in pretrained_model_name_or_path:
            return T5Model.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'distilbert' in pretrained_model_name_or_path:
            return DistilBertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'albert' in pretrained_model_name_or_path:
            return AlbertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'camembert' in pretrained_model_name_or_path:
            return CamembertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'xlm-roberta' in pretrained_model_name_or_path:
            return XLMRobertaModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'roberta' in pretrained_model_name_or_path:
            return RobertaModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'bert' in pretrained_model_name_or_path:
            return BertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'openai-gpt' in pretrained_model_name_or_path:
            return OpenAIGPTModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'gpt2' in pretrained_model_name_or_path:
            return GPT2Model.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'transfo-xl' in pretrained_model_name_or_path:
            return TransfoXLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'xlnet' in pretrained_model_name_or_path:
            return XLNetModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'xlm' in pretrained_model_name_or_path:
            return XLMModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif 'ctrl' in pretrained_model_name_or_path:
            return CTRLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        # BUGFIX: the message previously read "'roberta," with a missing quote.
        raise ValueError("Unrecognized model identifier in {}. Should contains one of 'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', 'xlm-roberta', 'xlm', 'roberta', 'ctrl', 'distilbert', 'camembert', 'albert'".format(pretrained_model_name_or_path))
class TinyImageNetDataset(ShardDataset):
    """Rank-sharded view of a Tiny ImageNet image folder.

    Images are read from ``<data_folder>/<data_type>``; the sorted file list
    is strided across ``worldsize`` workers so shards are disjoint.
    """

    # Tiny ImageNet ships 500 training images per class.
    NUM_IMAGES_PER_CLASS = 500

    def __init__(self, data_folder: Path, data_type='train', rank=1, worldsize=1):
        """Collect this shard's image paths and build the filename->label map.

        ``rank`` is 1-based: worker ``rank`` keeps every ``worldsize``-th
        image of the deterministic (sorted) listing.
        """
        self.data_type = data_type
        self._common_data_folder = data_folder
        self._data_folder = os.path.join(data_folder, data_type)
        self.labels = {}
        # Sorting first makes the stride-based sharding deterministic.
        self.image_paths = sorted(glob.iglob(os.path.join(self._data_folder, '**', '*.JPEG'), recursive=True))[(rank - 1)::worldsize]
        # wnids.txt lists the WordNet class ids; their sorted order defines
        # the integer label of each class.
        wnids_path = os.path.join(self._common_data_folder, 'wnids.txt')
        with open(wnids_path, 'r', encoding='utf-8') as fp:
            self.label_texts = sorted([text.strip() for text in fp.readlines()])
        self.label_text_to_number = {text: i for (i, text) in enumerate(self.label_texts)}
        self.fill_labels()

    def __len__(self) -> int:
        """Number of images in this shard."""
        return len(self.image_paths)

    def __getitem__(self, index: int) -> Tuple['Image', int]:
        """Return ``(PIL image, integer label)`` for the index-th shard image."""
        file_path = self.image_paths[index]
        label = self.labels[os.path.basename(file_path)]
        return (self.read_image(file_path), label)

    def read_image(self, path: Path) -> Image:
        """Open the image with PIL (decoding is deferred until pixel access)."""
        img = Image.open(path)
        return img

    def fill_labels(self) -> None:
        """Populate ``self.labels`` mapping file name -> class index.

        Training files follow the ``<wnid>_<n>.JPEG`` naming scheme;
        validation labels come from ``val_annotations.txt`` (tab-separated,
        columns: file name, wnid, ...).
        """
        if (self.data_type == 'train'):
            for (label_text, i) in self.label_text_to_number.items():
                for cnt in range(self.NUM_IMAGES_PER_CLASS):
                    self.labels[f'{label_text}_{cnt}.JPEG'] = i
        elif (self.data_type == 'val'):
            val_annotations_path = os.path.join(self._data_folder, 'val_annotations.txt')
            with open(val_annotations_path, 'r', encoding='utf-8') as fp:
                for line in fp.readlines():
                    terms = line.split('\t')
                    (file_name, label_text) = (terms[0], terms[1])
                    self.labels[file_name] = self.label_text_to_number[label_text]
class FNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for FNet.

    Wraps a SentencePiece model and optionally lower-cases, collapses
    whitespace and strips accents before encoding (ALBERT-style
    preprocessing).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'token_type_ids']

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None:
        """Load the SentencePiece model from `vocab_file` and register special tokens."""
        # The mask token eats the preceding space (lstrip=True) so
        # "x [MASK]" round-trips through tokenization correctly.
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token)
        self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    # BUGFIX: vocab_size must be a property -- get_vocab() below iterates
    # range(self.vocab_size), which raises TypeError on a bound method.
    @property
    def vocab_size(self):
        """Size of the SentencePiece vocabulary (excluding added tokens)."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable: drop it here and
        # reload it from vocab_file in __setstate__.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Backward compatibility with pickles created before sp_model_kwargs existed.
        if (not hasattr(self, 'sp_model_kwargs')):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Apply whitespace/quote normalization, optional accent stripping and lower-casing."""
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace("''", '"')
        if (not self.keep_accents):
            # NFKD separates accents into combining marks, which are then dropped.
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if (not unicodedata.combining(c))])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize into SentencePiece pieces, re-splitting pieces that end in
        a digit followed by a comma (e.g. "9,") so digits and punctuation
        become separate pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if ((len(piece) > 1) and (piece[(- 1)] == str(',')) and piece[(- 2)].isdigit()):
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:(- 1)].replace(SPIECE_UNDERLINE, ''))
                if ((piece[0] != SPIECE_UNDERLINE) and (cur_pieces[0][0] == SPIECE_UNDERLINE)):
                    # Drop the spurious leading word-boundary marker introduced
                    # by re-encoding a mid-word fragment.
                    if (len(cur_pieces[0]) == 1):
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[(- 1)])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocabulary."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str) using the SentencePiece vocabulary."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Decode a token sequence back to text, emitting special tokens verbatim."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            if (token in self.all_special_tokens):
                # Insert one separating space between a normal run and a special token.
                if (not prev_is_special):
                    out_string += ' '
                out_string += (self.sp_model.decode(current_sub_tokens) + token)
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(self, token_ids: List[int], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=None, spaces_between_special_tokens: bool=True, **kwargs) -> str:
        """Convert ids to text, decoding added tokens verbatim and SentencePiece
        runs via convert_tokens_to_string."""
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer', False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            # NOTE(review): this compares a token *string* against
            # all_special_ids (integers) and so is likely always False;
            # confirm upstream intent before changing it.
            if (skip_special_tokens and (token in self.all_special_ids)):
                continue
            if (token in self.added_tokens_encoder):
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        if spaces_between_special_tokens:
            # Collapse the space that would otherwise follow "<unk>".
            text = re.sub('(<unk>) ', '\\1', ' '.join(sub_texts))
        else:
            text = ''.join(sub_texts)
        clean_up_tokenization_spaces = (clean_up_tokenization_spaces if (clean_up_tokenization_spaces is not None) else self.clean_up_tokenization_spaces)
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Build model inputs: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return ((cls + token_ids_0) + sep)
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Segment ids: 0 for [CLS] A [SEP], 1 for B [SEP] when a pair is given."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the SentencePiece model into `save_directory`; return the path.

        Copies the original file when it exists, otherwise serializes the
        in-memory model.
        """
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if ((os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)) and os.path.isfile(self.vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        elif (not os.path.isfile(self.vocab_file)):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
@pytest.mark.parametrize('region_sparse,region_dense,base_sparse,base_dense,bias_sparse,bias_dense', [(0, 2, 0, 2, 0, 1), (0, 2, 0, 1, 0, 2), (0, 2, 0, 0, 1, 0), (0, 1, 1, 2, 1, 1), (0, 1, 1, 1, 1, 2), (0, 1, 1, 0, 2, 0), (1, 0, 2, 2, 2, 1), (2, 0, 2, 1, 2, 2), (2, 0, 2, 0, 0, 0)])
def test_MLRs(region_sparse, region_dense, base_sparse, base_dense, bias_sparse, bias_dense):
    """Smoke-test MLR construction and compilation over several feature mixes.

    BUGFIX: the parametrize decorator had lost its '@pytest.mark' prefix (a
    bare '.parametrize(...)' line is a syntax error), and the base/bias data
    was generated from the *region* parameters, leaving base_* and bias_*
    unused.
    """
    model_name = 'MLRs'
    (region_x, y, region_feature_columns) = get_test_data(SAMPLE_SIZE, region_sparse, region_dense, prefix='region')
    (base_x, y, base_feature_columns) = get_test_data(SAMPLE_SIZE, base_sparse, base_dense, prefix='base')
    (bias_x, y, bias_feature_columns) = get_test_data(SAMPLE_SIZE, bias_sparse, bias_dense, prefix='bias')
    model = MLR(region_feature_columns, base_feature_columns, bias_feature_columns=bias_feature_columns, device=get_device())
    model.compile('adam', 'binary_crossentropy', metrics=['binary_crossentropy'])
    print((model_name + ' test pass!'))
class LeNet(nn.Module):
    """LeNet-5 style convnet for single-channel 28x28 or 32x32 inputs.

    Checkpoints are stored under MODELS_DIR with a name encoding the input
    size and the class count; `pretrained=True` loads such a checkpoint.
    """

    def __init__(self, pretrained=False, num_classes=10, input_size=28, **kwargs):
        super(LeNet, self).__init__()
        suffix = f'dim{input_size}_nc{num_classes}'
        self.model_path = os.path.join(MODELS_DIR, f'lenet_mnist_{suffix}.pt')
        assert (input_size in [28, 32]), 'Can only do LeNet on 28x28 or 32x32 for now.'
        # After two conv+pool stages the spatial map is 5x5 (32px input)
        # or 4x4 (28px input), with 16 channels.
        if input_size == 32:
            self.feat_dim = 16 * 5 * 5
        else:
            self.feat_dim = 16 * 4 * 4
        self.num_classes = num_classes
        if input_size == 32:
            kernel = 3
        elif input_size == 28:
            kernel = 5
        else:
            raise ValueError()
        self.conv1 = nn.Conv2d(1, 6, kernel)
        self.conv2 = nn.Conv2d(6, 16, kernel)
        self._init_classifier()
        if pretrained:
            self.load_state_dict(torch.load(self.model_path))

    def _init_classifier(self, num_classes=None):
        """(Re)build the MLP head; defaults to self.num_classes outputs."""
        if num_classes is None:
            num_classes = self.num_classes
        self.classifier = nn.Sequential(
            nn.Linear(self.feat_dim, 120), nn.ReLU(), nn.Dropout(),
            nn.Linear(120, 84), nn.ReLU(), nn.Dropout(),
            nn.Linear(84, num_classes))

    def forward(self, x):
        out = F.max_pool2d(F.relu(self.conv1(x)), 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2)
        out = out.view(-1, self.num_flat_features(out))
        return self.classifier(out)

    def num_flat_features(self, x):
        """Product of all non-batch dimensions of x."""
        count = 1
        for dim in x.size()[1:]:
            count *= dim
        return count

    def save(self):
        """Persist the current weights to self.model_path."""
        torch.save(self.state_dict(), self.model_path)
class MultiAgentEnv(object):
    """Abstract multi-agent environment interface.

    Subclasses implement all NotImplementedError stubs; `get_env_info`
    additionally relies on subclass-defined `n_agents` and `episode_limit`.
    """

    def __init__(self, batch_size=None, **kwargs):
        # kwargs must contain 'env_args'; plain dicts are converted to an
        # attribute-style namespace via convert().
        args = kwargs['env_args']
        if isinstance(args, dict):
            args = convert(args)
        self.args = args
        # NOTE(review): assigning self.seed here shadows the seed() *method*
        # defined below on any instance whose args carry a seed — confirm
        # this is intentional before relying on env.seed(...) calls.
        if (getattr(args, 'seed', None) is not None):
            self.seed = args.seed
            self.rs = np.random.RandomState(self.seed)

    def step(self, actions):
        """Advance the environment by one step given joint actions."""
        raise NotImplementedError

    def get_obs(self):
        """Return all agents' observations."""
        raise NotImplementedError

    def get_obs_agent(self, agent_id):
        """Return the observation of a single agent."""
        raise NotImplementedError

    def get_obs_size(self):
        """Return the flat size of a single agent observation."""
        raise NotImplementedError

    def get_state(self):
        """Return the global state."""
        raise NotImplementedError

    def get_state_size(self):
        """Return the flat size of the global state."""
        raise NotImplementedError

    def get_avail_actions(self):
        """Return availability masks for all agents."""
        raise NotImplementedError

    def get_avail_agent_actions(self, agent_id):
        """Return the availability mask for one agent."""
        raise NotImplementedError

    def get_total_actions(self):
        """Return the size of the (shared) action space."""
        raise NotImplementedError

    def get_stats(self):
        raise NotImplementedError

    def get_agg_stats(self, stats):
        # Default: no aggregated statistics.
        return {}

    def reset(self):
        """Reset the environment and return initial observations/state."""
        raise NotImplementedError

    def render(self):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError

    def seed(self, seed):
        raise NotImplementedError

    def get_env_info(self):
        """Summarize shapes/sizes needed to build learners.

        NOTE(review): requires self.n_agents and self.episode_limit, which
        this base class never sets — subclasses must define them.
        """
        env_info = {'state_shape': self.get_state_size(), 'obs_shape': self.get_obs_size(), 'n_actions': self.get_total_actions(), 'n_agents': self.n_agents, 'episode_limit': self.episode_limit}
        return env_info
def meijering(image, sigmas=range(1, 10, 2), alpha=None, black_ridges=True, mode='reflect', cval=0):
    """Meijering neuriteness filter: ridge response maximized over `sigmas`.

    For each scale, Hessian eigenvalues are mixed through a circulant matrix
    parameterized by `alpha` (default 1/(ndim+1)); the eigenvalue of largest
    magnitude is kept, clipped at zero and normalized by its maximum.
    """
    image = image.astype(_supported_float_type(image.dtype), copy=False)
    if not black_ridges:
        # Bright ridges are detected by filtering the negated image.
        image = -image
    if alpha is None:
        alpha = 1 / (image.ndim + 1)
    mixing = linalg.circulant([1, *[alpha] * (image.ndim - 1)]).astype(image.dtype)
    response = np.zeros_like(image)
    for sigma in sigmas:
        eigvals = hessian_matrix_eigvals(hessian_matrix(image, sigma, mode=mode, cval=cval, use_gaussian_derivatives=True))
        mixed = np.tensordot(mixing, eigvals, 1)
        # Keep, per pixel, the mixed eigenvalue of largest magnitude.
        dominant = np.take_along_axis(mixed, np.abs(mixed).argmax(0)[None], 0).squeeze(0)
        dominant = np.maximum(dominant, 0)
        peak = dominant.max()
        if peak > 0:
            dominant /= peak
        response = np.maximum(response, dominant)
    return response
def single_pinyin(han, style, heteronym, errors='default', strict=True):
    """Delegate single-character pinyin conversion to the default converter."""
    return _default_convert._single_pinyin(
        han, style, heteronym, errors=errors, strict=strict)
def iter_slices(string, slice_length):
    """Yield successive chunks of `string` of size `slice_length`.

    A None or non-positive length yields the whole string as one chunk
    (nothing for an empty string).
    """
    if slice_length is None or slice_length <= 0:
        slice_length = len(string)
    total = len(string)
    start = 0
    while start < total:
        yield string[start:start + slice_length]
        start += slice_length
def AtLeast(*args):
    """Create a Z3 at-least cardinality constraint.

    The last positional argument is the bound k; the preceding arguments are
    the Boolean expressions, at least k of which must be true.
    """
    args = _get_args(args)
    if z3_debug():
        _z3_assert((len(args) > 1), 'Non empty list of arguments expected')
    # Derive the Z3 context from the expression arguments.
    ctx = _ctx_from_ast_arg_list(args)
    if z3_debug():
        _z3_assert((ctx is not None), 'At least one of the arguments must be a Z3 expression')
    # Everything except the trailing bound is coerced to a Boolean expression.
    args1 = _coerce_expr_list(args[:(- 1)], ctx)
    k = args[(- 1)]
    (_args, sz) = _to_ast_array(args1)
    return BoolRef(Z3_mk_atleast(ctx.ref(), sz, _args, k), ctx)
def test_construct_schema_2positional():
    """With skip_first_arg=False both positional parameters map to schema fields."""
    def sample(x: int, y: float):
        pass
    model_type = schema.construct_schema('FunSchema', sample, skip_first_arg=False)
    native = model_type({'x': 5, 'y': 2}).to_native()
    assert native == {'x': 5, 'y': 2}
class EvalConfig():
    """Evaluation settings (KsponSpeech preset).

    NOTE(review): these are class-level annotated attributes; the shape
    suggests this was meant to be a @dataclass — confirm whether a decorator
    was lost upstream.
    """
    dataset: str = 'kspon'  # dataset identifier
    dataset_path: str = ''  # root folder of the audio data
    transcripts_path: str = '../../../data/eval_transcript.txt'  # evaluation transcripts
    model_path: str = ''  # checkpoint to evaluate
    output_unit: str = 'character'  # output token unit
    batch_size: int = 32  # evaluation batch size
    num_workers: int = 4  # dataloader workers
    print_every: int = 20  # logging interval (batches)
    decode: str = 'greedy'  # decoding strategy
    k: int = 3  # beam width / top-k used by non-greedy decoding
    use_cuda: bool = True  # run on GPU when available
class LapCore(CPAlgorithm):
    """Laplacian-based detection of a single core-periphery pair.

    Nodes are ranked by the leading eigenvector of a degree-normalized
    operator and the ranking is swept for the best core/periphery cut.
    """

    def __init__(self, beta=0.1):
        # beta: fraction of nodes excluded from each end of the sweep,
        # so core and periphery both keep a minimum size.
        self.beta = beta

    def detect(self, G):
        """Detect the core-periphery structure of graph G and store results
        in nodelabel/c_/x_/Q_/qs_ attributes."""
        (A, nodelabel) = utils.to_adjacency_matrix(G)
        x = self._lap_core(A)
        Q = self._score(A, None, x)
        self.nodelabel = nodelabel
        self.c_ = np.zeros(A.shape[0]).astype(int)
        self.x_ = x.astype(int)
        self.Q_ = np.sum(Q)
        self.qs_ = Q

    def _score(self, A, c, x):
        """Quality of the core vector x (1 = core, 0 = periphery).

        Returns [0.0] when the core or the periphery has fewer than 2 nodes
        (the density denominators would be zero).
        """
        N = A.shape[0]
        Mcc = (np.dot((x.T * A), x) / 2)  # core-core edge mass
        Mcp = np.dot((x.T * A), (1 - x))  # core-periphery edge mass
        # BUGFIX: Mpp was computed identically to Mcc (core-core); it must be
        # the periphery-periphery mass, matching the formula in _find_cut.
        Mpp = (np.dot(((1 - x).T * A), (1 - x)) / 2)
        i = np.sum(x)
        if ((i < 2) or (i > (N - 2))):
            return [0.0]
        q = (((Mcc / float(((i * (i - 1)) / 2))) + (Mcp / float((i * (N - i))))) - (Mpp / float((((N - i) * ((N - i) - 1)) / 2))))
        return [q]

    def _find_cut(self, A, score, b):
        """Sweep cut: order nodes by descending score and pick the prefix size
        in [b, N-b) maximizing core quality qc or periphery quality qp."""
        N = A.shape[0]
        qc = np.zeros(N)
        qp = np.zeros(N)
        od = (- score).argsort()
        for i in range(b, (N - b)):
            x = np.zeros((N, 1))
            x[od[0:i]] = 1
            Mcc = (np.dot((x.T * A), x)[(0, 0)] / 2)
            Mcp = np.dot((x.T * A), (1 - x))[(0, 0)]
            Mpp = (np.dot(((1 - x).T * A), (1 - x))[(0, 0)] / 2)
            qc[i] = (((Mcc / float(((i * (i - 1)) / 2))) + (Mcp / float((i * (N - i))))) - (Mpp / float((((N - i) * ((N - i) - 1)) / 2))))
            qp[i] = (((Mcp / float((i * (N - i)))) + (Mpp / float((((N - i) * ((N - i) - 1)) / 2)))) - (Mcc / float(((i * (i - 1)) / 2))))
        idx_c = np.argmax(qc)
        idx_p = np.argmax(qp)
        if (qc[idx_c] > qp[idx_p]):
            Q = qc[idx_c]
            x = np.zeros(N)
            x[od[0:idx_c]] = 1
        else:
            # BUGFIX: the periphery-oriented branch read qc[idx_p]; the
            # quality at idx_p is qp[idx_p].
            Q = qp[idx_p]
            x = np.ones(N)
            x[od[0:idx_p]] = 0
        # NOTE(review): Q is normalized but never returned — kept for parity
        # with sibling implementations; confirm whether callers need it.
        Q = (Q / N)
        return x

    def _lap_core(self, A):
        """Rank nodes by the eigenvector of (D+I)^{-1} A - I with smallest
        real eigenvalue, then sweep for the best cut."""
        N = A.shape[0]
        deg = np.array(A.sum(axis=1)).reshape((- 1))
        denom = np.zeros(N)
        denom[(deg > 0)] = (1.0 / (deg[(deg > 0)] + 1.0))
        T = ((diags(denom) * A) - diags(np.ones(N)))
        (d, v) = eigs(T, k=1, which='SR')
        x = self._find_cut(A, v.T[0], int(np.round((N * self.beta))))
        return x
def _is_packed(dtype):
    """Return True when the fields of a structured dtype are laid out
    back-to-back with no padding (inner or trailing)."""
    expected_offset = 0
    for name in dtype.names:
        (fld_dtype, fld_offset, title) = _unpack_field(*dtype.fields[name])
        if fld_offset != expected_offset:
            # Gap before this field -> padded layout.
            return False
        expected_offset += fld_dtype.itemsize
    # Any trailing padding also disqualifies the dtype.
    return expected_offset == dtype.itemsize
def get_model(point_cloud, is_training, num_class, bn_decay=None, gripper_feat=None, env_feat=None):
    """PointNet++-style per-point classification network (TF1).

    Three set-abstraction layers progressively downsample the cloud, three
    feature-propagation layers upsample back to the full point set, and two
    1x1 conv1d layers produce per-point scores with `num_class` channels.

    Returns (net, end_points) where end_points holds intermediate features.

    NOTE(review): gripper_feat and env_feat are accepted but never used, and
    batch_size/num_point are computed but unused — confirm intent.
    """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz
    # Encoder: set abstraction — fewer points, wider features at each layer.
    (l1_xyz, l1_points, l1_indices) = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.05, nsample=32, mlp=[32, 64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    (l2_xyz, l2_points, l2_indices) = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.02, nsample=32, mlp=[64, 128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    (l3_xyz, l3_points, l3_indices) = pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=0.04, nsample=32, mlp=[128, 256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    # Decoder: feature propagation back up to the original resolution.
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [128, 128], is_training, bn_decay, scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [128, 128], is_training, bn_decay, scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128, 64], is_training, bn_decay, scope='fa_layer4')
    # Per-point head: shared 1x1 convolutions.
    net = tf_util.conv1d(l0_points, 64, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net
    net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')
    return (net, end_points)
class DomainTransitionGraph():
    """Directed graph over the values of a finite-domain variable.

    Tracks which values can be reached from the initial value via added arcs.
    """

    def __init__(self, init, size):
        self.init = init          # initial value of the variable
        self.size = size          # domain size
        self.arcs = defaultdict(set)

    def add_arc(self, u, v):
        """Record a possible transition from value u to value v."""
        self.arcs[u].add(v)

    def reachable(self):
        """Return the set of values reachable from self.init (including it)."""
        frontier = [self.init]
        seen = set(frontier)
        while frontier:
            current = frontier.pop()
            for successor in self.arcs.get(current, set()):
                if successor not in seen:
                    seen.add(successor)
                    frontier.append(successor)
        return seen

    def dump(self):
        """Print a human-readable description of the DTG."""
        print('DTG size:', self.size)
        print('DTG init value:', self.init)
        print('DTG arcs:')
        for (source, destinations) in sorted(self.arcs.items()):
            for destination in sorted(destinations):
                print(' %d => %d' % (source, destination))
class Saver():
    """Handles tensorboard logging, periodic image dumps and checkpointing.

    Frequencies (display/img/model) are read from `opts`; directories are
    created on construction.
    """

    def __init__(self, opts):
        self.display_dir = os.path.join(opts.display_dir, opts.name)
        self.model_dir = os.path.join(opts.result_dir, opts.name)
        self.image_dir = os.path.join(self.model_dir, 'images')
        self.display_freq = opts.display_freq
        self.img_save_freq = opts.img_save_freq
        self.model_save_freq = opts.model_save_freq
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() guards.
        os.makedirs(self.display_dir, exist_ok=True)
        os.makedirs(self.model_dir, exist_ok=True)
        os.makedirs(self.image_dir, exist_ok=True)
        self.writer = SummaryWriter(log_dir=self.display_dir)

    def write_display(self, total_it, model):
        """Every display_freq iterations, log all scalar 'loss' attributes of
        the model plus a grid of its display images."""
        if (((total_it + 1) % self.display_freq) == 0):
            members = [attr for attr in dir(model) if ((not callable(getattr(model, attr))) and (not attr.startswith('__')) and ('loss' in attr))]
            for m in members:
                self.writer.add_scalar(m, getattr(model, m), total_it)
            # Images are assumed in [-1, 1]; rescale to [0, 1] for display.
            image_dis = ((torchvision.utils.make_grid(model.image_display, nrow=(model.image_display.size(0) // 2)) / 2) + 0.5)
            self.writer.add_image('Image', image_dis, total_it)

    def write_img(self, ep, model):
        """Save assembled output images every img_save_freq epochs; ep == -1
        writes the final image."""
        if (((ep + 1) % self.img_save_freq) == 0):
            assembled_images = model.assemble_outputs()
            img_filename = ('%s/gen_%05d.jpg' % (self.image_dir, ep))
            torchvision.utils.save_image(((assembled_images / 2) + 0.5), img_filename, nrow=1)
        elif (ep == (- 1)):
            assembled_images = model.assemble_outputs()
            # BUGFIX: the format string has a single %s but was given the
            # tuple (self.image_dir, ep), raising TypeError at runtime.
            img_filename = ('%s/gen_last.jpg' % self.image_dir)
            torchvision.utils.save_image(((assembled_images / 2) + 0.5), img_filename, nrow=1)

    def write_model(self, ep, total_it, model):
        """Checkpoint every model_save_freq epochs; otherwise refresh last.pth."""
        if (((ep + 1) % self.model_save_freq) == 0):
            print(('--- save the model ep %d ---' % ep))
            model.save(('%s/%05d.pth' % (self.model_dir, ep)), ep, total_it)
        else:
            model.save(('%s/last.pth' % self.model_dir), ep, total_it)
class TD_LSTM(nn.Module):
    """Target-dependent LSTM: encodes the left and right contexts with two
    LSTMs and classifies from their concatenated final hidden states."""

    def __init__(self, embedding_matrix, opt):
        super(TD_LSTM, self).__init__()
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        self.lstm_l = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True)
        self.lstm_r = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True)
        self.dense = nn.Linear(opt.hidden_dim * 2, opt.polarities_dim)

    def forward(self, inputs):
        left_ids, right_ids = inputs[0], inputs[1]
        # Non-zero token ids give the true sequence lengths.
        left_len = torch.sum(left_ids != 0, dim=-1)
        right_len = torch.sum(right_ids != 0, dim=-1)
        left_emb = self.embed(left_ids)
        right_emb = self.embed(right_ids)
        _, (h_left, _) = self.lstm_l(left_emb, left_len)
        _, (h_right, _) = self.lstm_r(right_emb, right_len)
        combined = torch.cat((h_left[0], h_right[0]), dim=-1)
        return self.dense(combined)
def get_eval_dataset(dataset_name, num_shots, seed=42):
    """Build the evaluation bundle for a supported QA / language-modeling set.

    Returns a dict with decoding parameters, train/val splits, a
    `prompt_transform(ex, context_exs)` that builds a few-shot prompt plus
    the accepted answers, and eval/postprocess callables.

    Supported: trivia_qa, natural_questions, web_questions, lambada,
    squad_v2. Raises ValueError otherwise.
    """
    # Greedy-ish decoding defaults shared by every dataset.
    top_k = 1
    top_p = 0
    temperature = 1
    num_shots = num_shots
    max_new_tokens = 20
    shuffle_train = True
    eval_func = eval_func_default
    pred_postprocess_func = pred_postprocess_default
    if (dataset_name == 'trivia_qa'):
        # Open-domain QA without context; any alias counts as correct.
        dataset = load_dataset(dataset_name, name='rc.nocontext')
        dataset_train = dataset['train']
        dataset_val = dataset['validation']
        input_key = 'question'
        output_key = 'answer'
        def prompt_transform(ex, context_exs):
            prompt = '\n\n'.join([f'''Question: {c_ex[input_key]}
Answer: {c_ex[output_key]['aliases'][0]}''' for c_ex in context_exs])
            prompt += f'''
Question: {ex[input_key]}
Answer:'''
            answer_list = ex[output_key]['aliases']
            return {'prompt': prompt, 'answer': answer_list}
    elif (dataset_name == 'natural_questions'):
        dataset = load_dataset('lucadiliello/naturalquestionsshortqa')
        dataset_train = dataset['train']
        dataset_val = dataset['validation']
        def prompt_transform(ex, context_exs):
            prompt = '\n\n'.join([f'''Q: {c_ex['question']}?
A: {c_ex['answers'][0]}''' for c_ex in context_exs])
            prompt += f'''
Q: {ex['question']}?
A:'''
            answer_list = ex['answers']
            return {'prompt': prompt, 'answer': answer_list}
    elif (dataset_name == 'web_questions'):
        dataset = load_dataset(dataset_name)
        dataset_train = dataset['train']
        dataset_val = dataset['test']
        def prompt_transform(ex, context_exs):
            prompt = '\n\n'.join([f'''Question: {c_ex['question']}
Answer: {c_ex['answers'][0]}''' for c_ex in context_exs])
            prompt += f'''
Question: {ex['question']}
Answer:'''
            answer_list = ex['answers']
            return {'prompt': prompt, 'answer': answer_list}
    elif (dataset_name == 'lambada'):
        # Last-word prediction; train shots come from the validation split.
        dataset = load_dataset(dataset_name)
        dataset_train = dataset['validation']
        dataset_val = dataset['test']
        def prompt_transform(ex, context_exs):
            words = ex['text'].split(' ')
            ex_input = ' '.join(words[:(- 1)])
            ex_answer = words[(- 1)]
            context_ex_toks = [c_ex['text'].split(' ') for c_ex in context_exs]
            prompt = '\n\n'.join([f'''Input: {' '.join(c_ex_toks[:(- 1)])}
Output: {c_ex_toks[(- 1)]}''' for c_ex_toks in context_ex_toks])
            prompt += f'''
Input: {ex_input}
Output:'''
            prompt = ('Complete the following sentences.\n\n' + prompt)
            answer_list = [ex_answer]
            return {'prompt': prompt, 'answer': answer_list}
    elif (dataset_name == 'squad_v2'):
        # Context-based QA: shots are drawn from same-title examples (the
        # validation order is reversed per title to serve as "train").
        dataset = load_dataset(dataset_name)
        shuffle_train = False
        dataset_val = dataset['validation']
        dataset_val_chunks = []
        dataset_train_chunks = []
        all_titles = set([ex['title'] for ex in dataset_val])
        for (i, title) in enumerate(all_titles):
            title_dataset_val = dataset_val.filter((lambda x: (x['title'] == title))).shuffle((seed + i))
            title_dataset_train = title_dataset_val.select(list(reversed(range(len(title_dataset_val)))))
            assert (len(title_dataset_train) == len(title_dataset_val))
            dataset_train_chunks.append(title_dataset_train)
            dataset_val_chunks.append(title_dataset_val)
        dataset_train = concatenate_datasets(dataset_train_chunks)
        dataset_val = concatenate_datasets(dataset_val_chunks)
        def prompt_transform(ex, context_exs):
            # Unanswerable questions are normalized to the literal answer.
            for c_ex in ([ex] + context_exs):
                if (len(c_ex['answers']['text']) == 0):
                    c_ex['answers']['text'] = ['unanswerable']
                assert (c_ex['title'] == ex['title'])
            prompt = f'''Title: {ex['title']}
Background: {ex['context']}
'''
            # NOTE(review): this join is over a single-element list and uses
            # c_ex left over from the loop above — it looks like a missing
            # "for c_ex in context_exs"; confirm upstream intent.
            prompt += '\n\n'.join([f'''Question: {c_ex['question']}
Answer (use Background or answer "unanswerable"): {c_ex['answers']['text'][0]}'''])
            prompt += f'''
Question: {ex['question']}
Answer (use Background or answer "unanswerable"):'''
            answer_list = ex['answers']['text']
            return {'prompt': prompt, 'answer': answer_list}
        # Override the default scorer: case/punctuation-insensitive membership.
        def eval_func(answer, pred, prompt, model, tokenizer, inputs, trainer):
            if (not isinstance(answer, list)):
                answer = [answer.strip().lower().translate(str.maketrans('', '', string.punctuation))]
            else:
                answer = [a.strip().lower().translate(str.maketrans('', '', string.punctuation)) for a in answer]
            return (pred in answer)
    else:
        raise ValueError(f'Dataset {dataset_name} not supported')
    return {'top_k': top_k, 'top_p': top_p, 'temperature': temperature, 'num_shots': num_shots, 'max_new_tokens': max_new_tokens, 'prompt_transform': prompt_transform, 'dataset_train': dataset_train, 'shuffle_train': shuffle_train, 'dataset_val': dataset_val, 'eval_func': eval_func, 'pred_postprocess_func': pred_postprocess_func}
def load_experts(expert_files, flatten=True):
    """Load pickled expert trajectories from `expert_files` and concatenate them.

    When `flatten` is true the trajectories are flattened into individual
    transitions via flatten_trajectories.
    """
    # NOTE: pickle.load executes arbitrary code; only load trusted files.
    collected = []
    for path in tqdm(expert_files):
        with open(path, 'rb') as handle:
            collected.extend(pickle.load(handle))
    if flatten:
        collected = flatten_trajectories(collected)
    return collected
def cover_and_relations_from_invariants(invs):
    """Return (A, B) where A = ZZ^n and B is the sublattice spanned by the
    generators of A scaled by the corresponding invariant factors."""
    rank = len(invs)
    ambient = ZZ ** rank
    scaled_gens = [ambient.gen(i) * invs[i] for i in range(rank)]
    return (ambient, ambient.span(scaled_gens))
def check_and_enlist_bcs(bcs_list: Union[fenics.DirichletBC, List[fenics.DirichletBC], List[List[fenics.DirichletBC]]]) -> List[List[fenics.DirichletBC]]:
    """Normalize boundary conditions into a list of lists of DirichletBC.

    Accepts a single bc, a flat list, an empty list, or an already nested
    list; raises InputError for anything else.
    """
    if isinstance(bcs_list, fenics.DirichletBC):
        return [[bcs_list]]
    if isinstance(bcs_list, list):
        if not bcs_list:
            return [bcs_list]
        head = bcs_list[0]
        if isinstance(head, fenics.DirichletBC):
            return [bcs_list]
        if isinstance(head, list):
            return bcs_list
    raise _exceptions.InputError('cashocs._utils.check_and_enlist_bcs', 'bcs_list', 'Type of bcs_list is wrong')
def main():
    """Run a cascading-failure simulation on the electrical grid graph and plot the results."""
    graph = electrical()
    params = {
        'runs': 1,
        'steps': 100,
        'seed': 1,
        'l': 0.8,
        'r': 0.2,
        'c': int(0.1 * len(graph)),  # initial capacity fraction of node count
        'k_a': 5,
        'attack': 'id_node',
        'attack_approx': None,
        'k_d': 0,
        'defense': None,
        'robust_measure': 'largest_connected_component',
        'plot_transition': False,
        'gif_animation': True,
        'gif_snaps': True,
        'edge_style': None,
        'node_style': 'spectral',
        'fa_iter': 2000,
    }
    simulation = Cascading(graph, **params)
    results = simulation.run_simulation()
    simulation.plot_results(results)
class TestChannelStatsOp(serial.SerializedTestCase):
    """Hypothesis tests for the ChannelStats operator in NCHW and NHWC orders."""

    def channel_stats_nchw_ref(self, X):
        """Reference per-channel sum and sum-of-squares for channel-first input."""
        dims = X.shape
        N = dims[0]
        C = dims[1]
        X = X.reshape(N, C, (- 1))
        sum1 = np.sum(X, axis=(0, 2), keepdims=False)
        sum2 = np.sum((X ** 2), axis=(0, 2), keepdims=False)
        return (sum1, sum2)

    def channel_stats_nhwc_ref(self, X):
        """Reference per-channel sum and sum-of-squares for channel-last input."""
        dims = X.shape
        N = dims[0]
        C = dims[(- 1)]
        X = X.reshape(N, (- 1), C)
        sum1 = np.sum(X, axis=(0, 1), keepdims=False)
        sum2 = np.sum((X ** 2), axis=(0, 1), keepdims=False)
        return (sum1, sum2)

    # BUGFIX: the hypothesis decorators had lost their '@given'/'@settings'
    # prefixes, leaving bare keyword-argument tuples (syntax errors).
    @given(N=st.integers(1, 5), C=st.integers(1, 10), H=st.integers(1, 12), W=st.integers(1, 12), order=st.sampled_from(['NCHW', 'NHWC']), **hu.gcs)
    @settings(deadline=10000)
    def test_channel_stats_2d(self, N, C, H, W, order, gc, dc):
        op = core.CreateOperator('ChannelStats', ['X'], ['sum', 'sumsq'], order=order)
        def ref_op(X):
            if (order == 'NCHW'):
                return self.channel_stats_nchw_ref(X)
            else:
                return self.channel_stats_nhwc_ref(X)
        X = np.random.randn(N, C, H, W).astype(np.float32)
        if (order == 'NHWC'):
            X = np.transpose(X, [0, 2, 3, 1])
        self.assertReferenceChecks(gc, op, [X], reference=ref_op)
        self.assertDeviceChecks(dc, op, [X], [0, 1])

    @given(N=st.integers(1, 5), C=st.integers(1, 10), D=st.integers(1, 6), H=st.integers(1, 6), W=st.integers(1, 6), order=st.sampled_from(['NCHW', 'NHWC']), **hu.gcs)
    @settings(deadline=10000)
    def test_channel_stats_3d(self, N, C, D, H, W, order, gc, dc):
        op = core.CreateOperator('ChannelStats', ['X'], ['sum', 'sumsq'], order=order)
        def ref_op(X):
            if (order == 'NCHW'):
                return self.channel_stats_nchw_ref(X)
            else:
                return self.channel_stats_nhwc_ref(X)
        X = np.random.randn(N, C, D, H, W).astype(np.float32)
        if (order == 'NHWC'):
            X = np.transpose(X, [0, 2, 3, 4, 1])
        self.assertReferenceChecks(gc, op, [X], reference=ref_op)
        self.assertDeviceChecks(dc, op, [X], [0, 1])
class Network(nn.Module):
    """Differentiable-architecture-search super-network.

    Holds architecture parameters (betas for branch/connection choices,
    alphas for head/stack operation choices) alongside the weight modules,
    and builds per-block input/output wiring configs from the search config.
    """
    def __init__(self, init_ch, dataset, config):
        # init_ch: number of input channels; dataset: dataset identifier;
        # config: search/optimization configuration namespace.
        super(Network, self).__init__()
        self.config = config
        self._C_input = init_ch
        self._head_dim = self.config.optim.head_dim
        self._dataset = dataset
        self.initialize()
    def initialize(self):
        """Build wiring configs and architecture parameters, in dependency order."""
        self._init_block_config()
        self._create_output_list()
        self._create_input_list()
        self._init_betas()
        self._init_alphas()
        self._init_sample_branch()
    def init_model(self, model_init='he_fout', init_div_groups=True):
        """Initialize module weights (He init, fan-out or fan-in variant)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if (model_init == 'he_fout'):
                    # Fan-out: kernel area times output channels.
                    n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                    if init_div_groups:
                        n /= m.groups
                    m.weight.data.normal_(0, math.sqrt((2.0 / n)))
                elif (model_init == 'he_fin'):
                    # Fan-in: kernel area times input channels.
                    n = ((m.kernel_size[0] * m.kernel_size[1]) * m.in_channels)
                    if init_div_groups:
                        n /= m.groups
                    m.weight.data.normal_(0, math.sqrt((2.0 / n)))
                else:
                    raise NotImplementedError
            elif isinstance(m, nn.BatchNorm2d):
                if (m.affine == True):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                if (m.bias is not None):
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm1d):
                if (m.affine == True):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
    def set_bn_param(self, bn_momentum, bn_eps):
        """Set momentum/eps on every BatchNorm2d module."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.momentum = bn_momentum
                m.eps = bn_eps
        return
    def _init_betas(self):
        """One beta vector per block: one weight per outgoing connection."""
        self.beta_weights = nn.ParameterList()
        for block in self.output_configs:
            num_betas = len(block['out_chs'])
            self.beta_weights.append(nn.Parameter((0.001 * torch.randn(num_betas))))
    def _init_alphas(self):
        """Per-block alphas over head ops (per input branch) and stack ops (per layer)."""
        self.alpha_head_weights = nn.ParameterList()
        self.alpha_stack_weights = nn.ParameterList()
        # The last input config (final conv1x1 block) carries no alphas.
        for block in self.input_configs[:(- 1)]:
            num_head_alpha = len(block['in_block_idx'])
            self.alpha_head_weights.append(nn.Parameter((0.001 * torch.randn(num_head_alpha, len(self.config.search_params.PRIMITIVES_head)))))
            num_layers = block['num_stack_layers']
            self.alpha_stack_weights.append(nn.Parameter((0.001 * torch.randn(num_layers, len(self.config.search_params.PRIMITIVES_stack)))))
    def arch_parameters(self):
        """All architecture parameters (betas + both alpha groups)."""
        arch_params = nn.ParameterList()
        arch_params.extend(self.beta_weights)
        arch_params.extend(self.alpha_head_weights)
        arch_params.extend(self.alpha_stack_weights)
        return arch_params
    def arch_beta_params(self):
        """Only the branch (beta) parameters."""
        return self.beta_weights
    def arch_alpha_params(self):
        """Only the operation (alpha) parameters, head then stack."""
        alpha_params = nn.ParameterList()
        alpha_params.extend(self.alpha_head_weights)
        alpha_params.extend(self.alpha_stack_weights)
        return alpha_params
    def display_arch_params(self, display=True):
        """Softmax all architecture parameters; optionally log them.

        Returns three lists-of-lists: branch, head-op and stack-op weights.
        """
        branch_weights = []
        head_op_weights = []
        stack_op_weights = []
        for betas in self.beta_weights:
            branch_weights.append(F.softmax(betas, dim=(- 1)))
        for head_alpha in self.alpha_head_weights:
            head_op_weights.append(F.softmax(head_alpha, dim=(- 1)))
        for stack_alpha in self.alpha_stack_weights:
            stack_op_weights.append(F.softmax(stack_alpha, dim=(- 1)))
        if display:
            logging.info(('branch_weights \n' + '\n'.join(map(str, branch_weights))))
            if (len(self.config.search_params.PRIMITIVES_head) > 1):
                logging.info(('head_op_weights \n' + '\n'.join(map(str, head_op_weights))))
            logging.info(('stack_op_weights \n' + '\n'.join(map(str, stack_op_weights))))
        return ([x.tolist() for x in branch_weights], [x.tolist() for x in head_op_weights], [x.tolist() for x in stack_op_weights])
    def _init_sample_branch(self):
        # Populates self.alpha_head_index / self.alpha_stack_index with the
        # 'all' sampling (training=False) so forward wiring exists immediately.
        (_, _) = self.sample_branch('head', 1, training=False)
        (_, _) = self.sample_branch('stack', 1, training=False)
    def sample_branch(self, params_type, sample_num, training=True, search_stage=1, if_sort=True):
        """Sample candidate operation indices for 'head' or 'stack' alphas.

        Sampling policy: probability-weighted in search stage 1, uniform in
        earlier stages, and 'all' (every candidate) at evaluation time.
        Stores the sampled indices on self and returns (params, indices).
        """
        def sample(param, weight, sample_num, sample_policy='prob', if_sort=True):
            # If we ask for at least as many samples as there are candidates,
            # just take all of them.
            if (sample_num >= weight.shape[(- 1)]):
                sample_policy = 'all'
            assert (param.shape == weight.shape)
            assert (sample_policy in ['prob', 'uniform', 'all'])
            if (param.shape[0] == 0):
                return ([], [])
            if (sample_policy == 'prob'):
                sampled_index = torch.multinomial(weight, num_samples=sample_num, replacement=False)
            elif (sample_policy == 'uniform'):
                weight = torch.ones_like(weight)
                sampled_index = torch.multinomial(weight, num_samples=sample_num, replacement=False)
            else:
                sampled_index = torch.arange(start=0, end=weight.shape[(- 1)], step=1, device=weight.device).repeat(param.shape[0], 1)
            if if_sort:
                (sampled_index, _) = torch.sort(sampled_index, descending=False)
            sampled_param_old = torch.gather(param, dim=(- 1), index=sampled_index)
            return (sampled_param_old, sampled_index)
        if (params_type == 'head'):
            params = self.alpha_head_weights
        elif (params_type == 'stack'):
            params = self.alpha_stack_weights
        else:
            raise TypeError
        weights = []
        sampled_params_old = []
        sampled_indices = []
        if training:
            sample_policy = (self.config.search_params.sample_policy if (search_stage == 1) else 'uniform')
        else:
            sample_policy = 'all'
        for param in params:
            weights.append(F.softmax(param, dim=(- 1)))
        for (param, weight) in zip(params, weights):
            (sampled_param_old, sampled_index) = sample(param, weight, sample_num, sample_policy, if_sort)
            sampled_params_old.append(sampled_param_old)
            sampled_indices.append(sampled_index)
        if (params_type == 'head'):
            self.alpha_head_index = sampled_indices
        elif (params_type == 'stack'):
            self.alpha_stack_index = sampled_indices
        return (sampled_params_old, sampled_indices)
    def _init_block_config(self):
        """Read per-block channel / feature-map-size / depth lists from config."""
        self.block_chs = self.config.search_params.net_scale.chs
        self.block_fm_sizes = self.config.search_params.net_scale.fm_sizes
        self.num_blocks = (len(self.block_chs) - 1)
        self.num_block_layers = self.config.search_params.net_scale.num_layers
        if hasattr(self.config.search_params.net_scale, 'stage'):
            self.block_stage = self.config.search_params.net_scale.stage
        # Append a final block for the last (classifier-facing) dimension.
        self.block_chs.append(self.config.optim.last_dim)
        self.block_fm_sizes.append(self.block_fm_sizes[(- 1)])
        self.num_block_layers.append(0)
    def _create_output_list(self):
        """For each block, enumerate the forward connections it may feed.

        A connection is only allowed within the configured adjacency window,
        within one stage step (if stages are defined), and with a feature-map
        ratio of exactly 1 (stride 1) or 2 (stride 2).
        """
        self.output_configs = []
        for i in range((len(self.block_chs) - 1)):
            if hasattr(self, 'block_stage'):
                stage = self.block_stage[i]
            output_config = {'ch': self.block_chs[i], 'fm_size': self.block_fm_sizes[i], 'out_chs': [], 'out_fms': [], 'strides': [], 'out_id': [], 'num_stack_layers': self.num_block_layers[i]}
            for j in range(self.config.search_params.adjoin_connect_nums[stage]):
                out_index = ((i + j) + 1)
                if (out_index >= len(self.block_chs)):
                    break
                if hasattr(self, 'block_stage'):
                    block_stage = getattr(self, 'block_stage')
                    if ((block_stage[out_index] - block_stage[i]) > 1):
                        break
                fm_size_ratio = (self.block_fm_sizes[i] / self.block_fm_sizes[out_index])
                if (fm_size_ratio == 2):
                    output_config['strides'].append(2)
                elif (fm_size_ratio == 1):
                    output_config['strides'].append(1)
                else:
                    break
                output_config['out_chs'].append(self.block_chs[out_index])
                output_config['out_fms'].append(self.block_fm_sizes[out_index])
                output_config['out_id'].append(out_index)
            self.output_configs.append(output_config)
        logging.info(('Network output configs: \n' + '\n'.join(map(str, self.output_configs))))
    def _create_input_list(self):
        """Invert output_configs: for each block, list the blocks feeding it."""
        self.input_configs = []
        for i in range(1, len(self.block_chs)):
            input_config = {'ch': self.block_chs[i], 'fm_size': self.block_fm_sizes[i], 'in_chs': [], 'in_fms': [], 'strides': [], 'in_block_idx': [], 'beta_idx': [], 'num_stack_layers': self.num_block_layers[i]}
            for j in range(i):
                # Walk candidate producers from nearest to farthest.
                in_index = ((i - j) - 1)
                if (in_index < 0):
                    break
                output_config = self.output_configs[in_index]
                if (i in output_config['out_id']):
                    beta_idx = output_config['out_id'].index(i)
                    input_config['in_block_idx'].append(in_index)
                    input_config['in_chs'].append(output_config['ch'])
                    input_config['in_fms'].append(output_config['fm_size'])
                    input_config['beta_idx'].append(beta_idx)
                    input_config['strides'].append(output_config['strides'][beta_idx])
                else:
                    continue
            self.input_configs.append(input_config)
        logging.info(('Network input configs: \n' + '\n'.join(map(str, self.input_configs))))
    def get_cost_list(self, data_shape, cost_type='flops', use_gpu=True, meas_times=1000):
        """Measure per-op cost (flops or latency) through the whole network.

        Returns (cost_list, total_cost); cost_list mirrors the network
        structure: stem cost, per-block [head, stack] op costs, conv1x1
        branch costs, classifier cost.
        """
        cost_list = []
        block_datas = []
        total_cost = 0
        if (cost_type == 'flops'):
            cost_func = (lambda module, data: comp_multadds_fw(module, data, use_gpu))
        elif (cost_type == 'latency'):
            cost_func = (lambda module, data: latency_measure_fw(module, data, meas_times))
        else:
            raise NotImplementedError
        if (len(data_shape) == 3):
            # Add a batch dimension when given (C, H, W).
            input_data = torch.randn(((1,) + tuple(data_shape)))
        else:
            input_data = torch.randn(tuple(data_shape))
        if use_gpu:
            input_data = input_data.cuda()
        (cost, block_data) = cost_func(self.input_block, input_data)
        cost_list.append(cost)
        block_datas.append(block_data)
        total_cost += cost
        if hasattr(self, 'head_block'):
            # Fold the optional head block into the stem cost.
            (cost, block_data) = cost_func(self.head_block, block_data)
            cost_list[0] += cost
            block_datas[0] = block_data
        block_flops = []
        for (block_id, block) in enumerate(self.blocks):
            input_config = self.input_configs[block_id]
            inputs = [block_datas[i] for i in input_config['in_block_idx']]
            head_branch_flops = []
            for (branch_id, head_branch) in enumerate(block.head_layer.head_branches):
                op_flops = []
                for op in head_branch._ops:
                    (cost, block_data) = cost_func(op, inputs[branch_id])
                    op_flops.append(cost)
                    total_cost += cost
                head_branch_flops.append(op_flops)
            stack_layer_flops = []
            if (block.stack_layers.stack_layers is not None):
                for stack_layer in block.stack_layers.stack_layers:
                    op_flops = []
                    for op in stack_layer._ops:
                        (cost, block_data) = cost_func(op, block_data)
                        if (isinstance(op, operations.Skip) and self.config.optim.sub_obj.skip_reg):
                            # Regularize Skip ops: charge a tenth of the first op's cost.
                            cost = (op_flops[0] / 10.0)
                        op_flops.append(cost)
                        total_cost += cost
                    stack_layer_flops.append(op_flops)
            block_flops.append([head_branch_flops, stack_layer_flops])
            block_datas.append(block_data)
        cost_list.append(block_flops)
        conv1_1_flops = []
        input_config = self.input_configs[(- 1)]
        inputs = [block_datas[i] for i in input_config['in_block_idx']]
        for (branch_id, branch) in enumerate(self.conv1_1_block.conv1_1_branches):
            (cost, block_data) = cost_func(branch, inputs[branch_id])
            conv1_1_flops.append(cost)
            total_cost += cost
        block_datas.append(block_data)
        cost_list.append(conv1_1_flops)
        out = block_datas[(- 1)]
        # Classifier expects channels-last input, hence the permute.
        (cost, out) = cost_func(self.classifier, out.permute(0, 2, 3, 1).contiguous())
        cost_list.append(cost)
        total_cost += cost
        return (cost_list, total_cost)
def test_attri2vec_apply():
    """End-to-end check of Attri2Vec tensor wiring with all weights set to one."""
    attri2vec = Attri2Vec(layer_sizes=[2, 2, 2], bias=False, input_dim=2, node_num=4, multiplicity=2, activation='linear', normalize=None)
    base_model = keras.Model(*attri2vec.in_out_tensors())
    # Force every weight to 1 so the expected outputs are computable by hand.
    base_model.set_weights([np.ones_like(w) for w in base_model.get_weights()])
    features = np.array([[1, 2]])
    expected = np.array([[12, 12]])
    # Calling the layer object directly on a fresh Input must agree.
    inp = keras.Input(shape=(2,))
    out = attri2vec(inp)
    direct_model = keras.Model(inputs=inp, outputs=out)
    assert (expected == pytest.approx(direct_model.predict(features)))
    # multiplicity=1 builds the single-input (node embedding) model.
    (xinp, xout) = attri2vec.in_out_tensors(multiplicity=1)
    node_model = keras.Model(inputs=xinp, outputs=xout)
    assert (pytest.approx(expected) == node_model.predict(features))
    src_features = np.array([[3, 1]])
    dst_ids = np.array([[2]])
    src_expected = np.array([[16, 16]])
    dst_expected = np.array([[1, 1]])
    # Building the default (two-input) model twice must give the same results
    # each time (in_out_tensors can be called repeatedly).
    for _ in range(2):
        (xinp, xout) = attri2vec.in_out_tensors()
        link_model = keras.Model(inputs=xinp, outputs=xout)
        preds = link_model.predict([src_features, dst_ids])
        assert (pytest.approx(src_expected) == preds[0])
        assert (pytest.approx(dst_expected) == preds[1])
def find_all_linear_names(peft_model, int4=False, int8=False):
    """Collect (sorted) names of linear modules that are LoRA-adaptation targets.

    With ``int4``/``int8`` set, matches the corresponding bitsandbytes
    quantized linear class instead of ``torch.nn.Linear``. Modules whose
    names contain 'lm_head' or 'score' are excluded.
    """
    target_cls = torch.nn.Linear
    if (int4 or int8):
        import bitsandbytes as bnb
        target_cls = (bnb.nn.Linear4bit if int4 else bnb.nn.Linear8bitLt)
    found = set()
    for (module_name, module) in peft_model.named_modules():
        if (not isinstance(module, target_cls)):
            continue
        # Output heads must stay in full precision / unadapted.
        if (('lm_head' in module_name) or ('score' in module_name)):
            continue
        parts = module_name.split('.')
        found.add((parts[0] if (len(parts) == 1) else parts[(- 1)]))
    return sorted(found)
class FractionFieldEmbeddingSection(Section):
    """Section (partial inverse) of the embedding of a ring into its fraction field.

    Maps a fraction back into the base ring when its denominator is a unit.
    """
    def _call_(self, x, check=True):
        """Convert the fraction *x* to an element of the codomain ring.

        Raises TypeError (when ``check``) if the denominator is not a unit.
        """
        codom = self.codomain()
        if (self.domain()._R is codom):
            # Codomain is exactly the base ring of the fraction field:
            # numerator/denominator already live there.
            num = x.numerator()
            den = x.denominator()
        else:
            # Otherwise coerce both parts into the codomain first.
            num = codom(x.numerator())
            den = codom(x.denominator())
        if (codom.is_exact() and den.is_one()):
            return num
        if (check and (not den.is_unit())):
            raise TypeError('fraction must have unit denominator')
        return (num * den.inverse_of_unit())
    def _call_with_args(self, x, args=(), kwds={}):
        """Like ``_call_`` but only the keyword ``check`` is supported."""
        check = kwds.get('check', True)
        if (args or any(((key != 'check') for key in kwds))):
            raise NotImplementedError('__call__ cannot be called with additional arguments other than check=True/False')
        return self._call_(x, check=check)
    def _richcmp_(self, other, op):
        """Compare sections by (domain, codomain); other types are incomparable."""
        if (type(self) is not type(other)):
            return NotImplemented
        return richcmp((self.domain(), self.codomain()), (other.domain(), other.codomain()), op)
    def __hash__(self):
        # Consistent with _richcmp_ equality on (type, codomain).
        return hash((type(self), self.codomain()))
# NOTE(review): the decorator line was truncated in the original source
# ('.typeof_impl.register(RecordView)'); restored as the standard
# numba.extending registration — confirm against the project's imports.
@numba.extending.typeof_impl.register(RecordView)
def typeof_RecordView(obj, c):
    """Tell Numba the type of a RecordView: a RecordViewType wrapping the
    typeof of its underlying array view."""
    return RecordViewType(numba.typeof(obj.arrayview))
def partial_ld_offset():
    """Byte offset of the longdouble member in the partially-aligned struct.

    Starts at 12 bytes, adds alignment padding before the uint64 member when
    its alignment exceeds 4, then the 8-byte member itself, then padding
    before the longdouble when its alignment exceeds 8.
    """
    offset = 12
    if np.dtype('uint64').alignment > 4:
        offset += 4
    offset += 8
    if np.dtype('longdouble').alignment > 8:
        offset += 8
    return offset
class TFAlbertForMaskedLM(metaclass=DummyObject):
    """Placeholder class used when TensorFlow is not installed; instantiating
    it calls requires_backends, which raises an informative error."""
    # Backends this dummy stands in for.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class EvalConfig(Config):
    """Configuration options for evaluation runs."""
    # Whether evaluation is enabled at all.
    evaluate: bool = True
    # Number of episodes per evaluation pass.
    n_eval_episodes: int = 100
    # Presumably evaluates a random baseline instead of the policy — TODO confirm.
    eval_random: bool = False
    # Label used to identify this configuration.
    name: str = 'eval'
    # Presumably varies map dimensions across episodes — TODO confirm.
    vary_map_shapes: bool = False
class AckleyBenchmark(Benchmark):
    """Quality-diversity benchmark wrapping the Ackley function.

    Each of the ``nb_features`` dimensions is bounded by the standard Ackley
    domain [-32.768, 32.768]; fitness is minimized on [0, inf).
    """
    def __init__(self, nb_features: int=2):
        self.nb_features = nb_features
        per_dim_domain = ((- 32.768), 32.768)
        eval_fn = algorithms.partial(illumination_ackley, nb_features=nb_features)
        super().__init__(
            fn=eval_fn,
            ind_domain=per_dim_domain,
            fitness_domain=((0.0, math.inf),),
            features_domain=((per_dim_domain,) * nb_features),
            default_task='minimisation',
        )
class TransformerDecoderLayer(nn.Module):
    """Transformer decoder layer with self-attention, cross-attention and FFN.

    Supports post-norm (default) and pre-norm (``normalize_before``) variants;
    positional embeddings are added to queries/keys only, never to values.
    Returns both the updated target and the cross-attention weights.
    """
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before
    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        """Add positional embedding to *tensor* when one is provided."""
        return (tensor if (pos is None) else (tensor + pos))
    def forward_post(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
        """Post-norm path: sublayer -> residual add -> LayerNorm."""
        # Self-attention over the target sequence (values carry no pos embed).
        q = k = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
        tgt = (tgt + self.dropout1(tgt2))
        tgt = self.norm1(tgt)
        # Cross-attention into the encoder memory; keep the attention weights.
        (tgt2, att) = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), key=self.with_pos_embed(memory, pos), value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)
        tgt = (tgt + self.dropout2(tgt2))
        tgt = self.norm2(tgt)
        # Position-wise feed-forward network.
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = (tgt + self.dropout3(tgt2))
        tgt = self.norm3(tgt)
        return (tgt, att)
    def forward_pre(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
        """Pre-norm path: LayerNorm -> sublayer -> residual add."""
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
        tgt = (tgt + self.dropout1(tgt2))
        tgt2 = self.norm2(tgt)
        (tgt2, att) = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), key=self.with_pos_embed(memory, pos), value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)
        tgt = (tgt + self.dropout2(tgt2))
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = (tgt + self.dropout3(tgt2))
        return (tgt, att)
    def forward(self, tgt, memory, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None):
        """Dispatch to the pre-norm or post-norm implementation."""
        if self.normalize_before:
            return self.forward_pre(tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
class ReshapeChannel(Channel):
    """Deterministic channel x = reshape(z): a pure reshape between shapes.

    ``prev_shape`` is the shape on the z (input) side, ``next_shape`` on the
    x (output) side; messages are passed through with the matching reshape.
    """
    def __init__(self, prev_shape, next_shape):
        self.prev_shape = prev_shape
        self.next_shape = next_shape
        self.repr_init()
    def sample(self, Z):
        """Forward sample: just reshape z into the output shape."""
        return Z.reshape(self.next_shape)
    def math(self):
        # LaTeX label used when rendering the factor graph.
        return '$\\delta$'
    def second_moment(self, tau_z):
        # A reshape does not change the second moment.
        return tau_z
    def compute_forward_message(self, az, bz, ax, bx):
        """Pass the natural parameters forward, reshaping b to the output shape."""
        return (az, bz.reshape(self.next_shape))
    def compute_backward_message(self, az, bz, ax, bx):
        """Pass the natural parameters backward, reshaping b to the input shape."""
        return (ax, bx.reshape(self.prev_shape))
    def compute_forward_state_evolution(self, az, ax, tau_z):
        return az
    def compute_backward_state_evolution(self, az, ax, tau_z):
        return ax
    def compute_log_partition(self, az, bz, ax, bx):
        """Gaussian log-partition of the combined (z-side + x-side) messages."""
        a = (az + ax)
        # Bug fix: the original called bx.rehape(...) (typo) and would raise
        # AttributeError at runtime.
        b = (bz + bx.reshape(self.prev_shape))
        logZ = (0.5 * np.sum((((b ** 2) / a) + np.log(((2 * np.pi) / a)))))
        return logZ
    def compute_mutual_information(self, az, ax, tau_z):
        a = (ax + az)
        I = (0.5 * np.log((a * tau_z)))
        return I
    def compute_free_energy(self, az, ax, tau_z):
        tau_x = self.second_moment(tau_z)
        I = self.compute_mutual_information(az, ax, tau_z)
        A = (((0.5 * ((az * tau_z) + (ax * tau_x))) - I) + (0.5 * np.log((((2 * np.pi) * tau_z) / np.e))))
        return A
class SkewPartitions_all(SkewPartitions):
    """The set of all skew partitions, of every size."""
    def __init__(self):
        # The parent-class flag marks this as the unrestricted ("all") parent.
        SkewPartitions.__init__(self, True)
    def _repr_(self):
        return 'Skew partitions'
    def __iter__(self):
        """Iterate over every skew partition, grouped by increasing size."""
        size = 0
        while True:
            for skp in SkewPartitions_n(size):
                yield self.element_class(self, skp)
            size += 1
def check_install_build_global(options, check_options=None):
    """Disable wheels (with a warning) when per-setup.py options are in use.

    ``check_options`` defaults to ``options``; if any of the build/global/
    install option attributes is set, binary distributions are switched off
    on ``options.format_control``.
    """
    if check_options is None:
        check_options = options
    option_attrs = ['build_options', 'global_options', 'install_options']
    if any(getattr(check_options, attr, None) for attr in option_attrs):
        control = options.format_control
        fmt_ctl_no_binary(control)
        warnings.warn('Disabling all use of wheels due to the use of --build-options / --global-options / --install-options.', stacklevel=2)
class TentPreBN(TentFull):
    """TENT variant that prepends a BatchNorm to the classifier and adapts
    only that BN layer's parameters at test time."""
    def configure_model_optimizer(self, algorithm, alpha):
        """Return a deep-copied algorithm with a PreBN classifier and an Adam
        optimizer over the new BN parameters (lr scaled by ``alpha``)."""
        adapted = copy.deepcopy(algorithm)
        # Wrap the existing classifier so a fresh BN normalizes its inputs.
        adapted.classifier = PreBN(adapted.classifier, adapted.featurizer.n_outputs)
        adapted.network = torch.nn.Sequential(adapted.featurizer, adapted.classifier)
        bn_optimizer = torch.optim.Adam(
            adapted.classifier.bn.parameters(),
            lr=(algorithm.hparams['lr'] * alpha),
            weight_decay=algorithm.hparams['weight_decay'],
        )
        return (adapted, bn_optimizer)
def compute_eisenstein_params(character, k):
    """Dispatch Eisenstein-series parameter computation on the type of *character*.

    An integer is treated as a Gamma1 level; a GammaH group is expanded via
    its level and generators; anything else is handled by the generic helper.
    """
    if isinstance(character, (int, Integer)):
        return __find_eisen_chars_gamma1(character, k)
    if isinstance(character, GammaH_class):
        level = character.level()
        gens = character._generators_for_H()
        return __find_eisen_chars_gammaH(level, gens, k)
    return __find_eisen_chars(character, k)
def target_nll_c(inputs, targets, reduction='none'):
    """Largest softmax-confidence gap between any non-target class and the target.

    ``reduction`` in {'none', 'sum', 'mean'} controls how per-sample gaps are
    aggregated; anything else raises NotImplementedError.
    """
    probs = torch.softmax(inputs, dim=1)
    # nll_loss with probabilities picks -p[target]; negate to recover p[target].
    target_probs = (- F.nll_loss(probs, targets, reduction='none'))
    gaps = (probs - target_probs.view((- 1), 1))
    # Knock the target column out of contention (probabilities differ by < 1).
    gaps = gaps.scatter(1, targets.view((- 1), 1), (- 1))
    max_gap = gaps.max(1)[0]
    if (reduction == 'sum'):
        return max_gap.sum()
    if (reduction == 'mean'):
        return max_gap.mean()
    if (reduction == 'none'):
        return max_gap
    raise NotImplementedError()
class RE23():
    """RE23 pressure-vessel design problem: 2 objectives, 4 variables.

    Objectives: structural cost, and the sum of constraint violations.
    """
    def __init__(self):
        self.problem_name = 'RE23'
        self.n_objectives = 2
        self.n_variables = 4
        self.n_constraints = 0
        self.n_original_constraints = 3
        # Per-variable lower/upper search bounds.
        self.lbound = np.array([1.0, 1.0, 10.0, 10.0])
        self.ubound = np.array([100.0, 100.0, 200.0, 240.0])
    def evaluate(self, x):
        """Return [cost, total constraint violation] for the design vector x."""
        f = np.zeros(self.n_objectives)
        g = np.zeros(self.n_original_constraints)
        # The first two variables are discrete multiples of 0.0625.
        x1 = 0.0625 * int(np.round(x[0]))
        x2 = 0.0625 * int(np.round(x[1]))
        x3 = x[2]
        x4 = x[3]
        f[0] = 0.6224 * x1 * x3 * x4 + 1.7781 * x2 * x3 * x3 + 3.1661 * x1 * x1 * x4 + 19.84 * x1 * x1 * x3
        g[0] = x1 - 0.0193 * x3
        g[1] = x2 - 0.00954 * x3
        g[2] = np.pi * x3 * x3 * x4 + (4.0 / 3.0) * (np.pi * x3 * x3 * x3) - 1296000
        # Negative g values are violations; keep their magnitude, zero otherwise.
        g = np.where(g < 0, -g, 0)
        f[1] = g[0] + g[1] + g[2]
        return f
class StaticTzInfo(BaseTzInfo):
    """A tzinfo for zones with a single, constant UTC offset (no DST)."""
    def fromutc(self, dt):
        """Shift a UTC datetime into this zone and attach self as tzinfo."""
        if ((dt.tzinfo is not None) and (dt.tzinfo is not self)):
            raise ValueError('fromutc: dt.tzinfo is not self')
        return (dt + self._utcoffset).replace(tzinfo=self)
    def utcoffset(self, dt, is_dst=None):
        # Constant for a static zone; is_dst is irrelevant.
        return self._utcoffset
    def dst(self, dt, is_dst=None):
        # Static zones never observe DST.
        return _notime
    def tzname(self, dt, is_dst=None):
        return self._tzname
    def localize(self, dt, is_dst=False):
        """Attach this zone to a naive datetime; reject aware datetimes."""
        if (dt.tzinfo is not None):
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)
    def normalize(self, dt, is_dst=False):
        """Convert an aware datetime into this zone (no-op if already here)."""
        if (dt.tzinfo is self):
            return dt
        if (dt.tzinfo is None):
            raise ValueError('Naive time - no tzinfo set')
        return dt.astimezone(self)
    def __repr__(self):
        return ('<StaticTzInfo %r>' % (self.zone,))
    def __reduce__(self):
        # Pickle via the zone name so unpickling resolves the shared instance.
        return (pytz._p, (self.zone,))
class ConvBlock(Layer):
    """Conv2D -> optional BatchNorm (renorm) -> optional MaxPool -> ReLU block."""
    def __init__(self, features: int, kernel_size: int, stride: Tuple[(int, int)], cnn_padding: str, pool_size: Tuple[(int, int)], batchnorm: bool, **kwargs):
        super(ConvBlock, self).__init__(**kwargs)
        self.conv = Conv2D(features, kernel_size, strides=stride, padding=cnn_padding)
        self.bn = (BatchNormalization(renorm=True, renorm_clipping={'rmax': 100.0, 'rmin': 0.1, 'dmax': 10.0}, trainable=True) if batchnorm else None)
        # Skip pooling for a 1x1 pool. The original compared
        # list(pool_size) > [1, 1] lexicographically, which only works by
        # accident for sizes >= 1; made the intent explicit here.
        self.pool = (MaxPool2D(pool_size=pool_size, padding='same') if any((dim > 1) for dim in pool_size) else None)
        # Stored so get_config() can round-trip the layer.
        self._features = features
        self._kernel_size = kernel_size
        self._stride = stride
        self._cnn_padding = cnn_padding
        self._pool_size = pool_size
        self._batchnorm = batchnorm
    def call(self, inputs, training=False):
        """Apply conv, then BN (respecting training mode), pool, and ReLU."""
        x = self.conv(inputs)
        if (self.bn is not None):
            x = self.bn(x, training=training)
        if (self.pool is not None):
            x = self.pool(x)
        x = tf.nn.relu(x)
        return x
    def get_config(self) -> dict:
        """Return the constructor arguments merged into the base-layer config."""
        super_config = super(ConvBlock, self).get_config()
        config = {'features': self._features, 'kernel_size': self._kernel_size, 'stride': self._stride, 'cnn_padding': self._cnn_padding, 'pool_size': self._pool_size, 'batchnorm': self._batchnorm}
        return dict((list(super_config.items()) + list(config.items())))
class Localization(nn.Module):
    """Moment localization head: attends over video features with a
    query-conditioned filter, then predicts start/end frame distributions.

    Training returns KL-divergence losses against ground-truth start/end
    distributions plus an attention regularization term.
    """
    def __init__(self, cfg):
        super(Localization, self).__init__()
        self.cfg = cfg
        self.batch_size = cfg.BATCH_SIZE_TRAIN
        # Produces a query-dependent filter from the text tokens.
        self.model_df = DynamicFilter(cfg)
        self.reduction = nn.Linear(cfg.REDUCTION.INPUT_SIZE, cfg.REDUCTION.OUTPUT_SIZE)
        self.multimodal_fc1 = nn.Linear((512 * 2), 1)
        self.multimodal_fc2 = nn.Linear(512, 1)
        self.rnn_localization = nn.GRU(input_size=cfg.LOCALIZATION.INPUT_SIZE, hidden_size=cfg.LOCALIZATION.HIDDEN_SIZE, num_layers=cfg.LOCALIZATION.NUM_LAYERS, bias=cfg.LOCALIZATION.BIAS, dropout=cfg.LOCALIZATION.DROPOUT, bidirectional=cfg.LOCALIZATION.BIDIRECTIONAL, batch_first=cfg.LOCALIZATION.BATCH_FIRST)
        self.pooling = POOLING.MeanPoolingLayer()
        # Per-timestep scores for the start/end boundary classifiers.
        self.starting = nn.Linear(cfg.CLASSIFICATION.INPUT_SIZE, cfg.CLASSIFICATION.OUTPUT_SIZE)
        self.ending = nn.Linear(cfg.CLASSIFICATION.INPUT_SIZE, cfg.CLASSIFICATION.OUTPUT_SIZE)
    def attention(self, videoFeat, filter, lengths):
        """Dot-product score of each video frame against the query filter."""
        pred_local = torch.bmm(videoFeat, filter.unsqueeze(2)).squeeze()
        return pred_local
    def get_mask_from_sequence_lengths(self, sequence_lengths: torch.Tensor, max_length: int):
        """Binary (batch, max_length) mask: 1 where position < sequence length."""
        ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)
        range_tensor = ones.cumsum(dim=1)
        return (sequence_lengths.unsqueeze(1) >= range_tensor).long()
    def masked_softmax(self, vector: torch.Tensor, mask: torch.Tensor, dim: int=(- 1), memory_efficient: bool=False, mask_fill_value: float=(- 1e+32)):
        """Softmax that ignores masked positions; adds 1e-13 to avoid zeros."""
        if (mask is None):
            result = torch.nn.functional.softmax(vector, dim=dim)
        else:
            mask = mask.float()
            while (mask.dim() < vector.dim()):
                mask = mask.unsqueeze(1)
            if (not memory_efficient):
                # Zero-out masked entries and renormalize.
                result = torch.nn.functional.softmax((vector * mask), dim=dim)
                result = (result * mask)
                result = (result / (result.sum(dim=dim, keepdim=True) + 1e-13))
            else:
                # Fill masked entries with a large negative value instead.
                masked_vector = vector.masked_fill((1 - mask).byte(), mask_fill_value)
                result = torch.nn.functional.softmax(masked_vector, dim=dim)
        return (result + 1e-13)
    def mask_softmax(self, feat, mask):
        return self.masked_softmax(feat, mask, memory_efficient=False)
    def kl_div(self, p, gt, length):
        """KL(gt || p) per sample over the valid (unpadded) prefix.

        Returns (mean over batch, per-sample losses).
        """
        individual_loss = []
        for i in range(length.size(0)):
            vlength = int(length[i])
            ret = (gt[i][:vlength] * torch.log((p[i][:vlength] / gt[i][:vlength])))
            individual_loss.append((- torch.sum(ret)))
        individual_loss = torch.stack(individual_loss)
        return (torch.mean(individual_loss), individual_loss)
    def forward(self, videoFeat, videoFeat_lengths, tokens, tokens_lengths, start, end, localiz, frame_start, frame_end):
        """Compute boundary predictions and losses for a batch.

        Returns (total_loss, per-sample loss, start dist, end dist,
        attention, attention loss).
        """
        mask = self.get_mask_from_sequence_lengths(videoFeat_lengths, int(videoFeat.shape[1]))
        (filter_start, lengths) = self.model_df(tokens, tokens_lengths, videoFeat)
        videoFeat = self.reduction(videoFeat)
        attention = self.attention(videoFeat, filter_start, lengths)
        # Scale scores by 1/sqrt(length) before the masked softmax.
        rqrt_length = torch.rsqrt(lengths.float()).unsqueeze(1).repeat(1, attention.shape[1])
        attention = (attention * rqrt_length)
        attention = self.mask_softmax(attention, mask)
        # Re-weight the video features by the attention distribution.
        videoFeat_hat = (attention.unsqueeze(2).repeat(1, 1, self.cfg.REDUCTION.OUTPUT_SIZE) * videoFeat)
        (output, _) = feed_forward_rnn(self.rnn_localization, videoFeat_hat, lengths=videoFeat_lengths)
        pred_start = self.starting(output.view((- 1), output.size(2))).view((- 1), output.size(1), 1).squeeze()
        pred_start = self.mask_softmax(pred_start, mask)
        pred_end = self.ending(output.view((- 1), output.size(2))).view((- 1), output.size(1), 1).squeeze()
        pred_end = self.mask_softmax(pred_end, mask)
        (start_loss, individual_start_loss) = self.kl_div(pred_start, start, videoFeat_lengths)
        (end_loss, individual_end_loss) = self.kl_div(pred_end, end, videoFeat_lengths)
        individual_loss = (individual_start_loss + individual_end_loss)
        # Penalize attention mass outside the localized segment.
        atten_loss = torch.sum((- ((1 - localiz) * torch.log(((1 - attention) + 1e-12)))), dim=1)
        atten_loss = torch.mean(atten_loss)
        # NOTE(review): the attention loss is unconditionally included; the
        # dead else-branch suggests it was once toggled by a config flag.
        if True:
            total_loss = ((start_loss + end_loss) + atten_loss)
        else:
            total_loss = (start_loss + end_loss)
        return (total_loss, individual_loss, pred_start, pred_end, attention, atten_loss)
def default_representative(part, G):
    """Build a group element whose cycle type is the partition *part*.

    Consecutive runs of G's domain are grouped into cycles of the sizes
    listed in *part*.
    """
    domain = G.domain()
    cycles = []
    position = 0
    for cycle_len in part:
        cycles.append(tuple(domain[position:(position + cycle_len)]))
        position += cycle_len
    return G.element_class(cycles, G, check=False)
def bidirectional_merge_overlapping(A, B, key=None):
    """Merge two sequences that overlap, returning both merge directions.

    Tries, in order: A's suffix matching B's prefix, B's suffix matching A's
    prefix, B strictly contained in A, and A strictly contained in B. The
    optional *key* maps elements before comparison. Raises ValueError when
    no overlap exists.
    """
    if key is None:
        keys_a, keys_b = A, B
    else:
        keys_a = tuple(key(item) for item in A)
        keys_b = tuple(key(item) for item in B)

    def _contained_at(outer, inner):
        # Index of *inner* strictly inside *outer* (not touching either end);
        # raises StopIteration when impossible or not found.
        if len(inner) > (len(outer) - 2):
            raise StopIteration
        candidates = (idx for idx in range(1, len(outer) - len(inner))
                      if outer[idx:(idx + len(inner))] == inner)
        return next(candidates)

    def _suffix_prefix_len(left, right):
        # Length of the longest suffix of *left* equal to a prefix of *right*.
        for size in range(min(len(left), len(right)), 0, -1):
            if left[-size:] == right[:size]:
                return size
        return 0

    size = _suffix_prefix_len(keys_a, keys_b)
    if size > 0:
        return ((A + B[size:]), (A[:-size] + B))
    size = _suffix_prefix_len(keys_b, keys_a)
    if size > 0:
        return ((B[:-size] + A), (B + A[size:]))
    try:
        idx = _contained_at(keys_a, keys_b)
    except StopIteration:
        pass
    else:
        return (A, ((A[:idx] + B) + A[(idx + len(B)):]))
    try:
        idx = _contained_at(keys_b, keys_a)
    except StopIteration:
        pass
    else:
        return (((B[:idx] + A) + B[(idx + len(A)):]), B)
    raise ValueError('Input does not have an overlap.')
def resnet50w5(pretrained=True, **kwargs):
    """Build a 5x-width ResNet-50, optionally loading SwAV pre-trained weights.

    Extra keyword arguments are forwarded to the model constructor.
    """
    model = _resnet50w5(**kwargs)
    if pretrained:
        # NOTE(review): the checkpoint URL string was corrupted in the original
        # source (it broke the syntax); restored following the SwAV release
        # naming scheme — verify the exact URL before shipping.
        state_dict = torch.hub.load_state_dict_from_url(url='https://dl.fbaipublicfiles.com/swav/swav_RN50w5_400ep_pretrained.pth.tar', map_location='cpu')
        # Strip the DataParallel 'module.' prefix so keys match a bare model.
        state_dict = {k.replace('module.', ''): v for (k, v) in state_dict.items()}
        model.load_state_dict(state_dict, strict=False)
    return model
def _format_data(root_path, data_tag, name, wav_folder):
    """Lay out one AN4 split (train/test) as txt/ and wav/ output folders.

    NOTE(review): relies on the module-level ``args`` namespace; assumes
    ``args.target_dir`` is a string suitable for plain concatenation.
    """
    split_dir = f'{args.target_dir}{data_tag}/{name}/'
    transcript_dir = split_dir + '/txt/'
    wav_out_dir = split_dir + '/wav/'
    # Fails loudly if the target folders already exist (as the original did).
    os.makedirs(transcript_dir)
    os.makedirs(wav_out_dir)
    source_wav_dir = root_path + 'wav/'
    file_ids = root_path + f'etc/an4_{data_tag}.fileids'
    transcripts = root_path + f'etc/an4_{data_tag}.transcription'
    _convert_audio_to_wav(source_wav_dir + wav_folder)
    _format_files(file_ids, transcript_dir, wav_out_dir, transcripts, source_wav_dir)
def contextual_accuracy(expected, observed, data=None, start=None, end=None, weighted=True):
    """Accuracy derived from the contextual confusion matrix.

    ``weighted`` is bound into the confusion-matrix function; all other
    arguments are forwarded to the generic ``_accuracy`` driver.
    """
    def confusion(exp, obs, dat, first, last):
        # Adapts the 6-argument confusion matrix to the 5-argument interface
        # expected by _accuracy.
        return contextual_confusion_matrix(exp, obs, dat, first, last, weighted)
    return _accuracy(expected, observed, data, start, end, confusion)
class Mlp(nn.Module):
    """Single-layer MLP: Linear -> activation -> Dropout (dimension-preserving).

    Args:
        in_features: input (and output) feature dimension.
        act_layer: activation-module class, instantiated without arguments.
        drop: dropout probability.
    """
    def __init__(self, in_features, act_layer=nn.GELU, drop=0.0):
        super().__init__()
        self.fc1 = nn.Linear(in_features, in_features)
        self.act = act_layer()
        self.drop = nn.Dropout(drop)
    def forward(self, x):
        # Bug fix: the original called self.fc1() without the input tensor,
        # which raised a TypeError at runtime.
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        return x
def find_numba_methods(layouttype, behavior):
    """Yield (method name, typer, lowerer) triples registered for a record type.

    Scans the overlaid behavior dict for '__numba_typer__' keys matching the
    layout's '__record__' parameter with an empty argument signature, and
    pairs each with its '__numba_lower__' counterpart.
    """
    behavior = overlay_behavior(behavior)
    record_name = layouttype.parameters.get('__record__')
    if not isinstance(record_name, str):
        return
    for key, typer in behavior.items():
        if (isinstance(key, tuple) and (len(key) == 4)
                and (key[0] == '__numba_typer__')
                and (key[1] == record_name)
                and (key[3] == ())):
            lower = behavior[('__numba_lower__', key[1], key[2], ())]
            yield (key[2], typer, lower)
# NOTE(review): the decorator name was truncated in the original source
# (bare "_spec_function('landing_page')" line); restored as the standard
# HELM @run_spec_function registration — confirm against the file's imports.
@run_spec_function('landing_page')
def get_landing_page_spec(run_human_eval: bool=False) -> RunSpec:
    """RunSpec for the landing-page image-generation scenario.

    With ``run_human_eval``, adds human-critique metrics (aesthetics,
    subject, originality) over 25 examples.
    """
    scenario_spec = ScenarioSpec(class_name='helm.benchmark.scenarios.image_generation.landing_page_scenario.LandingPageScenario', args={})
    adapter_spec = get_image_generation_adapter_spec(num_outputs=4)
    metric_specs: List[MetricSpec] = get_core_heim_metric_specs()
    if run_human_eval:
        metric_specs += get_heim_critique_metric_specs(include_aesthetics=True, include_subject=True, include_originality=True, num_examples=25)
    return RunSpec(name='landing_page', scenario_spec=scenario_spec, adapter_spec=adapter_spec, metric_specs=metric_specs, groups=['landing_page'])
def get_binary_stream(name):
    """Open and return the standard binary stream registered under *name*.

    Raises TypeError when *name* is not a known standard stream.
    """
    opener = binary_streams.get(name)
    if opener is not None:
        return opener()
    raise TypeError("Unknown standard stream '{}'".format(name))
def parse_args():
    """Parse command-line options for the GQA conversion script."""
    arg_parser = argparse.ArgumentParser('Conversion script')
    # All path options are required string arguments.
    required_paths = [
        ('--data_path', 'Path to the gqa dataset'),
        ('--img_path', 'Path to the gqa image dataset'),
        ('--sg_path', 'Path to the gqa dataset scene graph'),
        ('--vg_img_data_path', 'Path to image meta data for VG'),
    ]
    for flag, help_text in required_paths:
        arg_parser.add_argument(flag, required=True, type=str, help=help_text)
    arg_parser.add_argument('--out_path', required=True, default='', type=str, help='Path where to export the resulting dataset. ')
    return arg_parser.parse_args()
def register_Ns3Ipv6InterfaceContainer_methods(root_module, cls):
    """Register the C++ constructors/methods of ns3::Ipv6InterfaceContainer
    on the PyBindGen class wrapper.

    Auto-generated binding declarations; edit the generator, not this list.
    """
    cls.add_constructor([param('ns3::Ipv6InterfaceContainer const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Ipv6 >', 'ipv6'), param('uint32_t', 'interface')])
    cls.add_method('Add', 'void', [param('ns3::Ipv6InterfaceContainer const &', 'c')])
    cls.add_method('Add', 'void', [param('std::string', 'ipv6Name'), param('uint32_t', 'interface')])
    cls.add_method('Begin', 'ns3::Ipv6InterfaceContainer::Iterator', [], is_const=True)
    cls.add_method('End', 'ns3::Ipv6InterfaceContainer::Iterator', [], is_const=True)
    cls.add_method('GetAddress', 'ns3::Ipv6Address', [param('uint32_t', 'i'), param('uint32_t', 'j')], is_const=True)
    cls.add_method('GetInterfaceIndex', 'uint32_t', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetLinkLocalAddress', 'ns3::Ipv6Address', [param('uint32_t', 'i')])
    cls.add_method('GetLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'address')])
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    cls.add_method('SetDefaultRoute', 'void', [param('uint32_t', 'i'), param('uint32_t', 'router')])
    cls.add_method('SetDefaultRoute', 'void', [param('uint32_t', 'i'), param('ns3::Ipv6Address', 'routerAddr')])
    cls.add_method('SetDefaultRouteInAllNodes', 'void', [param('uint32_t', 'router')])
    cls.add_method('SetDefaultRouteInAllNodes', 'void', [param('ns3::Ipv6Address', 'routerAddr')])
    cls.add_method('SetForwarding', 'void', [param('uint32_t', 'i'), param('bool', 'state')])
    return
def merge_all_csvs(args):
    """Merge the cached per-shard CSV files for this output into the final CSV.

    Cache files live under ``<output parent>/caches`` and are grouped by
    ``group_cache_paths`` before each group is merged with ``merge_csvs``.
    """
    cache_dir = args.data.output.path.parent / 'caches'
    name = args.data.output.path.name
    groups = group_cache_paths(list(cache_dir.glob('cache_*_*_{}'.format(name))))
    # Process groups in a deterministic (sorted) order.
    for key in sorted(groups):
        print('processing cache set {}'.format(key))
        print('merging csvs')
        out_path = args.data.output.path
        counts = merge_csvs(groups[key], out_path)
        if args.verbose:
            print('Saved Results: added {} lines to {}'.format(counts, out_path))
def getBoundingBoxes(directory, isGT, bbFormat, coordType, allBoundingBoxes=None, allClasses=None, imgSize=(0, 0)):
    """Parse every ``*.txt`` annotation file in *directory* into bounding boxes.

    Ground-truth lines are ``<class> <x> <y> <w> <h>``; detection lines are
    ``<class> <confidence> <x> <y> <w> <h>``.

    Returns:
        tuple: ``(allBoundingBoxes, allClasses)`` — the accumulated
        ``BoundingBoxes`` collection and the class ids in first-seen order.
    """
    if allBoundingBoxes is None:
        allBoundingBoxes = BoundingBoxes()
    if allClasses is None:
        allClasses = []
    # NOTE(review): chdir mutates the process-wide working directory; kept
    # for backward compatibility with callers that may rely on it.
    os.chdir(directory)
    files = glob.glob('*.txt')
    files.sort()
    for f in files:
        nameOfImage = f.replace('.txt', '')
        # 'with' guarantees the handle is closed even when a malformed line
        # raises mid-file (the original leaked the handle in that case).
        with open(f, 'r') as fh1:
            for line in fh1:
                line = line.replace('\n', '')
                if line.replace(' ', '') == '':
                    continue
                splitLine = line.split(' ')
                if isGT:
                    idClass = splitLine[0]
                    x = float(splitLine[1])
                    y = float(splitLine[2])
                    w = float(splitLine[3])
                    h = float(splitLine[4])
                    bb = BoundingBox(nameOfImage, idClass, x, y, w, h, coordType, imgSize, BBType.GroundTruth, format=bbFormat)
                else:
                    idClass = splitLine[0]
                    confidence = float(splitLine[1])
                    x = float(splitLine[2])
                    y = float(splitLine[3])
                    w = float(splitLine[4])
                    h = float(splitLine[5])
                    bb = BoundingBox(nameOfImage, idClass, x, y, w, h, coordType, imgSize, BBType.Detected, confidence, format=bbFormat)
                allBoundingBoxes.addBoundingBox(bb)
                if idClass not in allClasses:
                    allClasses.append(idClass)
    return (allBoundingBoxes, allClasses)
def _dispatch_kl(type_p, type_q):
    """Find the most specific registered KL-divergence implementation.

    Scans ``_KL_REGISTRY`` for all (super_p, super_q) pairs matching the
    argument types, then resolves the best match from both orderings; warns
    when the two orderings disagree (ambiguous registration).
    """
    candidates = [
        (super_p, super_q)
        for super_p, super_q in _KL_REGISTRY
        if issubclass(type_p, super_p) and issubclass(type_q, super_q)
    ]
    if not candidates:
        return NotImplemented
    # Most-specific match scanning left-to-right, then right-to-left.
    left_p, left_q = min(_Match(*m) for m in candidates).types
    right_q, right_p = min(_Match(*reversed(m)) for m in candidates).types
    left_fun = _KL_REGISTRY[(left_p, left_q)]
    right_fun = _KL_REGISTRY[(right_p, right_q)]
    if left_fun is not right_fun:
        warnings.warn(
            'Ambiguous kl_divergence({}, {}). Please register_kl({}, {})'.format(
                type_p.__name__, type_q.__name__, left_p.__name__, right_q.__name__),
            RuntimeWarning)
    return left_fun
def main():
    """Convert summaries to ROUGE format, optionally sentence-splitting first."""
    args = get_args()
    if args.split_sents:
        # Split into one-sentence-per-line files in a temp dir first.
        from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
        split_dir = mkdtemp()
        PunktSentenceSplitter.split_files(args.input_dir, split_dir)
        args.input_dir = split_dir
    Rouge155.convert_summaries_to_rouge_format(args.input_dir, args.output_dir)
def test_callable():
    """Calling a MyTestClass instance should scale its input by the stored factor."""
    data = np.random.rand(20)
    instance = MyTestClass(12)
    assert np.allclose(instance(data), data * 12)
class Report(OrderedDict):
    """OrderedDict-backed container merging a batch and a model's output.

    Entries are reachable both as items (``report["losses"]``) and as
    attributes (``report.losses``) via ``__getattr__``/``__setattr__``.
    """

    def __init__(self, batch: SampleList=None, model_output: Dict[(str, Any)]=None, *args):
        super().__init__(self)
        if batch is None:
            return
        if model_output is None:
            model_output = {}
        # A batch supplied as [(key, value), ...] tuples is loaded verbatim.
        if self._check_and_load_tuple(batch):
            return
        all_args = [batch, model_output] + [*args]
        for idx, arg in enumerate(all_args):
            if not isinstance(arg, collections.abc.Mapping):
                raise TypeError('Argument {:d}, {} must be of instance of collections.abc.Mapping'.format(idx, arg))
        self.batch_size = batch.get_batch_size()
        self.warning_string = 'Updating forward report with key {}{}, but it already exists in {}. Please consider using a different key, as this can cause issues during loss and metric calculations.'
        for idx, arg in enumerate(all_args):
            for key, item in arg.items():
                # Only warn for duplicates introduced by the extra *args mappings.
                if (key in self) and (idx >= 2):
                    log = self.warning_string.format(key, '', 'in previous arguments to report')
                    warnings.warn(log)
                self[key] = item

    def get_batch_size(self):
        """Return the batch size recorded at construction time."""
        return self.batch_size

    # BUGFIX: restored the @property/@batch_size.setter decorators. The
    # original had a bare 'def batch_size' and a stray '_size.setter'
    # expression, which raised NameError when the class body executed.
    @property
    def batch_size(self):
        return self._batch_size

    @batch_size.setter
    def batch_size(self, batch_size: int):
        self._batch_size = batch_size

    def _check_and_load_tuple(self, batch):
        """Load *batch* if it is a sequence of (key, value) pairs.

        Returns True when the batch was consumed that way, False otherwise.
        """
        if isinstance(batch, collections.abc.Mapping):
            return False
        if isinstance(batch[0], (tuple, list)) and isinstance(batch[0][0], str):
            for kv_pair in batch:
                self[kv_pair[0]] = kv_pair[1]
            return True
        else:
            return False

    def __setattr__(self, key: str, value: Any):
        # Attribute writes are stored as dict items.
        self[key] = value

    def __getattr__(self, key: str):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def fields(self):
        """Return all keys currently stored in the report."""
        return list(self.keys())

    def apply_fn(self, fn: Callable, fields: Optional[List[str]]=None):
        """Apply *fn* to every value, recursing one level into lists/dicts.

        When *fields* is a list/tuple, only those keys are touched.
        Returns self for chaining.
        """
        for key in self.keys():
            if (fields is not None) and isinstance(fields, (list, tuple)):
                if key not in fields:
                    continue
            self[key] = fn(self[key])
            # BUGFIX: collections.MutableSequence was removed in Python 3.10;
            # the collections.abc alias is the supported spelling.
            if isinstance(self[key], collections.abc.MutableSequence):
                for idx, item in enumerate(self[key]):
                    self[key][idx] = fn(item)
            elif isinstance(self[key], dict):
                for subkey in self[key].keys():
                    self[key][subkey] = fn(self[key][subkey])
        return self

    def detach(self):
        """Detach every tensor in the report from the autograd graph."""
        return self.apply_fn(detach_tensor)

    def to(self, device: Union[(torch.device, str)], non_blocking: bool=True, fields: Optional[List[str]]=None):
        """Move every value exposing a ``.to`` method to *device*."""
        if not isinstance(device, torch.device):
            if not isinstance(device, str):
                raise TypeError("device must be either 'str' or 'torch.device' type, {} found".format(type(device)))
            device = torch.device(device)

        def fn(x):
            if hasattr(x, 'to'):
                x = x.to(device, non_blocking=non_blocking)
            return x
        return self.apply_fn(fn, fields)

    def accumulate_tensor_fields_and_loss(self, report: 'Report', field_list: List[str]):
        """Concatenate the listed tensor fields from *report* and accumulate losses."""
        for key in field_list:
            if key == '__prediction_report__':
                continue
            if key not in self.keys():
                warnings.warn((f'{key} not found in report. Metrics calculation ' + 'might not work as expected.'))
                continue
            if isinstance(self[key], torch.Tensor):
                self[key] = torch.cat((self[key], report[key]), dim=0)
        self._accumulate_loss(report)

    def _accumulate_loss(self, report: 'Report'):
        # Sum matching loss tensors in-place; unknown keys only warn.
        for key, value in report.losses.items():
            if key not in self.losses.keys():
                warnings.warn((f'{key} not found in report. Loss calculation ' + 'might not work as expected.'))
                continue
            if isinstance(self.losses[key], torch.Tensor):
                self.losses[key] += value

    def copy(self):
        """Return a deep copy of this report."""
        report = Report()
        fields = self.fields()
        for field in fields:
            report[field] = copy.deepcopy(self[field])
        return report
def retrieval_yr(cls, year, target):
    """Download one calendar year of *cls*'s variable from the ECMWF MARS
    archive into *target* as netCDF.

    Surface ('sfc') requests include the forecast step; pressure-level
    ('pl') requests include the level list instead.
    """
    from ecmwfapi import ECMWFDataServer
    server = ECMWFDataServer()
    # Fields shared by both request flavours.
    request = {
        'dataset': cls.dataset,
        'class': cls.dclass,
        'expver': '1',
        'grid': '{}/{}'.format(cls.grid, cls.grid),
        'date': '{}-01-01/TO/{}-12-31'.format(year, year),
        'levtype': cls.levtype,
        'param': cls.var_cf_code,
        'stream': cls.stream,
        'time': cls.time_ana,
        'type': cls.type,
        'format': 'netcdf',
        'target': target,
    }
    if cls.levtype == 'sfc':
        request['step'] = cls.step
        server.retrieve(request)
    elif cls.levtype == 'pl':
        request['levelist'] = cls.lvllist
        server.retrieve(request)
def main():
    """Fine-tune / evaluate / run inference for a seq2seq translation model.

    Parses model, data and training arguments (from a single JSON file or
    the command line), loads the dataset, tokenizer and model, tokenizes the
    requested splits, then runs train / eval / predict and writes metrics,
    generated predictions and a model card.
    """
    # --- Argument parsing: a single JSON file or standard CLI flags. ---
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # --- Logging: propagate the per-process log level to all libraries. ---
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # BUGFIX: the two message halves were concatenated without a separator,
    # producing "n_gpu: 1distributed training"; a ", " is inserted.
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    logger.info(f'Training/evaluation parameters {training_args}')
    if ((data_args.source_prefix is None) and (model_args.model_name_or_path in ['t5-small', 't5-base', 't5-large', 't5-3b', 't5-11b'])):
        logger.warning("You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with `--source_prefix 'translate English to German: ' `")
    # --- Detect an existing checkpoint to resume from. ---
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    set_seed(training_args.seed)
    # --- Load the raw datasets, either from the hub or from local files. ---
    if (data_args.dataset_name is not None):
        raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        data_files = {}
        if (data_args.train_file is not None):
            data_files['train'] = data_args.train_file
            extension = data_args.train_file.split('.')[(- 1)]
        if (data_args.validation_file is not None):
            data_files['validation'] = data_args.validation_file
            extension = data_args.validation_file.split('.')[(- 1)]
        if (data_args.test_file is not None):
            data_files['test'] = data_args.test_file
            extension = data_args.test_file.split('.')[(- 1)]
        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
    # --- Config, tokenizer, model. ---
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    model = AutoModelForSeq2SeqLM.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    model.resize_token_embeddings(len(tokenizer))
    # MBart needs the target-language code as decoder start token.
    if ((model.config.decoder_start_token_id is None) and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast))):
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
    if (model.config.decoder_start_token_id is None):
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')
    prefix = (data_args.source_prefix if (data_args.source_prefix is not None) else '')
    # --- Column discovery for the split we are going to use. ---
    if training_args.do_train:
        column_names = raw_datasets['train'].column_names
    elif training_args.do_eval:
        column_names = raw_datasets['validation'].column_names
    elif training_args.do_predict:
        column_names = raw_datasets['test'].column_names
    else:
        logger.info('There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.')
        return
    # Multilingual tokenizers need explicit source/target language codes.
    if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):
        assert ((data_args.target_lang is not None) and (data_args.source_lang is not None)), f'{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --source_lang and --target_lang arguments.'
        tokenizer.src_lang = data_args.source_lang
        tokenizer.tgt_lang = data_args.target_lang
        forced_bos_token_id = (tokenizer.lang_code_to_id[data_args.forced_bos_token] if (data_args.forced_bos_token is not None) else None)
        model.config.forced_bos_token_id = forced_bos_token_id
    source_lang = data_args.source_lang.split('_')[0]
    target_lang = data_args.target_lang.split('_')[0]
    max_target_length = data_args.max_target_length
    padding = ('max_length' if data_args.pad_to_max_length else False)
    if ((training_args.label_smoothing_factor > 0) and (not hasattr(model, 'prepare_decoder_input_ids_from_labels'))):
        logger.warning(f'label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory')

    def preprocess_function(examples):
        # Tokenize source (with optional prefix) and target sides; pad
        # tokens in the labels are replaced with -100 so they are ignored
        # by the loss when padding to max length.
        inputs = [ex[source_lang] for ex in examples['translation']]
        targets = [ex[target_lang] for ex in examples['translation']]
        inputs = [(prefix + inp) for inp in inputs]
        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
        with tokenizer.as_target_tokenizer():
            labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
        if ((padding == 'max_length') and data_args.ignore_pad_token_for_loss):
            labels['input_ids'] = [[(l if (l != tokenizer.pad_token_id) else (- 100)) for l in label] for label in labels['input_ids']]
        model_inputs['labels'] = labels['input_ids']
        return model_inputs
    # --- Tokenize the requested splits. ---
    if training_args.do_train:
        if ('train' not in raw_datasets):
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        if (data_args.max_train_samples is not None):
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on train dataset')
    if training_args.do_eval:
        max_target_length = data_args.val_max_target_length
        if ('validation' not in raw_datasets):
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = raw_datasets['validation']
        if (data_args.max_eval_samples is not None):
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on validation dataset')
    if training_args.do_predict:
        max_target_length = data_args.val_max_target_length
        if ('test' not in raw_datasets):
            raise ValueError('--do_predict requires a test dataset')
        predict_dataset = raw_datasets['test']
        if (data_args.max_predict_samples is not None):
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
        with training_args.main_process_first(desc='prediction dataset map pre-processing'):
            predict_dataset = predict_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on prediction dataset')
    # --- Collator and metric. ---
    label_pad_token_id = ((- 100) if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id)
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    else:
        data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=(8 if training_args.fp16 else None))
    metric = load_metric('sacrebleu')

    def postprocess_text(preds, labels):
        # sacrebleu expects a list of references per prediction.
        preds = [pred.strip() for pred in preds]
        labels = [[label.strip()] for label in labels]
        return (preds, labels)

    def compute_metrics(eval_preds):
        # Decode, strip, then score with sacrebleu; also report mean
        # generated length.
        (preds, labels) = eval_preds
        if isinstance(preds, tuple):
            preds = preds[0]
        decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
        if data_args.ignore_pad_token_for_loss:
            labels = np.where((labels != (- 100)), labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
        (decoded_preds, decoded_labels) = postprocess_text(decoded_preds, decoded_labels)
        result = metric.compute(predictions=decoded_preds, references=decoded_labels)
        result = {'bleu': result['score']}
        prediction_lens = [np.count_nonzero((pred != tokenizer.pad_token_id)) for pred in preds]
        result['gen_len'] = np.mean(prediction_lens)
        result = {k: round(v, 4) for (k, v) in result.items()}
        return result
    trainer = Seq2SeqTrainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=(compute_metrics if training_args.predict_with_generate else None))
    # --- Training. ---
    if training_args.do_train:
        checkpoint = None
        if (training_args.resume_from_checkpoint is not None):
            checkpoint = training_args.resume_from_checkpoint
        elif (last_checkpoint is not None):
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    results = {}
    max_length = (training_args.generation_max_length if (training_args.generation_max_length is not None) else data_args.val_max_target_length)
    num_beams = (data_args.num_beams if (data_args.num_beams is not None) else training_args.generation_num_beams)
    # --- Evaluation. ---
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix='eval')
        max_eval_samples = (data_args.max_eval_samples if (data_args.max_eval_samples is not None) else len(eval_dataset))
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    # --- Prediction (and writing generated translations to disk). ---
    if training_args.do_predict:
        logger.info('*** Predict ***')
        predict_results = trainer.predict(predict_dataset, metric_key_prefix='predict', max_length=max_length, num_beams=num_beams)
        metrics = predict_results.metrics
        max_predict_samples = (data_args.max_predict_samples if (data_args.max_predict_samples is not None) else len(predict_dataset))
        metrics['predict_samples'] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics('predict', metrics)
        trainer.save_metrics('predict', metrics)
        if trainer.is_world_process_zero():
            if training_args.predict_with_generate:
                predictions = tokenizer.batch_decode(predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                predictions = [pred.strip() for pred in predictions]
                output_prediction_file = os.path.join(training_args.output_dir, 'generated_predictions.txt')
                with open(output_prediction_file, 'w', encoding='utf-8') as writer:
                    writer.write('\n'.join(predictions))
    # --- Model card / hub push metadata. ---
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'translation'}
    if (data_args.dataset_name is not None):
        kwargs['dataset_tags'] = data_args.dataset_name
        if (data_args.dataset_config_name is not None):
            kwargs['dataset_args'] = data_args.dataset_config_name
            kwargs['dataset'] = f'{data_args.dataset_name} {data_args.dataset_config_name}'
        else:
            kwargs['dataset'] = data_args.dataset_name
    languages = [l for l in [data_args.source_lang, data_args.target_lang] if (l is not None)]
    if (len(languages) > 0):
        kwargs['language'] = languages
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
    return results
def serre_cartan_basis(n, p=2, bound=1, **kwds):
    """Return the Serre-Cartan (admissible) basis in degree ``n`` of the
    mod-``p`` Steenrod algebra, as a tuple of exponent tuples.

    At p=2 each element is ``(i_1, ..., i_k)`` with ``i_j >= 2*i_{j+1}`` and
    the trailing entry at least ``bound``; at odd primes the tuples
    interleave Bockstein exponents with powers of P.
    """
    generic = kwds.get('generic', p != 2)
    if n == 0:
        return ((),)
    if not generic:
        # Mod 2: start from Sq^n, then recursively glue a final entry `last`
        # onto every admissible sequence of degree n - last whose own last
        # entry is at least 2*last (hence last <= n // 3).
        basis = [(n,)]
        for last in range(bound, n // 3 + 1):
            for prefix in serre_cartan_basis(n - last, bound=2 * last):
                basis.append(prefix + (last,))
        return tuple(basis)
    q = 2 * (p - 1)  # degree of P^1 at an odd prime
    if n % q == 0 and n // q >= bound:
        basis = [(0, int(n // q), 0)]
    elif n == 1:
        basis = [(1,)]
    else:
        basis = []
    # Case 1: sequences ending in P^last with no trailing Bockstein.
    for last in range(bound, n // q + 1):
        if n - q * last > 0:
            for prefix in serre_cartan_basis(n - q * last, p, p * last, generic=generic):
                basis.append(prefix + (last, 0))
    # Case 2: sequences ending in P^last followed by the Bockstein.
    if bound == 1:
        bound = 0
    for last in range(bound + 1, n // q + 1):
        for prefix in serre_cartan_basis(n - q * last - 1, p, p * last, generic=generic):
            if prefix == ():
                prefix = (0,)
            basis.append(prefix + (last, 1))
    return tuple(basis)
def load_belle():
    """Load the BelleGroup train_0.5M_CN dataset and normalise its columns.

    Renames instruction/output to text1/text2, labels the rows via
    ``add_label`` and drops the raw 'input' column.
    """
    dataset_dict = load_dataset('BelleGroup/train_0.5M_CN')
    print(dataset_dict)
    dataset_dict = (
        cast(DatasetDict, dataset_dict)
        .rename_columns({'instruction': 'text1', 'output': 'text2'})
        .map(add_label, batched=True, remove_columns=['input'])
    )
    print(f'processed dataset: {dataset_dict}')
    return dataset_dict
def _seg_49():
    # IDNA UTS #46 mapping-table segment: rows of (codepoint, status[, mapping]).
    # Status codes: 'M' mapped, 'X' disallowed, 'V' valid, 'I' ignored,
    # '3' disallowed_STD3_mapped. NOTE(review): many 'M' rows map to an empty
    # u'' here — the replacement characters appear to have been stripped in
    # transit; verify against the generated idna uts46data table.
    return [(64943, 'M', u''), (64944, 'M', u''), (64945, 'M', u''), (64946, 'M', u''), (64947, 'M', u''), (64948, 'M', u''), (64949, 'M', u''), (64950, 'M', u''), (64951, 'M', u''), (64952, 'M', u''), (64953, 'M', u''), (64954, 'M', u''), (64955, 'M', u''), (64956, 'M', u''), (64957, 'M', u''), (64958, 'M', u''), (64959, 'M', u''), (64960, 'M', u''), (64961, 'M', u''), (64962, 'M', u''), (64963, 'M', u''), (64964, 'M', u''), (64965, 'M', u''), (64966, 'M', u''), (64967, 'M', u''), (64968, 'X'), (65008, 'M', u''), (65009, 'M', u''), (65010, 'M', u''), (65011, 'M', u''), (65012, 'M', u''), (65013, 'M', u''), (65014, 'M', u''), (65015, 'M', u''), (65016, 'M', u''), (65017, 'M', u''), (65018, '3', u' '), (65019, '3', u' '), (65020, 'M', u''), (65021, 'V'), (65022, 'X'), (65024, 'I'), (65040, '3', u','), (65041, 'M', u''), (65042, 'X'), (65043, '3', u':'), (65044, '3', u';'), (65045, '3', u'!'), (65046, '3', u'?'), (65047, 'M', u''), (65048, 'M', u''), (65049, 'X'), (65056, 'V'), (65072, 'X'), (65073, 'M', u''), (65074, 'M', u''), (65075, '3', u'_'), (65077, '3', u'('), (65078, '3', u')'), (65079, '3', u'{'), (65080, '3', u'}'), (65081, 'M', u''), (65082, 'M', u''), (65083, 'M', u''), (65084, 'M', u''), (65085, 'M', u''), (65086, 'M', u''), (65087, 'M', u''), (65088, 'M', u''), (65089, 'M', u''), (65090, 'M', u''), (65091, 'M', u''), (65092, 'M', u''), (65093, 'V'), (65095, '3', u'['), (65096, '3', u']'), (65097, '3', u' '), (65101, '3', u'_'), (65104, '3', u','), (65105, 'M', u''), (65106, 'X'), (65108, '3', u';'), (65109, '3', u':'), (65110, '3', u'?'), (65111, '3', u'!'), (65112, 'M', u''), (65113, '3', u'('), (65114, '3', u')'), (65115, '3', u'{'), (65116, '3', u'}'), (65117, 'M', u''), (65118, 'M', u''), (65119, '3', u'#'), (65120, '3', u'&'), (65121, '3', u'*'), (65122, '3', u'+'), (65123, 'M', u'-'), (65124, '3', u'<'), (65125, '3', u'>'), (65126, '3', u'=')]
def _create_mac(key, msg, method):
    """Return an ``hmac.HMAC`` object for *key*/*msg*.

    *method* is either a digest constructor (passed straight through) or an
    algorithm name understood by :func:`hashlib.new`.
    """
    if callable(method):
        return hmac.HMAC(key, msg, method)

    def digest_ctor(d=b''):
        # Adapt the algorithm name into a constructor hmac can invoke.
        return hashlib.new(method, d)
    # NOTE(review): preserved from the original; presumably a compatibility
    # shim for hmac implementations that probed __call__ — confirm before
    # removing.
    digest_ctor.__call__ = digest_ctor
    return hmac.HMAC(key, msg, digest_ctor)
class OPTEngine(CausalEngine):
    """Causal-LM engine backed by the facebook/opt-1.3b checkpoint."""
    # Registry key used to select this engine from configuration.
    config_name: str = 'opt_engine'
    def __init__(self, weights_path: Optional[Union[(str, Path)]]=None):
        """Load OPT-1.3B (optionally from local *weights_path*) and configure padding."""
        super().__init__(model_name='facebook/opt-1.3b', weights_path=weights_path)
        # OPT's tokenizer ships without a pad token; reuse EOS so batched
        # inputs can be padded.
        self.tokenizer.pad_token = self.tokenizer.eos_token
        self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
class Colors():
    """Registry mapping names to Color objects, assigning defaults on demand."""

    # Fallback palette (class-level template; each instance works on its own
    # copy so one registry exhausting the palette does not affect others —
    # the original popped from this shared list directly).
    default_colors = [Color(1, 0, 0), Color(0, 1, 0), Color(0, 0, 1), Color(1, 1, 0), Color(1, 0, 1), Color(0, 1, 1)]

    def __init__(self):
        self.__colors = {}
        # Per-instance pool of not-yet-assigned default colors.
        self.__available = list(self.default_colors)

    def add(self, name, color):
        """Associate *name* with *color*, replacing any existing entry."""
        self.__colors[name] = color

    def lookup(self, name):
        """Return the color for *name*, assigning the next default if unseen.

        Raises:
            IndexError: when the default palette is exhausted.
        """
        # BUGFIX: dict.has_key() was removed in Python 3; use 'in'.
        if name not in self.__colors:
            self.add(name, self.__available.pop())
        return self.__colors.get(name)
def get_distance_fn(dist_metric):
    """Return a two-argument distance/similarity function for *dist_metric*.

    Supported names: 'mse', 'ssim', 'nrmse_euc', 'nrmse_minmax',
    'nrmse_mean', 'psnr'.

    Raises:
        ValueError: for an unknown metric name (the original silently
        returned None, which crashed later at call time).
    """
    if dist_metric == 'mse':
        return lambda a, b: np.sum((a - b) ** 2) / float(a.size)
    if dist_metric == 'ssim':
        return lambda a, b: compare_ssim(a, b, multichannel=True)
    if dist_metric == 'nrmse_euc':
        return lambda a, b: compare_nrmse(a, b, norm_type='Euclidean')
    if dist_metric == 'nrmse_minmax':
        return lambda a, b: compare_nrmse(a, b, norm_type='min-max')
    if dist_metric == 'nrmse_mean':
        return lambda a, b: compare_nrmse(a, b, norm_type='mean')
    if dist_metric == 'psnr':
        # Negated so that, like the distances above, smaller is better.
        return lambda a, b: (- 1) * compare_psnr(a, b)
    raise ValueError('Unknown distance metric: {}'.format(dist_metric))
def get_pretrained_tag(pretrained_model):
    """Classify a pretrained-model identifier into a provider tag.

    Returns one of 'open_clip', 'clip', 'eva_clip' or 'other', matched
    case-insensitively in that priority order.
    """
    model_id = pretrained_model.lower()
    if 'laion' in model_id or 'open_clip' in model_id:
        return 'open_clip'
    if 'openai' in model_id:
        return 'clip'
    if 'eva' in model_id and 'clip' in model_id:
        return 'eva_clip'
    return 'other'
def cross_entropy(pred, target):
    """Soft-target cross entropy.

    Args:
        pred: raw logits of shape (batch, num_classes).
        target: per-class target distribution, same shape as ``pred``.

    Returns:
        Scalar tensor: mean over the batch of -sum(target * log_softmax(pred)).
    """
    # dim=1 made explicit: the implicit-dim LogSoftmax form is deprecated,
    # and dim=1 is the class dimension summed over below.
    logsoftmax = nn.LogSoftmax(dim=1)
    return torch.mean(torch.sum((- target) * logsoftmax(pred), dim=1))
def transfer_prev_model_weights_to_new_model(prev_model, new_model):
    """Copy parameters from *prev_model* into same-named parameters of *new_model*.

    Parameters present only in one model are left untouched. Returns
    *new_model* for convenience.
    """
    target_params = dict(new_model.named_parameters())
    for name, source_param in prev_model.named_parameters():
        if name in target_params:
            target_params[name].data.copy_(source_param.data)
    return new_model
def worker_init_fn(worker_id: int, num_workers: int, rank: int, seed: int):
    """Seed numpy and the stdlib RNG deterministically per dataloader worker.

    The derived seed is unique per (rank, worker) pair so workers across
    distributed processes draw independent streams.
    """
    derived_seed = num_workers * rank + worker_id + seed
    np.random.seed(derived_seed)
    random.seed(derived_seed)
class FreeModule_ambient_pid(FreeModule_generic_pid, FreeModule_ambient_domain):
    """Ambient free module of finite rank over a principal ideal domain."""

    def __init__(self, base_ring, rank, sparse=False, coordinate_ring=None, category=None):
        """Delegate construction entirely to FreeModule_ambient_domain."""
        FreeModule_ambient_domain.__init__(
            self,
            base_ring=base_ring,
            rank=rank,
            sparse=sparse,
            coordinate_ring=coordinate_ring,
            category=category,
        )

    def _repr_(self):
        """Return the module's string representation, noting sparseness."""
        sparse_part = 'sparse ' if self.is_sparse() else ''
        return ('Ambient %sfree module of rank %s over the principal ideal domain %s'
                % (sparse_part, self.rank(), self.base_ring()))
def test_ListArray_getitem():
    """Integer indexing and slicing of a jagged Array yield the expected sublists."""
    rows = [[0.0, 1.1, 2.2], [], [3.3, 4.4], [5.5], [6.6, 7.7, 8.8, 9.9]]
    array = ak.highlevel.Array(rows)

    def index_at(data, i):
        return data[i]

    for i, expected in enumerate(rows):
        assert ak.operations.to_list(index_at(array, i)) == expected

    def slice_between(data, start, stop):
        return data[start:stop]

    assert ak.operations.to_list(slice_between(array, 1, 4)) == [[], [3.3, 4.4], [5.5]]
def test_UnmaskedArray_RecordArray_NumpyArray():
    """Round-trip an UnmaskedArray-of-RecordArray form through from_dict/to_dict."""
    raw_form = json.loads('{"class":"UnmaskedArray","content":{"class":"RecordArray","contents":{"nest":{"class":"NumpyArray","inner_shape":[],"itemsize":8,"format":"d","primitive":"float64","parameters":{},"form_key":null}},"parameters":{},"form_key":null},"parameters":{},"form_key":null}')
    round_tripped = ak.forms.from_dict(raw_form).to_dict()
    # to_dict normalises the RecordArray into parallel 'fields'/'contents'
    # lists and drops the itemsize/format details.
    assert round_tripped == {'class': 'UnmaskedArray', 'content': {'class': 'RecordArray', 'fields': ['nest'], 'contents': [{'class': 'NumpyArray', 'primitive': 'float64', 'inner_shape': [], 'parameters': {}, 'form_key': None}], 'parameters': {}, 'form_key': None}, 'parameters': {}, 'form_key': None}
def test_smart_array_concatenate_single():
    """Concatenating a single-element sequence returns the element itself."""
    ndarray_input = np.random.rand(3, 4, 5)
    assert _check_smart_concatenate([ndarray_input]) is ndarray_input
    range_input = range(10)
    assert _check_smart_concatenate([range_input], check_strides=False) is range_input
def _build(model, optimizer, weights_only=False, use_param_info_optim=True, max_gradient_norm=None, allow_lr_injection=False):
    """Attach *optimizer*'s update operators to the optimizable parameters of
    a caffe2 *model*.

    Args:
        model: caffe2 model-helper object exposing Validate(),
            GetOptimizationParamInfo(), net and param_init_net.
        optimizer: optimizer object called per-parameter; also receives an
            optional learning-rate multiplier blob.
        weights_only: if True, only parameters listed in model.weights get
            update operators.
        use_param_info_optim: prefer a parameter's own optimizer (when set
            on its param_info) over the global *optimizer*.
        max_gradient_norm: when set, scale the learning rate by a
            norm-clipping ratio computed over all selected parameters.
        allow_lr_injection: when True, multiply the learning rate by a
            runtime-settable scalar blob (_LEARNING_RATE_INJECTION).

    Returns:
        The *optimizer* that was applied.
    """
    param_to_device = _get_param_to_device(model)
    model.Validate()
    # Collect the parameters that should receive update operators.
    params = []
    for param_info in model.GetOptimizationParamInfo():
        if (weights_only and (param_info.blob not in model.weights)):
            continue
        params.append(param_info)
    lr_multiplier = None
    if (max_gradient_norm is not None):
        # Global-norm gradient clipping expressed as an LR multiplier.
        lr_multiplier = _calc_norm_ratio(model, params, 'norm_clipped_grad_update', param_to_device, max_gradient_norm)
    if allow_lr_injection:
        # Create (or reuse) the injection blob, then fold it into the multiplier.
        if (not model.net.BlobIsDefined(_LEARNING_RATE_INJECTION)):
            lr_injection = model.param_init_net.ConstantFill([], _LEARNING_RATE_INJECTION, shape=[1], value=1.0)
        else:
            lr_injection = _LEARNING_RATE_INJECTION
        if (lr_multiplier is None):
            lr_multiplier = lr_injection
        else:
            lr_multiplier = model.net.Mul([lr_multiplier, lr_injection], 'lr_multiplier', broadcast=1)
        optimizer.add_lr_multiplier(lr_multiplier)
    # Emit each parameter's update operators on its own device.
    for param_info in params:
        param_name = str(param_info.blob)
        device = get_param_device(param_name, param_info.grad, param_to_device)
        with core.DeviceScope(device):
            if (param_info.optimizer and use_param_info_optim):
                # Per-parameter optimizer takes precedence when allowed.
                param_info.optimizer(model.net, model.param_init_net, param_info)
            else:
                optimizer(model.net, model.param_init_net, param_info)
    return optimizer
class Base(Layer, Graphable):
    """The base layer of an emulation.

    Owns the autonomous systems (ASes), internet exchanges (IXes) and the
    default name-server list, and writes per-node interface metadata plus
    the interface-setup script at render time.
    """
    # ASN -> AutonomousSystem
    __ases: Dict[(int, AutonomousSystem)]
    # IX id -> InternetExchange
    __ixes: Dict[(int, InternetExchange)]
    # Defaults applied to ASes that configured no name servers of their own.
    __name_servers: List[str]
    def __init__(self):
        """Create an empty base layer."""
        super().__init__()
        self.__ases = {}
        self.__ixes = {}
        self.__name_servers = []
    def getName(self) -> str:
        """Return the layer name."""
        return 'Base'
    def configure(self, emulator: Emulator):
        """Register all nodes, then configure every IX and AS."""
        self._log('registering nodes...')
        for asobj in self.__ases.values():
            # ASes without explicit name servers inherit the layer default.
            if (len(asobj.getNameServers()) == 0):
                asobj.setNameServers(self.__name_servers)
            asobj.registerNodes(emulator)
        self._log('setting up internet exchanges...')
        for ix in self.__ixes.values():
            ix.configure(emulator)
        self._log('setting up autonomous systems...')
        for asobj in self.__ases.values():
            asobj.configure(emulator)
    def render(self, emulator: Emulator) -> None:
        """Write interface metadata and the setup script onto every route
        server ('rs'), router ('rnode') and host ('hnode') node."""
        # NOTE(review): loop variable 'type' shadows the builtin; kept as-is.
        for ((scope, type, name), obj) in emulator.getRegistry().getAll().items():
            if (not ((type == 'rs') or (type == 'rnode') or (type == 'hnode'))):
                continue
            node: Node = obj
            ifinfo = ''
            for iface in node.getInterfaces():
                net = iface.getNet()
                # Link properties: latency, bandwidth, drop rate.
                [l, b, d] = iface.getLinkProperties()
                ifinfo += '{}:{}:{}:{}:{}\n'.format(net.getName(), net.getPrefix(), l, b, d)
            node.setFile('/ifinfo.txt', ifinfo)
            node.setFile('/interface_setup', BaseFileTemplates['interface_setup_script'])
            # Both inserted at index 0, so at runtime chmod executes first.
            node.insertStartCommand(0, '/interface_setup')
            node.insertStartCommand(0, 'chmod +x /interface_setup')
    def setNameServers(self, servers: List[str]) -> Base:
        """Set the default name servers; returns self for chaining."""
        self.__name_servers = servers
        return self
    def getNameServers(self) -> List[str]:
        """Return the default name-server list."""
        return self.__name_servers
    def createAutonomousSystem(self, asn: int) -> AutonomousSystem:
        """Create, store and return a new AS; *asn* must be unused."""
        assert (asn not in self.__ases), 'as{} already exist.'.format(asn)
        self.__ases[asn] = AutonomousSystem(asn)
        return self.__ases[asn]
    def getAutonomousSystem(self, asn: int) -> AutonomousSystem:
        """Return the existing AS with the given *asn*."""
        assert (asn in self.__ases), 'as{} does not exist.'.format(asn)
        return self.__ases[asn]
    def setAutonomousSystem(self, asObject: AutonomousSystem):
        """Register an externally created AS, replacing any with the same ASN."""
        asn = asObject.getAsn()
        self.__ases[asn] = asObject
    def createInternetExchange(self, asn: int, prefix: str='auto', aac: AddressAssignmentConstraint=None) -> InternetExchange:
        """Create, store and return a new IX; *asn* must be unused."""
        assert (asn not in self.__ixes), 'ix{} already exist.'.format(asn)
        self.__ixes[asn] = InternetExchange(asn, prefix, aac)
        return self.__ixes[asn]
    def getInternetExchange(self, asn: int) -> InternetExchange:
        """Return the existing IX with the given id."""
        assert (asn in self.__ixes), 'ix{} does not exist.'.format(asn)
        return self.__ixes[asn]
    def setInternetExchange(self, ixObject: InternetExchange):
        """Register an externally created IX, replacing any with the same id."""
        asn = ixObject.getId()
        self.__ixes[asn] = ixObject
    def getAsns(self) -> List[int]:
        """Return all registered ASNs."""
        return list(self.__ases.keys())
    def getInternetExchangeIds(self) -> List[int]:
        """Return all registered IX ids."""
        return list(self.__ixes.keys())
    def getNodesByName(self, name: str) -> List[Node]:
        """Return all host nodes (across every AS) whose name starts with *name*."""
        nodes = []
        for _as in self.__ases.values():
            for host_name in _as.getHosts():
                if host_name.startswith(name):
                    nodes.append(_as.getHost(host_name))
        return nodes
    def _doCreateGraphs(self, emulator: Emulator):
        """Merge each AS's layer-2 graph into a single layer graph."""
        graph = self._addGraph('Layer 2 Connections', False)
        for asobj in self.__ases.values():
            asobj.createGraphs(emulator)
            asgraph = asobj.getGraph('AS{}: Layer 2 Connections'.format(asobj.getAsn()))
            graph.copy(asgraph)
    def print(self, indent: int) -> str:
        """Return an indented, human-readable dump of the layer's ASes and IXes."""
        out = (' ' * indent)
        out += 'BaseLayer:\n'
        indent += 4
        out += (' ' * indent)
        out += 'AutonomousSystems:\n'
        for _as in self.__ases.values():
            out += _as.print((indent + 4))
        out += (' ' * indent)
        out += 'InternetExchanges:\n'
        for _as in self.__ixes.values():
            out += _as.print((indent + 4))
        return out
def remove_variation_selectors(text):
    """Strip every character listed in VARIATION_SELECTORS from *text*.

    VARIATION_SELECTORS is a module-level sequence of Unicode variation
    selector characters; each is removed via str.replace.
    """
    cleaned = text
    for selector in VARIATION_SELECTORS:
        cleaned = cleaned.replace(selector, u'')
    return cleaned
def create_or_load_model(model, model_dir, session, name, ckpt_index=None):
    """Load model parameters from a checkpoint, or initialize fresh ones.

    When both *model_dir* and *ckpt_index* are given, the checkpoint at that
    index of the checkpoint state is used; otherwise the latest checkpoint in
    *model_dir* is taken. If no checkpoint is found, variables and tables are
    freshly initialized in *session*.

    Returns a (model, global_step) tuple.
    """
    if model_dir and ckpt_index is not None:
        # Pick a specific checkpoint by index from the checkpoint state file.
        state = tf.train.get_checkpoint_state(model_dir)
        ckpt_path = state.all_model_checkpoint_paths[ckpt_index]
    else:
        ckpt_path = tf.train.latest_checkpoint(model_dir)
    if not ckpt_path:
        # No checkpoint available: initialize variables/tables from scratch.
        start = time.time()
        session.run(tf.global_variables_initializer())
        session.run(tf.tables_initializer())
        utils.print_out((' created %s model with fresh parameters, time %.2fs' % (name, (time.time() - start))))
    else:
        model = load_model(model, ckpt_path, session, name)
    global_step = model.global_step.eval(session=session)
    return (model, global_step)
def theano_multinomial(n, pvals, seed):
    """Build a seeded Theano multinomial sampling op (float32 output).

    @param n number of trials per draw.
    @param pvals event probabilities.
    @param seed seed for the RandomStreams generator.
    """
    return RandomStreams(seed).multinomial(n=n, pvals=pvals, dtype='float32')
class CustomDatasetTests(unittest.TestCase):
    """Tests for XMLDataset annotation loading with default and custom subdirs."""

    def setUp(self):
        super().setUp()
        # The test data dir lives three levels above this file, in 'data'.
        repo_root = osp.dirname(osp.dirname(osp.dirname(__file__)))
        self.data_dir = osp.join(repo_root, 'data')
        self.dataset_class = DATASETS.get('XMLDataset')

    def _build_dataset(self, root, ann_file, **extra):
        # Shared constructor defaults for both test cases.
        return self.dataset_class(data_root=root, ann_file=ann_file, pipeline=[], classes=('person', 'dog'), test_mode=True, **extra)

    def test_data_infos__default_db_directories(self):
        root = osp.join(self.data_dir, 'VOCdevkit', 'VOC2007')
        custom_ds = self._build_dataset(root, osp.join(root, 'ImageSets', 'Main', 'trainval.txt'))
        expected = [{'id': '000001', 'filename': osp.join('JPEGImages', '000001.jpg'), 'width': 353, 'height': 500}]
        self.assertListEqual(expected, custom_ds.data_infos)

    def test_data_infos__overridden_db_subdirectories(self):
        root = osp.join(self.data_dir, 'custom_dataset')
        custom_ds = self._build_dataset(root, osp.join(root, 'trainval.txt'), img_prefix='', img_subdir='images', ann_subdir='images')
        expected = [{'id': '000001', 'filename': osp.join('images', '000001.jpg'), 'width': 353, 'height': 500}]
        self.assertListEqual(expected, custom_ds.data_infos)
class BroadcastedLinear(nn.Module):
    """Linear layer whose parameters live on a root worker and are broadcast.

    The root partition (derived from ``P_x``) owns the real ``W`` and ``b``
    parameters; all other workers hold zero-volume placeholders.  On each
    forward pass the parameters are broadcast to every worker of ``P_x`` and
    the affine map ``x @ W + b`` is applied locally.
    """

    def __init__(self, P_x, in_features, out_features, dtype=torch.float32):
        super().__init__()
        self.P_x = P_x
        self.P_0 = create_root_partition(P_x)
        self.in_features = in_features
        self.out_features = out_features
        self.dtype = dtype
        if self.P_0.active:
            # Root worker owns the real parameters: W is (in, out), b is (out,).
            self.W = nn.Parameter(torch.rand(in_features, out_features, dtype=dtype))
            self.b = nn.Parameter(torch.rand(out_features, dtype=dtype))
        else:
            # Non-root workers hold empty placeholders until broadcast.
            self.W = nn.Parameter(zero_volume_tensor(dtype=dtype))
            self.b = nn.Parameter(zero_volume_tensor(dtype=dtype))
        self.BW = dnn.Broadcast(self.P_0, self.P_x)
        self.Bb = dnn.Broadcast(self.P_0, self.P_x)

    def forward(self, x):
        """Broadcast W and b from the root partition, then apply x @ W + b."""
        W = self.BW(self.W)
        b = self.Bb(self.b)
        # BUG FIX: the original read `return ((W x) + b)`, which is not valid
        # Python. With W shaped (in_features, out_features), the intended
        # operation is a matrix product of the input with W plus the bias.
        return (torch.matmul(x, W) + b)
@click.command()
@click.argument('p_e_m_file', type=click.Path(exists=True))
@click.argument('dump_db_file', type=click.Path(exists=True))
@click.argument('wiki_mention_db_file', type=click.Path(exists=True))
@click.argument('out_file', type=click.Path())
@click.option('--max-mention-length', default=20)
def build_from_p_e_m_file(p_e_m_file, dump_db_file, wiki_mention_db_file, **kwargs):
    """Build a mention DB from a p(e|m) file.

    BUG FIX: the decorator names were missing from the original (bare
    tuples like ``('p_e_m_file', type=...)`` are invalid syntax); restored
    the click command/argument/option decorators implied by their contents.
    ``out_file`` and ``--max-mention-length`` arrive via **kwargs and are
    forwarded to MentionDB.build_from_p_e_m_file.
    """
    dump_db = DumpDB(dump_db_file)
    tokenizer = BasicTokenizer(do_lower_case=False)
    normalizer = BertLowercaseNormalizer()
    wiki_mention_db = MentionDB(wiki_mention_db_file)
    MentionDB.build_from_p_e_m_file(p_e_m_file, dump_db, wiki_mention_db, tokenizer, normalizer, **kwargs)
def register_Ns3DefaultDeleter__Ns3Dot11sIeBeaconTimingUnit_methods(root_module, cls):
    """Register bindings for ns3::DefaultDeleter<ns3::dot11s::IeBeaconTimingUnit>.

    Auto-generated ns-3 Python-binding registration (pybindgen-style, judging
    by the add_constructor/add_method/param API): adds the default and copy
    constructors plus the static Delete() method to the wrapped class.

    root_module: top-level binding module (unused in this registration).
    cls: binding class wrapper being populated.
    """
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::dot11s::IeBeaconTimingUnit > const &', 'arg0')])
    # static void Delete(ns3::dot11s::IeBeaconTimingUnit *object)
    cls.add_method('Delete', 'void', [param('ns3::dot11s::IeBeaconTimingUnit *', 'object')], is_static=True)
    return
def hexstring2npbytearray(hexstring, remove_prefix='0x'):
    """Convert a hex string (optionally prefixed, e.g. '0x') to a uint8 array.

    @param hexstring hex digits, optionally starting with *remove_prefix*.
    @param remove_prefix prefix to strip before decoding (default '0x').
    @returns numpy uint8 array, one element per byte.
    """
    stripped = hexstring[len(remove_prefix):] if hexstring.startswith(remove_prefix) else hexstring
    return np.asarray(bytearray.fromhex(stripped), dtype=np.uint8)
def generate_data(args):
    """Apply the configured perturbation chain to every image in the source CSV.

    Builds the output directory from the (up to three) perturbation names and
    the level, runs process_perturbation over all image paths in parallel,
    rewrites the path column to the perturbed locations, and saves a new
    per-split CSV inside the output directory.
    """
    # Nested perturbations only apply when the previous one is not 'identity'.
    parts = [args.perturbation]
    if args.perturbation2 != 'identity':
        parts.append(args.perturbation2)
        if args.perturbation3 != 'identity':
            parts.append(args.perturbation3)
    perturbed_dir = Path(args.dst_dir).joinpath(*parts, f'level_{args.level}')
    perturbed_dir.mkdir(parents=True, exist_ok=True)
    src_df = pd.read_csv(args.src_csv)
    paths = list(src_df[COL_PATH])
    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = [executor.submit(process_perturbation, path, args, perturbed_dir) for path in paths]
        # Drain completions just to drive the progress bar.
        for _ in tqdm(concurrent.futures.as_completed(futures), total=len(paths)):
            pass
    src_df[COL_PATH] = src_df[COL_PATH].apply(get_dst_img_path, args=(args.split, perturbed_dir))
    src_df.to_csv(perturbed_dir / f'{args.split}.csv', index=False)
def update_email_subject(downloaded_email, email_subject):
    """Prefix the message's Subject header with '[ACTIONED]' and log the change.

    @param downloaded_email email.message-style object with a Subject header.
    @param email_subject the current subject text.
    @returns the same message object, with its Subject header replaced.
    """
    new_subject = '[ACTIONED] ' + email_subject
    # replace_header mutates the message in place (raises KeyError if absent).
    downloaded_email.replace_header('Subject', new_subject)
    logger.info('Message subject modified to: %s', new_subject)
    return downloaded_email
class Scores(object):
    """Accumulates binary-classification counts and derives percentage metrics.

    All four confusion-matrix counters start at zero and are incremented
    directly by callers; recall/precision/f1 return 0.0 when undefined.
    """

    def __init__(self):
        self.true_positives = 0
        self.false_positives = 0
        self.true_negatives = 0
        self.false_negatives = 0

    def recall(self):
        """Return recall as a percentage (0.0 when there are no positives)."""
        total = self.true_positives + self.false_negatives
        if not total:
            return 0.0
        return 100.0 * self.true_positives / total

    def precision(self):
        """Return precision as a percentage (0.0 when nothing was predicted)."""
        total = self.true_positives + self.false_positives
        if not total:
            return 0.0
        return 100.0 * self.true_positives / total

    def f1(self):
        """Return the harmonic mean of precision and recall (0.0 when undefined)."""
        p = self.precision()
        r = self.recall()
        return (2 * p * r / (p + r)) if (p + r) else 0.0
def test_inheritance():
    """Exercise LinearOperator subclassing: empty, matvec-only, matmat-only."""
    class Empty(interface.LinearOperator):
        pass

    # Instantiating without any of _matvec/_matmat must warn and fail.
    with warns(RuntimeWarning, match='should implement at least'):
        assert_raises(TypeError, Empty)

    class Identity(interface.LinearOperator):
        def __init__(self, n):
            super(Identity, self).__init__(dtype=None, shape=(n, n))

        def _matvec(self, x):
            return x

    id3 = Identity(3)
    assert_equal(id3.matvec([1, 2, 3]), [1, 2, 3])
    # rmatvec was never implemented, so it must raise.
    assert_raises(NotImplementedError, id3.rmatvec, [4, 5, 6])

    class MatmatOnly(interface.LinearOperator):
        def __init__(self, A):
            super(MatmatOnly, self).__init__(A.dtype, A.shape)
            self.A = A

        def _matmat(self, x):
            return self.A.dot(x)

    # matvec must fall back to _matmat and preserve the expected shape.
    mm = MatmatOnly(np.random.randn(5, 3))
    assert_equal(mm.matvec(np.random.randn(3)).shape, (5,))
def main(args):
    """Evaluate a detection/panoptic model described by an mmcv config file.

    Parses CLI args, prepares the config, builds the model, optionally loads
    a checkpoint, runs single- or multi-GPU inference, and finally evaluates
    the outputs, dumping metrics to a timestamped JSON file.

    NOTE(review): many helpers used here (parse_args, build_train_model,
    single_gpu_test_uda*, str6, ...) are defined elsewhere in this module.
    """
    args = parse_args(args)
    # --eval computes metrics while --format_only only converts outputs;
    # the two modes are mutually exclusive.
    if (args.eval and args.format_only):
        raise ValueError('--eval and --format_only cannot be both specified')
    if ((args.out is not None) and (not args.out.endswith(('.pkl', '.pickle')))):
        raise ValueError('The output file must be a pkl file.')
    # --- Config preparation -------------------------------------------------
    cfg = Config.fromfile(args.config)
    cfg = replace_cfg_vals(cfg)
    update_data_root(cfg)
    if (args.cfg_options is not None):
        cfg.merge_from_dict(args.cfg_options)
    cfg = compat_cfg(cfg)
    setup_multi_processes(cfg)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Inference only: clear pretrained-weight settings so nothing is downloaded.
    if ('pretrained' in cfg.model):
        cfg.model.pretrained = None
    elif ('init_cfg' in cfg.model.backbone):
        cfg.model.backbone.init_cfg = None
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None
    if (args.gpu_ids is not None):
        # Only the first requested GPU is used for non-distributed testing.
        cfg.gpu_ids = args.gpu_ids[0:1]
        # NOTE(review): `str6` is not defined in this function — presumably a
        # module-level warning message; verify it exists where main() lives.
        warnings.warn(str6)
    else:
        cfg.gpu_ids = [args.gpu_id]
    cfg.device = get_device()
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # --- Data loader configuration ------------------------------------------
    test_dataloader_default_args = dict(samples_per_gpu=1, workers_per_gpu=2, dist=distributed, shuffle=False)
    if isinstance(cfg.data.val, dict):
        cfg.data.val.test_mode = True
        if (cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1):
            # Batched inference: keep images un-tensorized in the pipeline.
            cfg.data.val.pipeline = replace_ImageToTensor(cfg.data.val.pipeline)
    elif isinstance(cfg.data.val, list):
        for ds_cfg in cfg.data.val:
            ds_cfg.test_mode = True
        if (cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1):
            for ds_cfg in cfg.data.val:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
    # Explicit test_dataloader settings in the config override the defaults.
    test_loader_cfg = {**test_dataloader_default_args, **cfg.data.get('test_dataloader', {})}
    (rank, _) = get_dist_info()
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    # --- Model construction and logging -------------------------------------
    cfg.model.train_cfg = None
    model = build_train_model(cfg, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        wrap_fp16_model(model)
    log_file = osp.join(cfg.work_dir, f'evaluation_logs_{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for (k, v) in env_info_dict.items()])
    dash_line = (('-' * 60) + '\n')
    logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
    logger.info(f'''Config:
{cfg.pretty_text}''')
    deterministic = False
    logger.info(f'Set random seed to {cfg.seed}, deterministic: {deterministic}')
    set_random_seed(cfg.seed, deterministic=deterministic)
    # --- Optional checkpoint loading ----------------------------------------
    checkpoint = None
    outputs = None
    if cfg.checkpoint_path:
        checkpoint_path = cfg.checkpoint_path
        logger.info('The following checkpoints will be evaluated ...')
        logger.info(checkpoint_path)
        # Always evaluates the 'latest.pth' snapshot inside checkpoint_path.
        checkpoint_file_path = os.path.join(checkpoint_path, 'latest.pth')
        logger.info(f'Evaluation will be done for the model {checkpoint_file_path}')
        checkpoint = load_checkpoint(model, checkpoint_file_path, map_location='cpu')
    panop_eval_temp_folder = cfg['evaluation']['panop_eval_temp_folder']
    logger.info(f'Evaluation results will be saved at: {panop_eval_temp_folder}')
    json_file = osp.join(cfg.work_dir, f'evaluation_results_{timestamp}.json')
    logger.info(f'Final evaluation results JSON file will be saved at: {json_file}')
    dataset = build_dataset(cfg.data.val)
    data_loader = build_dataloader(dataset, **test_loader_cfg)
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # Prefer class names stored in the checkpoint; fall back to the dataset's.
    if (cfg.checkpoint_path and checkpoint):
        if ('CLASSES' in checkpoint.get('meta', {})):
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            model.CLASSES = dataset.CLASSES
    # --- Inference -----------------------------------------------------------
    if (not distributed):
        model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids)
        if cfg.generate_only_visuals_without_eval:
            outputs = single_gpu_test_uda_for_visual_debug(model, data_loader, show=args.show, out_dir=args.show_dir, debug=cfg.debug, show_score_thr=args.show_score_thr, dataset_name=cfg.evaluation.dataset_name, panop_eval_temp_folder=panop_eval_temp_folder)
        elif cfg.dump_predictions_to_disk:
            dump_path = os.path.join(cfg.work_dir, 'results_numpys')
            os.makedirs(dump_path, exist_ok=True)
            outputs = single_gpu_test_uda_dump_results_to_disk(model, data_loader, out_dir=dump_path, debug=cfg.debug, dataset_name=cfg.evaluation.dataset_name, logger=logger)
        elif cfg.evaluate_from_saved_png_predictions:
            # Predictions already exist on disk; evaluation happens below.
            pass
        else:
            outputs = single_gpu_test_uda(model, data_loader, show=args.show, out_dir=args.show_dir, debug=cfg.debug, show_score_thr=args.show_score_thr, dataset_name=cfg.evaluation.dataset_name)
    else:
        model = build_ddp(model, cfg.device, device_ids=[int(os.environ['LOCAL_RANK'])], broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir, (args.gpu_collect or cfg.evaluation.get('gpu_collect', False)))
    # --- Evaluation / result dumping -----------------------------------------
    if cfg.generate_only_visuals_without_eval:
        pass
    elif (cfg.evaluate_from_saved_png_predictions and (not outputs)):
        # Re-use the temp folder from a previous run's saved predictions.
        cfg['evaluation']['panop_eval_temp_folder'] = cfg['panop_eval_temp_folder_previous']
        eval_kwargs = cfg.get('evaluation', {}).copy()
        # These keys are training-hook options, not dataset.evaluate() kwargs.
        for key in ['interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule', 'dynamic_intervals']:
            eval_kwargs.pop(key, None)
        metric = dataset.evaluate(outputs, logger=logger, **eval_kwargs)
        logger.info(metric)
        metric_dict = dict(config=args.config, metric=metric)
        if ((json_file is not None) and (rank == 0)):
            mmcv.dump(metric_dict, json_file)
    elif (cfg.dump_predictions_to_disk and outputs):
        eval_kwargs = cfg.get('evaluation', {}).copy()
        for key in ['interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule', 'dynamic_intervals']:
            eval_kwargs.pop(key, None)
        metric = dataset.evaluate(outputs, logger=logger, **eval_kwargs)
        logger.info(metric)
        metric_dict = dict(config=args.config, metric=metric)
        if ((json_file is not None) and (rank == 0)):
            mmcv.dump(metric_dict, json_file)
    elif outputs:
        (rank, _) = get_dist_info()
        # Only rank 0 writes results and computes metrics.
        if (rank == 0):
            if args.out:
                print(f'''
writing results to {args.out}''')
                mmcv.dump(outputs, args.out)
            kwargs = ({} if (args.eval_options is None) else args.eval_options)
            if args.format_only:
                dataset.format_results(outputs, **kwargs)
            eval_kwargs = cfg.get('evaluation', {}).copy()
            for key in ['interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule', 'dynamic_intervals']:
                eval_kwargs.pop(key, None)
            metric = dataset.evaluate(outputs, logger=logger, **eval_kwargs)
            logger.info(metric)
            metric_dict = dict(config=args.config, metric=metric)
            if ((json_file is not None) and (rank == 0)):
                mmcv.dump(metric_dict, json_file)
class DalleBartConfig(PretrainedFromWandbMixin, PretrainedConfig):
    """Configuration for the DalleBart encoder-decoder model.

    Holds text-encoder / image-decoder sizes, normalization and attention
    options, and derives generation-related token ids from the image vocab.
    """
    model_type = 'dallebart'
    # Cache keys that should not be returned during inference.
    keys_to_ignore_at_inference = ['past_key_values']
    # Map common HF attribute names onto this config's field names.
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(self, normalize_text=False, encoder_vocab_size=50264, image_vocab_size=16384, image_length=256, max_text_length=64, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, gradient_checkpointing=True, use_scan=None, use_cache=True, is_encoder_decoder=True, forced_eos_token_id=None, tie_word_embeddings=False, do_sample=True, use_bias=False, ln_type='layernorm', ln_positions='normformer', use_head_scale=False, use_cosine_attention=False, tau_init=0.05, use_absolute_position_embeddings=True, use_swin_position_embeddings=False, use_deepnet_scaling=False, use_subln_init=False, use_glu=True, use_alibi=False, sinkhorn_iters=1, use_final_ln_encoder=True, use_final_ln_decoder=True, force_ln_scale=False, **kwargs):
        """Validate option combinations, store all fields, then defer to
        PretrainedConfig.__init__ with the derived generation defaults."""
        self.normalize_text = normalize_text
        self.use_bias = use_bias
        assert (ln_type in ['rmsnorm', 'layernorm']), "ln_type must be 'rmsnorm' or 'layernorm'"
        self.ln_type = ln_type
        # 'deepnet' is an alias for post-layernorm placement.
        if (ln_positions == 'deepnet'):
            ln_positions = 'postln'
        assert (ln_positions in ['normformer', 'swinv2', 'cogview', 'postln', 'preln', 'subln']), "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln', 'subln'"
        self.use_head_scale = use_head_scale
        assert (use_alibi is False), 'use_alibi is not supported yet'
        self.ln_positions = ln_positions
        self.use_cosine_attention = use_cosine_attention
        self.tau_init = tau_init
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_swin_position_embeddings = use_swin_position_embeddings
        self.use_deepnet_scaling = use_deepnet_scaling
        self.use_subln_init = use_subln_init
        self.use_glu = use_glu
        self.use_alibi = use_alibi
        self.sinkhorn_iters = sinkhorn_iters
        # Post-layernorm requires the final layer norms on both sides.
        if (ln_positions == 'postln'):
            assert use_final_ln_encoder, "use_final_ln_encoder must be True when ln_positions is 'postln'"
            assert use_final_ln_decoder, "use_final_ln_decoder must be True when ln_positions is 'postln'"
        self.use_final_ln_encoder = use_final_ln_encoder
        self.use_final_ln_decoder = use_final_ln_decoder
        self.force_ln_scale = force_ln_scale
        self.encoder_vocab_size = encoder_vocab_size
        self.image_vocab_size = image_vocab_size
        self.image_length = image_length
        self.max_text_length = max_text_length
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        self.gradient_checkpointing = gradient_checkpointing
        # Scan defaults to on unless the layernorm layout is 'swinv2'.
        self.use_scan = (use_scan if (use_scan is not None) else (ln_positions != 'swinv2'))
        assert (not (self.use_scan and (ln_positions == 'swinv2'))), "scan cannot be used with 'swinv2'"
        self.scale_embedding = scale_embedding
        # Special token ids default to image_vocab_size (one past the last
        # image token); pop them so they are not passed twice to super().
        decoder_start_token_id = kwargs.pop('decoder_start_token_id', image_vocab_size)
        bos_token_id = kwargs.pop('bos_token_id', image_vocab_size)
        pad_token_id = kwargs.pop('pad_token_id', image_vocab_size)
        eos_token_id = kwargs.pop('eos_token_id', image_vocab_size)
        min_length = kwargs.pop('min_length', (image_length + 1))
        max_length = kwargs.pop('max_length', (image_length + 1))
        super().__init__(is_encoder_decoder=is_encoder_decoder, tie_word_embeddings=tie_word_embeddings, forced_eos_token_id=forced_eos_token_id, decoder_start_token_id=decoder_start_token_id, bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, min_length=min_length, max_length=max_length, do_sample=do_sample, **kwargs)
        # Back-compat shim for the deprecated force_bos_token_to_be_generated flag.
        if ((self.forced_bos_token_id is None) and kwargs.get('force_bos_token_to_be_generated', False)):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions.The config can simply be saved and uploaded again to be fixed.')
class _Player():
def __init__(self, num_strategies):
self.num_strategies = num_strategies
def add_strategy(self):
self.num_strategies += 1 |
def register_model_func(generated_file_name_or_path, _get_normal_model_instance, get_extra=None):
    """Create a CommonModelHandler subclass on the fly and register it.

    @param generated_file_name_or_path path/name of the autogenerated model file.
    @param _get_normal_model_instance callable installed on the handler class.
    @param get_extra optional extra-data callable, attached only when truthy.
    """
    attrs = {'_get_normal_model_instance': _get_normal_model_instance}
    if get_extra:
        attrs['get_extra'] = get_extra
    # Dynamically subclass CommonModelHandler with the supplied callables.
    handler: CommonModelHandler = type('AutoGeneratedModelHandler', (CommonModelHandler,), attrs)()
    handler.register_autogenerated(generated_file_name_or_path=generated_file_name_or_path)
def create_updown_map(logfile):
    """Parse a block-structured device log and pair uplink with downlink frames.

    The log is organised in blocks: a line starting with '>' opens a block
    (the id is taken from column 4 onward), 'key=value' lines fill it, and a
    line starting with '<' + id closes it. TX blocks record an uplink frame
    counter, the next RX block pairs it with a downlink counter, and an
    ADRREQ block is attached to the following TX/RX pair. The naming
    (fcnt, port, LinkADRReq) suggests LoRaWAN traffic — TODO confirm.

    Returns {'up': {...}, 'down': {...}} keyed by labels like 'as-00042'
    (port > 0) or 'ns-00042' (port 0), each entry linking the opposite
    direction and any pending LinkADRReq.
    """
    updown_map = {'up': {}, 'down': {}}
    fcnt_up = None      # label of the TX frame awaiting its matching RX
    linkadrreq = None   # last ADRREQ block, attached to the next TX/RX pair
    with open(logfile, 'r', encoding='utf8') as log:
        block_id = None
        block_data = {}
        for line in log:
            line = line.strip()
            if line.startswith('>'):
                # Block opener; the id starts after a 4-char marker.
                block_id = line[4:]
                block_data = {}
            elif ((block_id is not None) and line.startswith(('<' + block_id))):
                # Block closed — dispatch on its id.
                # NOTE(review): the closer is matched as '<' + id while the
                # opener skips 4 chars; confirm both fit the real log format.
                if (block_id == 'ADRREQ'):
                    linkadrreq = block_data
                elif ((block_id == 'RX') and (fcnt_up is not None)):
                    # 'as-' when an application port is set, 'ns-' otherwise.
                    fcnt_down = (('as-%05d' if (block_data['port'] > 0) else 'ns-%05d') % block_data['fcnt'])
                    updown_map['up'][fcnt_up] = {'fCntDown': fcnt_down}
                    updown_map['down'][fcnt_down] = {'fCntUp': fcnt_up}
                    if (linkadrreq is not None):
                        updown_map['up'][fcnt_up]['linkADRReq'] = linkadrreq
                        updown_map['down'][fcnt_down]['linkADRReq'] = linkadrreq
                        linkadrreq = None
                    fcnt_up = None
                elif (block_id == 'TX'):
                    fcnt_up = (('as-%05d' if (block_data['port'] > 0) else 'ns-%05d') % block_data['fcnt'])
                block_id = None
            elif ((block_id is not None) and ('=' in line)):
                # 'key=value' payload line; decimal values become ints.
                (k, v) = line.split('=', 1)
                block_data[k.lower()] = (int(v) if v.isdecimal() else v)
    return updown_map
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.