code stringlengths 101 5.91M |
|---|
def basic_gn_shortcut(model, prefix, blob_in, dim_in, dim_out, stride):
    """Residual shortcut with GroupNorm.

    Returns the input blob unchanged when the channel counts already match;
    otherwise projects it with a 1x1 convolution followed by GroupNorm.
    """
    if dim_in == dim_out:
        return blob_in
    # Channel counts differ: project with a 1x1 conv + GroupNorm.
    return model.ConvGN(
        blob_in,
        prefix + '_branch1',
        dim_in,
        dim_out,
        kernel=1,
        group_gn=get_group_gn(dim_out),
        stride=stride,
        pad=0,
        group=1,
    )
class TaskRunner():
    """Abstract interface for a federated-learning task runner.

    Wraps a data loader and declares the training/validation/tensor-I/O
    operations that concrete framework adapters must implement.
    """
    def __init__(self, data_loader, tensor_dict_split_fn_kwargs: dict=None, **kwargs):
        """Store the data loader and tensor-dict split options.

        Args:
            data_loader: object exposing get_feature_shape() and the
                train/valid size accessors used below.
            tensor_dict_split_fn_kwargs: options forwarded to the tensor-dict
                split function; normalized to a fresh empty dict when None
                (avoids a shared mutable default).
            **kwargs: accepted for subclass flexibility; unused here.
        """
        self.data_loader = data_loader
        self.feature_shape = self.data_loader.get_feature_shape()
        if (tensor_dict_split_fn_kwargs is None):
            tensor_dict_split_fn_kwargs = {}
        self.tensor_dict_split_fn_kwargs = tensor_dict_split_fn_kwargs
        self.set_logger()
    def set_logger(self):
        """Create a module-scoped logger for this runner."""
        self.logger = getLogger(__name__)
    def set_optimizer_treatment(self, opt_treatment):
        """Record how optimizer variables are treated across rounds."""
        self.opt_treatment = opt_treatment
    def get_data_loader(self):
        """Return the current data loader."""
        return self.data_loader
    def set_data_loader(self, data_loader):
        """Replace the data loader.

        Raises:
            ValueError: if the new loader's feature shape differs from the
                one the model was built against.
        """
        if (data_loader.get_feature_shape() != self.data_loader.get_feature_shape()):
            raise ValueError('The data_loader feature shape is not compatible with model.')
        self.data_loader = data_loader
    def get_train_data_size(self):
        """Number of training examples in the current loader."""
        return self.data_loader.get_train_data_size()
    def get_valid_data_size(self):
        """Number of validation examples in the current loader."""
        return self.data_loader.get_valid_data_size()
    def train_batches(self, num_batches=None, use_tqdm=False):
        """Train on `num_batches` batches; implemented by subclasses."""
        raise NotImplementedError
    def validate(self):
        """Run validation; implemented by subclasses."""
        raise NotImplementedError
    def get_required_tensorkeys_for_function(self, func_name, **kwargs):
        """Return tensor keys required to call `func_name`; subclass-specific."""
        raise NotImplementedError
    def get_tensor_dict(self, with_opt_vars):
        """Export model (and optionally optimizer) tensors; subclass-specific."""
        raise NotImplementedError
    def set_tensor_dict(self, tensor_dict, with_opt_vars):
        """Load model (and optionally optimizer) tensors; subclass-specific."""
        raise NotImplementedError
    def reset_opt_vars(self):
        """Reset optimizer state; subclass-specific."""
        raise NotImplementedError
    def initialize_globals(self):
        """Initialize framework-global state; subclass-specific."""
        raise NotImplementedError
    def load_native(self, filepath, **kwargs):
        """Load a framework-native checkpoint from `filepath`; subclass-specific."""
        raise NotImplementedError
    def save_native(self, filepath, **kwargs):
        """Save a framework-native checkpoint to `filepath`; subclass-specific."""
        raise NotImplementedError
class Modelnet40Config(Config):
    """KPConv-style training configuration for the ModelNet40 dataset."""
    # Dataset identity. num_classes is None here; presumably filled in from
    # the dataset at runtime -- TODO confirm against the Config base class.
    dataset = 'ModelNet40'
    num_classes = None
    dataset_task = ''
    # CPU threads feeding the input pipeline.
    input_threads = 10
    # Network layout: conv blocks with periodic strided (downsampling) blocks,
    # ending in global average pooling for classification.
    architecture = ['simple', 'resnetb', 'resnetb_strided', 'resnetb', 'resnetb', 'resnetb_strided', 'resnetb', 'resnetb', 'resnetb_strided', 'resnetb', 'resnetb', 'resnetb_strided', 'resnetb', 'resnetb', 'global_average']
    # Kernel-point convolution geometry.
    num_kernel_points = 15
    first_subsampling_dl = 0.02
    conv_radius = 2.5
    deform_radius = 6.0
    KP_extent = 1.2
    KP_influence = 'linear'
    aggregation_mode = 'sum'
    in_features_dim = 1
    modulated = True
    # Batch normalization settings.
    use_batch_norm = True
    batch_norm_momentum = 0.05
    # Deformable-convolution regularization.
    deform_fitting_mode = 'point2point'
    deform_fitting_power = 1.0
    deform_lr_factor = 0.1
    repulse_extent = 1.2
    # Optimization schedule: per-epoch decay of 0.1**(1/100) multiplies the
    # learning rate by 0.1 roughly every 100 epochs.
    max_epoch = 500
    learning_rate = 0.01
    momentum = 0.98
    lr_decays = {i: (0.1 ** (1 / 100)) for i in range(1, max_epoch)}
    grad_clip_norm = 100.0
    # Batching / epoch sizing.
    batch_num = 10
    epoch_steps = 300
    validation_size = 30
    checkpoint_gap = 50
    # Data augmentation parameters.
    augment_scale_anisotropic = True
    augment_symmetries = [True, True, True]
    augment_rotation = 'none'
    augment_scale_min = 0.8
    augment_scale_max = 1.2
    augment_noise = 0.001
    augment_color = 1.0
    segloss_balance = 'none'
    # Checkpoint saving.
    saving = True
    saving_path = None
def group_normalization(x, beta, gamma, num_groups, channel_axis=1, batch_axis=0, eps=1e-05, output_stat=False):
    """Apply group normalization to `x`.

    When `beta`/`gamma` are None the corresponding bias/scale is disabled.
    With `output_stat=True` the underlying op returns three outputs
    (normalized value plus statistics) instead of one.
    """
    from .function_bases import group_normalization as group_normalization_base
    batch_axis = _force_list(batch_axis)
    return group_normalization_base(
        x,
        beta=beta,
        gamma=gamma,
        num_groups=num_groups,
        channel_axis=channel_axis,
        batch_axis=batch_axis,
        eps=eps,
        no_scale=gamma is None,
        no_bias=beta is None,
        n_outputs=3 if output_stat else 1,
    )
class RowStandardTableaux(Tableaux):
    """Factory parent for row-standard tableaux.

    Dispatches to the appropriate concrete parent (all, fixed shape, or
    fixed size) depending on the argument.
    """
    def __classcall_private__(cls, *args, **kwargs):
        """Choose the concrete parent from the first argument (or ``n=...``).

        Raises:
            NotImplementedError: for skew-partition shapes (unsupported).
            ValueError: if the argument is neither a partition nor a
                non-negative integer.
        """
        from sage.combinat.partition import _Partitions
        from sage.combinat.skew_partition import SkewPartitions
        if args:
            n = args[0]
        elif ('n' in kwargs):
            # BUG FIX: look up the string key 'n' -- the original used
            # kwargs[n], referencing the not-yet-bound local name n.
            n = kwargs['n']
        else:
            n = None
        if (n is None):
            return RowStandardTableaux_all()
        elif (n in _Partitions):
            return RowStandardTableaux_shape(_Partitions(n))
        elif (n in SkewPartitions()):
            raise NotImplementedError('row standard skew tableaux not yet implemented')
        if ((not isinstance(n, (int, Integer))) or (n < 0)):
            raise ValueError('the argument must be a non-negative integer or a partition')
        return RowStandardTableaux_size(n)
    Element = RowStandardTableau
    def __contains__(self, x):
        """Return whether `x` is a row-standard tableau.

        A tableau qualifies when its entries are exactly 1..n and every row
        is strictly increasing left to right.
        """
        if isinstance(x, RowStandardTableau):
            return True
        elif Tableaux.__contains__(self, x):
            # Entries must be a permutation of 1..n ...
            flatx = sorted(sum((list(row) for row in x), []))
            # ... and rows must be strictly increasing.
            return ((flatx == list(range(1, (len(flatx) + 1))))
                    and all((row[i] < row[(i + 1)]) for row in x for i in range((len(row) - 1))))
        return False
class CamembertTokenizer(PreTrainedTokenizer):
    """CamemBERT tokenizer, backed by SentencePiece.

    Keeps fairseq's four reserved special-token ids at the front of the
    vocabulary and offsets all SentencePiece ids by that amount.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED'], sp_model_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs) -> None:
        """Load the SentencePiece model and register fairseq special ids."""
        # The mask token behaves like a normal word: strip the space before it.
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # fairseq reserves the first ids; sp ids are shifted by this offset.
        self.fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        # <mask> goes after the whole (offset) SentencePiece vocabulary.
        self.fairseq_tokens_to_ids['<mask>'] = (len(self.sp_model) + len(self.fairseq_tokens_to_ids))
        self.fairseq_ids_to_tokens = {v: k for (k, v) in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Build model inputs: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return (((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return (((([1] + ([0] * len(token_ids_0))) + [1, 1]) + ([0] * len(token_ids_1))) + [1])
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Return all-zero token type ids (CamemBERT does not use them)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])
    @property
    def vocab_size(self):
        """Total vocabulary size (fairseq specials + SentencePiece pieces).

        BUG FIX: declared as a property -- get_vocab() iterates
        range(self.vocab_size), which requires an int, not a bound method.
        """
        return (len(self.fairseq_tokens_to_ids) + len(self.sp_model))
    def get_vocab(self):
        """Return the full token->id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize `text` into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Map a token string to its id (fairseq specials take precedence)."""
        if (token in self.fairseq_tokens_to_ids):
            return self.fairseq_tokens_to_ids[token]
        elif (self.sp_model.PieceToId(token) == 0):
            # sp id 0 means "unknown piece" here.
            return self.unk_token_id
        return (self.fairseq_offset + self.sp_model.PieceToId(token))
    def _convert_id_to_token(self, index):
        """Map an id back to its token string."""
        if (index in self.fairseq_ids_to_tokens):
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece((index - self.fairseq_offset))
    def convert_tokens_to_string(self, tokens):
        """Join tokens into a string, decoding sp pieces between specials."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            if (token in self.all_special_tokens):
                if (not prev_is_special):
                    out_string += ' '
                out_string += (self.sp_model.decode(current_sub_tokens) + token)
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        """Drop the unpicklable SentencePiece processor before pickling."""
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d):
        """Rebuild the SentencePiece processor after unpickling."""
        self.__dict__ = d
        # Backward compatibility with pickles created before sp_model_kwargs.
        if (not hasattr(self, 'sp_model_kwargs')):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if ((os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)) and os.path.isfile(self.vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        elif (not os.path.isfile(self.vocab_file)):
            # No file on disk: dump the in-memory serialized model instead.
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
class MaskRCNN(Detector):
    """COCO Mask R-CNN detector restored from a frozen TF1 meta-graph."""
    def __init__(self, new_size=(416, 416), **kwargs):
        """Load params.yaml config, restore the graph, and cache I/O tensors.

        Args:
            new_size: (width, height) the input image is resized to.
        """
        super(MaskRCNN, self).__init__()
        self.p = yaml.load(open('/home/code/classifiers/params.yaml', 'r'), Loader=yaml.FullLoader)['maskrcnn']['_base']
        config = tf.compat.v1.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.compat.v1.Session(config=config)
        # COCO label ids -> class names ('BG' is the background class 0).
        self.class_dict = {i: c for (i, c) in enumerate(['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'])}
        saver = tf.compat.v1.train.import_meta_graph(self.p['meta_graph_path'])
        saver.restore(self.sess, self.p['restore_path'])
        self.boxes = tf.compat.v1.get_default_graph().get_tensor_by_name('output/boxes:0')
        self.scores = tf.compat.v1.get_default_graph().get_tensor_by_name('output/scores:0')
        self._scores = tf.compat.v1.get_default_graph().get_tensor_by_name('fastrcnn_all_scores:0')
        self.labels = tf.compat.v1.get_default_graph().get_tensor_by_name('output/labels:0')
        self.input = tf.compat.v1.get_default_graph().get_tensor_by_name('image:0')
        self.new_size = new_size
    def needs_roi(self):
        """This detector consumes whole frames, not ROI crops."""
        return False
    def preprocess_input(self, image):
        """Resize and convert an RGB uint8 image for the network.

        Returns:
            (preprocessed BGR image, original height, original width)
        """
        assert (image.dtype == np.uint8), image.dtype
        assert (image.ndim == 3), image.ndim
        (height_ori, width_ori) = image.shape[:2]
        # BUG FIX: cv2.resize's third positional parameter is `dst`, not the
        # interpolation flag -- pass it by keyword.
        img = cv2.resize(image, (self.new_size[0], self.new_size[1]), interpolation=cv2.INTER_LINEAR)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        return (img, height_ori, width_ori)
    def forward(self, img_ori, tracker_box=None, tracker_pad=None, tracker_min_pad=None):
        """Run detection on a frame; returns boxes/scores/labels and the
        tracker ROI rescaled to the original image size.

        NOTE(review): tracker_box is unpacked unconditionally, so callers are
        expected to always pass it -- confirm against call sites.
        """
        (x, y, w, h) = [max(int(v), 0) for v in tracker_box]
        (img, height_ori, width_ori) = self.preprocess_input(img_ori)
        (boxes_, scores_, labels_, _scores_) = self.sess.run([self.boxes, self.scores, self.labels, self._scores], feed_dict={self.input: img})
        boxes_ = clfutils.scale_box_to_org_size(boxes_, width_ori, height_ori, tar_width=self.new_size[0], tar_height=self.new_size[1])
        tracker_roi = [x, y, (x + w), (y + h)]
        return_dict = dict()
        return_dict['boxes'] = boxes_
        return_dict['scores'] = scores_
        return_dict['_scores'] = _scores_
        return_dict['labels'] = labels_
        return_dict['tracker_roi'] = tracker_roi
        return_dict['input'] = img
        return return_dict
    @staticmethod
    def adjust_input_for_plot(input):
        """Convert a BGR uint8 network input to RGB floats in [0, 1].

        BUG FIX: declared @staticmethod -- the original had no `self`
        parameter, so any instance call would have bound the instance to
        `input` and failed.
        """
        return (input[(..., [2, 1, 0])].astype(float) / 255.0)
    def detect_and_draw_all(self, result, det_threshold, img, color=(0, 255, 0)):
        """Draw every detection above `det_threshold` onto `img` and return it."""
        for (j, box) in enumerate(result['boxes']):
            (x0, y0, x1, y1) = box.flatten()
            score = result['scores'][j]
            label = result['labels'][j]
            class_name = self.class_dict[label]
            if (score > det_threshold):
                print(('Box at %s for %s (%s), score %s' % (box, class_name, label, score)))
                clfutils.plot_one_box(img, [x0, y0, x1, y1], label=('%s (%.3f)' % (class_name, score)), color=color)
        return img
_ordering
class Simplex(SageObject):
    """An abstract simplex: an ordered tuple of vertices with set semantics.

    Construct from an integer n (yielding vertices 0..n) or from any
    iterable of vertices. Equality and hashing are by vertex *set*, while
    the tuple preserves construction order.
    """
    def __init__(self, X):
        try:
            # Integer argument: the standard n-simplex on vertices 0..n.
            N = (int(X) + 1)
            if (N < 0):
                raise ValueError('the n-simplex is only defined if n > -2')
            self.__tuple = tuple(range(N))
        except TypeError:
            # Non-integer argument: treat X as an iterable of vertices.
            self.__tuple = tuple(X)
        self.__set = frozenset(self.__tuple)
    def tuple(self):
        """Return the vertices as a tuple (in construction order)."""
        return self.__tuple
    def set(self):
        """Return the vertices as a frozenset."""
        return self.__set
    def is_face(self, other):
        """Return True if this simplex's vertices are a subset of `other`'s."""
        return self.__set.issubset(other.__set)
    def __contains__(self, x):
        """Vertex membership test."""
        return (x in self.__set)
    def __getitem__(self, n):
        """Return the n-th vertex."""
        return self.__tuple[n]
    def __iter__(self):
        """Iterate over vertices in tuple order."""
        return iter(self.__tuple)
    def __add__(self, other):
        """Concatenate vertex tuples (join as tuples, not a geometric join)."""
        return Simplex((self.__tuple + other.__tuple))
    def face(self, n):
        """Return the n-th face: this simplex with vertex n removed.

        Raises:
            IndexError: if n is outside [0, dimension].
        """
        if ((n >= 0) and (n <= self.dimension())):
            return Simplex((self.__tuple[:n] + self.__tuple[(n + 1):]))
        else:
            raise IndexError('{} does not have an nth face for n={}'.format(self, n))
    def faces(self):
        """Return the list of all codimension-1 faces."""
        return [self.face(i) for i in range((self.dimension() + 1))]
    def dimension(self):
        """Dimension = number of vertices minus one (-1 for the empty simplex)."""
        return (len(self.__tuple) - 1)
    def is_empty(self):
        """True for the empty simplex (dimension < 0)."""
        return (self.dimension() < 0)
    def join(self, right, rename_vertices=True):
        """Geometric join: simplex on the union of both vertex sets.

        With `rename_vertices`, vertices are prefixed 'L'/'R' to guarantee
        disjointness; otherwise tuples are concatenated as-is.
        """
        if rename_vertices:
            vertex_set = ([('L' + str(v)) for v in self] + [('R' + str(w)) for w in right])
        else:
            vertex_set = (self.__tuple + right.__tuple)
        return Simplex(vertex_set)
    def product(self, other, rename_vertices=True):
        """Triangulated product: one simplex per lattice path through the grid."""
        if (not rename_vertices):
            return [Simplex(x) for x in lattice_paths(self.tuple(), other.tuple())]
        answer = []
        for x in lattice_paths(self.tuple(), other.tuple()):
            new = tuple([((('L' + str(v)) + 'R') + str(w)) for (v, w) in x])
            answer.append(Simplex(new))
        return answer
    def alexander_whitney(self, dim):
        """Front-face/back-face splitting at `dim` (Alexander-Whitney map)."""
        return [(ZZ.one(), Simplex(self.tuple()[:(dim + 1)]), Simplex(self.tuple()[dim:]))]
    def __eq__(self, other):
        """Equality by vertex set (order-insensitive)."""
        if (not isinstance(other, Simplex)):
            return False
        return (set(self) == set(other))
    def __ne__(self, other):
        return (not (self == other))
    def __lt__(self, other):
        """Order by sorted vertices; falls back to string sort for mixed types."""
        if (not isinstance(other, Simplex)):
            return False
        try:
            return (sorted(self) < sorted(other))
        except TypeError:
            return (sorted(map(str, self)) < sorted(map(str, other)))
    def __hash__(self):
        # Hash by frozenset, consistent with set-based __eq__.
        return hash(self.__set)
    def _repr_(self):
        return repr(self.__tuple)
    def _latex_(self):
        return latex(self.__tuple)
_pydub_effect
def low_pass_filter(seg, cutoff_freq, order=5):
    """Apply a Butterworth low-pass filter to every channel of `seg`.

    Args:
        seg: the audio segment to filter.
        cutoff_freq: cutoff frequency in Hz.
        order: Butterworth filter order.
    """
    butter = _mk_butter_filter(cutoff_freq, 'lowpass', order=order)
    return seg.apply_mono_filter_to_each_channel(butter)
def add_gazebo_thruster_config(xacro_target, yaml_file=None, requested_macros=None, boiler_plate_top='', boiler_plate_bot=''):
    """Append Gazebo thruster-config macro calls to a xacro file.

    Args:
        xacro_target: path of the xacro file to append to.
        yaml_file: YAML file describing thrusters; read only when
            `requested_macros` is None.
        requested_macros: mapping of macro name -> list of thruster dicts
            (each with a 'prefix' key); overrides `yaml_file` when given.
        boiler_plate_top / boiler_plate_bot: text written before/after the
            macro calls.
    """
    # BUG FIX: open in text append mode ('a'), not binary ('ab') -- the
    # boiler plate and macro calls are str and would raise TypeError on a
    # binary handle. A with-block also guarantees the file is closed.
    with open(xacro_target, 'a') as xacro_file:
        xacro_file.write(boiler_plate_top)
        if (requested_macros is None):
            with open(yaml_file, 'r') as s:
                requested_macros = yaml.safe_load(s)
            # Empty YAML: close out the file and stop.
            if (requested_macros is None):
                xacro_file.write(boiler_plate_bot)
                return
        for (key, objects) in requested_macros.items():
            for obj in objects:
                xacro_file.write((' ' + macro_call_gen('wamv_gazebo_thruster_config', {'name': obj['prefix']})))
        xacro_file.write(boiler_plate_bot)
def distance(c1, c2):
    """Squared Euclidean distance between two RGB color triples."""
    r1, g1, b1 = c1
    r2, g2, b2 = c2
    return (r1 - r2) ** 2 + (g1 - g2) ** 2 + (b1 - b2) ** 2
def __call__(self, func):
    """Use this object as a decorator: run `func` inside `with self:`.

    The stray bare `(func)` expression in the original is residue of a
    stripped `@functools.wraps(func)` decorator; restore it so the wrapper
    keeps the wrapped function's name and docstring.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        with self:
            return func(*args, **kwargs)
    return wrapper
('detection', 'lstm', LSTMParams)
class ForecastBasedLSTM(ForcastBasedNeuralAD):
    """Forecast-based anomaly detector backed by an LSTM model."""
    def __init__(self, config: LSTMParams):
        """Build the detector and its LSTM from the given hyperparameters."""
        super().__init__(config)
        self.config = config
        self.model = LSTM(config=self.config)
def test_suppress_validation():
    """assume_finite=True must suppress the non-finite check in
    assert_all_finite; turning it back off must restore the ValueError."""
    X = np.array([0, np.inf])
    # Default config: non-finite input raises.
    with pytest.raises(ValueError):
        assert_all_finite(X)
    # Suppress validation globally; the same call must now pass.
    sklearn.set_config(assume_finite=True)
    assert_all_finite(X)
    # Restore validation; the error returns.
    sklearn.set_config(assume_finite=False)
    with pytest.raises(ValueError):
        assert_all_finite(X)
def ignore_comments(lines_enum):
    """Strip comments and surrounding whitespace from numbered lines,
    yielding only the (line_number, text) pairs that remain non-empty."""
    for line_number, raw_line in lines_enum:
        cleaned = COMMENT_RE.sub('', raw_line).strip()
        if cleaned:
            yield line_number, cleaned
def mock_mask_rcnn_inference(tensor_mode, patched_module, check=True):
    """Generator (contextmanager-style) that patches `mask_rcnn_inference`
    in `patched_module` with the Caffe2 implementation for the duration of
    the with-block.

    Args:
        tensor_mode: unused in this body -- presumably kept for signature
            parity with sibling helpers; TODO confirm.
        patched_module: dotted module path whose mask_rcnn_inference is patched.
        check: if True, assert the patched function was actually called.
    """
    with mock.patch('{}.mask_rcnn_inference'.format(patched_module), side_effect=Caffe2MaskRCNNInference()) as mocked_func:
        (yield)
    if check:
        assert (mocked_func.call_count > 0)
class ContrastiveHead(nn.Module):
    """Contrastive-learning head: cross-entropy over concatenated
    positive/negative similarity logits, with the positive at index 0.
    """
    def __init__(self, temperature=0.2):
        """
        Args:
            temperature: softmax temperature dividing the logits.
        """
        super(ContrastiveHead, self).__init__()
        self.criterion = nn.CrossEntropyLoss()
        self.temperature = temperature
    def forward(self, pos, neg):
        """Compute the contrastive loss.

        Args:
            pos: (N, 1) positive-pair logits.
            neg: (N, K) negative-pair logits.
        Returns:
            Scalar loss tensor.
        """
        N = pos.size(0)
        logits = torch.cat((pos, neg), dim=1)
        logits /= self.temperature
        # The positive sits at column 0, so every label is 0. BUG FIX:
        # allocate labels on the inputs' device instead of hard-coding
        # .cuda(), so CPU (and any other device) tensors also work.
        labels = torch.zeros((N,), dtype=torch.long, device=pos.device)
        loss = self.criterion(logits, labels)
        return loss
class HybridSession(object):
    """Factory for role-specific hybrid negotiation sessions."""
    @classmethod
    def get_session(cls, agent, kb, lexicon, generator, manager, config=None):
        """Return a Buyer/Seller hybrid session depending on kb.role.

        BUG FIXES: the first parameter was named `cls` but the method was
        never decorated with @classmethod, so instance/class calls mis-bound;
        and ValueError was given (msg, arg) as two args, which never
        interpolated the role into the message.

        Raises:
            ValueError: if kb.role is neither 'buyer' nor 'seller'.
        """
        if (kb.role == 'buyer'):
            return BuyerHybridSession(agent, kb, lexicon, config, generator, manager)
        elif (kb.role == 'seller'):
            return SellerHybridSession(agent, kb, lexicon, config, generator, manager)
        else:
            raise ValueError('Unknown role: %s' % kb.role)
def make_jobarray_configs(dataset, nb_repetitions):
    """Write one job-array config script per (repetition, test subject).

    Relies on module-level globals (avoid_sir_holger, job_name, config_dict,
    config_file, helpers, get_train_test, get_val) -- presumably set by the
    surrounding script; TODO confirm.

    Args:
        dataset: dataset identifier passed to the split helpers.
        nb_repetitions: number of repeated runs per test subject.
    """
    (train_horses, test_horses) = get_train_test(dataset, avoid_sir_holger=avoid_sir_holger)
    output_dir = os.path.join('../run_scripts', job_name)
    helpers.mkdir(output_dir)
    counter_config = 1
    for rep in range(nb_repetitions):
        for (ind, test_subject) in enumerate(test_horses):
            commands = []
            if (config_dict['val_mode'] == 'subject'):
                val_horses = get_val(dataset, test_subject)
            if (config_dict['val_mode'] == 'no_val'):
                val_horses = ''
            # BUG FIX: compare strings with != rather than `is not`. Identity
            # comparison depends on CPython string interning and could
            # silently keep the test subject in the training set.
            train_subjects = [x for x in train_horses if ((x != test_subject) and (x not in val_horses))]
            commands.append('bash')
            commands.append(('CONFIG_FILE=' + config_file))
            commands.append(('TRAIN_SUBJECTS=' + '/'.join(train_subjects)))
            commands.append(('VAL_SUBJECTS=' + '/'.join(val_horses)))
            commands.append(('TEST_SUBJECTS=' + test_subject))
            print(commands, '\n')
            config_filename = (('config-' + str(counter_config)) + '.sh')
            out_file = os.path.join(output_dir, config_filename)
            print(out_file)
            print('\n')
            print('\n')
            helpers.write_file(out_file, commands)
            counter_config += 1
def labelcolormap(N):
    """Build an (N, 3) uint8 colormap where each label id gets a distinct
    color derived from its bit pattern (3 bits consumed per round, spread
    across the R/G/B channels from the high bit down)."""
    cmap = np.zeros((N, 3), dtype=np.uint8)
    for label in range(N):
        r = g = b = 0
        remaining = label
        for shift in range(7):
            bits = uint82bin(remaining)
            # Fold the three lowest bits into R, G, B at position (7 - shift).
            r = r ^ (np.uint8(bits[-1]) << (7 - shift))
            g = g ^ (np.uint8(bits[-2]) << (7 - shift))
            b = b ^ (np.uint8(bits[-3]) << (7 - shift))
            remaining = remaining >> 3
        cmap[label, 0] = r
        cmap[label, 1] = g
        cmap[label, 2] = b
    return cmap
def mean_percentile(image, footprint, out=None, mask=None, shift_x=False, shift_y=False, p0=0, p1=1):
    """Local mean of `image` restricted to the [p0, p1] percentile interval
    of each footprint neighborhood; thin wrapper delegating to the Cython
    rank-filter kernel via _apply."""
    return _apply(percentile_cy._mean, image, footprint, out=out, mask=mask, shift_x=shift_x, shift_y=shift_y, p0=p0, p1=p1)
def test_sum():
    """ak.sum over ragged lists: timedelta64 sums along both axes must match
    the expected values; datetime64 (a point-in-time type) must refuse to sum."""
    dtypes = ['datetime64[s]', 'timedelta64[D]']
    arrays = (np.arange(0, 12, dtype=dtype) for dtype in dtypes)
    for array in arrays:
        content = ak.contents.NumpyArray(array)
        offsets = ak.index.Index64(np.array([0, 4, 8, 12], dtype=np.int64))
        # Three sublists of four elements each.
        depth = ak.contents.ListOffsetArray(offsets, content)
        if np.issubdtype(array.dtype, np.timedelta64):
            # Innermost axis: -1 and 1 are equivalent here.
            assert (to_list(ak.sum(depth, (- 1), highlevel=False)) == [datetime.timedelta(6), datetime.timedelta(22), datetime.timedelta(38)])
            assert (to_list(ak.sum(depth, 1, highlevel=False)) == [datetime.timedelta(6), datetime.timedelta(22), datetime.timedelta(38)])
            # Outermost axis: -2 and 0 are equivalent here.
            assert (to_list(ak.sum(depth, (- 2), highlevel=False)) == [datetime.timedelta(12), datetime.timedelta(15), datetime.timedelta(18), datetime.timedelta(21)])
            assert (to_list(ak.sum(depth, 0, highlevel=False)) == [datetime.timedelta(12), datetime.timedelta(15), datetime.timedelta(18), datetime.timedelta(21)])
        else:
            # Summing datetimes is meaningless and must raise.
            with pytest.raises(ValueError):
                ak.sum(depth, (- 1), highlevel=False)
def plot(data, title='Figure', legends=None, axis_x=None, axis_y=None, file_path=None, file_name=None, figure_size=(16, 9), has_grid=True, limits_axis_y=None, upper_lower_data=None, limits_axis_x=None):
    """Plot one or more (x, y) series, optionally with shaded upper/lower
    bands, and either show the figure or save it as SVG.

    Args:
        data: sequence of (x_values, y_values) pairs, one per series.
        legends: per-series legend labels.
        axis_x / axis_y: axis label strings.
        file_path / file_name: when both are given, save instead of show.
        limits_axis_y / limits_axis_x: (min, max, step) for limits and ticks.
        upper_lower_data: per-series (upper, lower) curves to shade between.
    """
    plots = []
    # The original hand-maintained color list was this 22-color palette
    # repeated many times (with the first two entries swapped for
    # 'steelblue'/'indianred'); generate it instead, sized so any number of
    # series is covered without an IndexError.
    palette = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna', 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange', 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon']
    colors = (['steelblue', 'indianred'] + palette[2:]) + (palette * ((len(data) // len(palette)) + 1))
    plt.rcParams['figure.figsize'] = figure_size
    plt.title(title)
    plt.grid(has_grid)
    if axis_x is not None:
        plt.xlabel(axis_x)
    if axis_y is not None:
        plt.ylabel(axis_y)
    for d in range(len(data)):
        (current_fig,) = plt.plot(data[d][0], data[d][1], color=colors[d])
        if upper_lower_data is not None:
            # Shade the band between the upper and lower curves.
            plt.fill_between(data[d][0], np.array(upper_lower_data[d][0], dtype=float), np.array(upper_lower_data[d][1], dtype=float), where=(np.array(upper_lower_data[d][0], dtype=float) > np.array(upper_lower_data[d][1], dtype=float)), alpha=0.5, interpolate=True)
        plots.append(current_fig)
    if legends is not None:
        plt.legend(plots, legends)
    if limits_axis_y is not None:
        plt.ylim(limits_axis_y[:2])
        plt.yticks(np.arange(limits_axis_y[0], (limits_axis_y[1] + limits_axis_y[2]), limits_axis_y[2]))
    if limits_axis_x is not None:
        plt.xlim(limits_axis_x[:2])
        plt.xticks(np.arange(limits_axis_x[0], (limits_axis_x[1] + limits_axis_x[2]), limits_axis_x[2]))
    if (file_name is None) or (file_path is None):
        plt.show()
    else:
        full_path = path.join(file_path, file_name)
        if not path.isdir(file_path):
            makedirs(file_path)
        plt.savefig(full_path, format='svg')
        plt.close()
        print(('Figure saved at %s successfully.' % full_path))
def add_fast_rcnn_losses(model):
    """Add Fast R-CNN classification (softmax) and box-regression (smooth-L1)
    losses to the model, register them plus a classification-accuracy metric,
    and return the loss gradient blobs."""
    (cls_prob, loss_cls) = model.net.SoftmaxWithLoss(['cls_score', 'labels_int32'], ['cls_prob', 'loss_cls'], scale=model.GetLossScale())
    loss_bbox = model.net.SmoothL1Loss(['bbox_pred', 'bbox_targets', 'bbox_inside_weights', 'bbox_outside_weights'], 'loss_bbox', scale=model.GetLossScale())
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_cls, loss_bbox])
    model.Accuracy(['cls_prob', 'labels_int32'], 'accuracy_cls')
    model.AddLosses(['loss_cls', 'loss_bbox'])
    model.AddMetrics('accuracy_cls')
    return loss_gradients
()
('--num_epochs', default=500)
('--num_train_tasks', default=100)
('--num_test_tasks', default=30)
('--encoder_hidden_size', default=200)
('--net_size', default=300)
('--num_steps_per_epoch', default=2000)
('--num_initial_steps', default=2000)
('--num_steps_prior', default=400)
('--num_extra_rl_steps_posterior', default=600)
('--batch_size', default=256)
('--embedding_batch_size', default=100)
('--embedding_mini_batch_size', default=100)
('--max_path_length', default=200)
_experiment
def pearl_half_cheetah_vel(ctxt=None, seed=1, num_epochs=500, num_train_tasks=100, num_test_tasks=30, latent_size=5, encoder_hidden_size=200, net_size=300, meta_batch_size=16, num_steps_per_epoch=2000, num_initial_steps=2000, num_tasks_sample=5, num_steps_prior=400, num_extra_rl_steps_posterior=600, batch_size=256, embedding_batch_size=100, embedding_mini_batch_size=100, max_path_length=200, reward_scale=5.0, use_gpu=False):
    """Train PEARL on the HalfCheetah-Vel meta-RL benchmark.

    Builds task samplers, the latent-augmented Q/V functions and policy,
    then runs the PEARL meta-training loop for `num_epochs` epochs.
    """
    set_seed(seed)
    # Three equal-width hidden layers for the context encoder.
    encoder_hidden_sizes = (encoder_hidden_size, encoder_hidden_size, encoder_hidden_size)
    env_sampler = SetTaskSampler((lambda : GarageEnv(normalize(HalfCheetahVelEnv()))))
    env = env_sampler.sample(num_train_tasks)
    test_env_sampler = SetTaskSampler((lambda : GarageEnv(normalize(HalfCheetahVelEnv()))))
    runner = LocalRunner(ctxt)
    # Q-function observes the env observation augmented with the latent code.
    augmented_env = PEARL.augment_env_spec(env[0](), latent_size)
    qf = ContinuousMLPQFunction(env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
    vf_env = PEARL.get_env_spec(env[0](), latent_size, 'vf')
    vf = ContinuousMLPQFunction(env_spec=vf_env, hidden_sizes=[net_size, net_size, net_size])
    inner_policy = TanhGaussianMLPPolicy(env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size])
    pearl = PEARL(env=env, policy_class=ContextConditionedPolicy, encoder_class=MLPEncoder, inner_policy=inner_policy, qf=qf, vf=vf, num_train_tasks=num_train_tasks, num_test_tasks=num_test_tasks, latent_dim=latent_size, encoder_hidden_sizes=encoder_hidden_sizes, test_env_sampler=test_env_sampler, meta_batch_size=meta_batch_size, num_steps_per_epoch=num_steps_per_epoch, num_initial_steps=num_initial_steps, num_tasks_sample=num_tasks_sample, num_steps_prior=num_steps_prior, num_extra_rl_steps_posterior=num_extra_rl_steps_posterior, batch_size=batch_size, embedding_batch_size=embedding_batch_size, embedding_mini_batch_size=embedding_mini_batch_size, max_path_length=max_path_length, reward_scale=reward_scale)
    set_gpu_mode(use_gpu, gpu_id=0)
    if use_gpu:
        # Move PEARL's networks to the GPU.
        pearl.to()
    runner.setup(algo=pearl, env=env[0](), sampler_cls=LocalSampler, sampler_args=dict(max_path_length=max_path_length), n_workers=1, worker_class=PEARLWorker)
    runner.train(n_epochs=num_epochs, batch_size=batch_size)
class TestOptLGS(TestCase):
    """Tests for the LGS optimizer."""
    def test_quadratic_minimum(self):
        """The optimizer should locate the minimum of the module-level
        objective `f` at (1.0, 2.0).

        NOTE(review): `f` and the meaning of the ((), ()) bounds argument are
        defined elsewhere in the project -- confirm against OptimizerLGS.
        """
        lgs = OptimizerLGS()
        result = lgs(f, ((), ()))
        self.assertEqual(result['best'][0], 1.0)
        self.assertEqual(result['best'][1], 2.0)
def read_frequency_vocab(filename, min_freq):
    """Read a tab-separated "freq<TAB>word" vocabulary file, keeping words
    whose frequency is at least `min_freq`.

    Returns:
        List of words starting with the UNK and EOS sentinels.
    """
    filename = os.path.join(data.workspace.vocab, filename)
    words = [UNK, EOS]
    # BUG FIX: builtins.open's third positional parameter is `buffering`
    # (an int); passing 'utf8' there raises TypeError. Use the encoding
    # keyword, and a with-block so the file is always closed.
    with open(filename, 'r', encoding='utf8') as fin:
        for line in fin:
            (freq, word) = line.rstrip('\n').split('\t')
            if (word.strip() and (int(freq) >= min_freq)):
                words.append(word.strip())
    logging.info('Read %d words from %s', len(words), filename)
    return words
def process(filename):
    """Load a song, normalize it to six named instrument tracks, and return
    the cleaned muspy.Music object (or None if the song is unusable).

    A song is rejected when it has no tracks, runs longer than 20 minutes,
    or has fewer than two non-drum instruments with more than 10 notes.
    """
    music = muspy.read(filename)
    if (not music.tracks):
        return None
    # Normalize timing to 24 ticks per beat.
    music.adjust_resolution(24)
    # Reject songs longer than 20 minutes (1200 s).
    if (music.get_real_end_time() > 1200):
        return None
    notes = {'Piano': [], 'Guitar': [], 'Bass': [], 'Strings': [], 'Brass': [], 'Drums': []}
    # Bucket every positive-duration note by instrument family.
    for track in music.tracks:
        if track.is_drum:
            for note in track.notes:
                if (note.duration > 0):
                    notes['Drums'].append(note)
        else:
            instrument = get_instrument(track.program)
            if (instrument is not None):
                for note in track.notes:
                    if (note.duration > 0):
                        notes[instrument].append(note)
    for name in notes:
        # Deduplicate notes by (time, pitch, duration), then sort.
        note_dict = {}
        for note in notes[name]:
            note_dict[(note.time, note.pitch, note.duration)] = note
        notes[name] = list(note_dict.values())
        notes[name].sort(key=operator.attrgetter('time', 'pitch', 'duration'))
    # Require at least two non-drum instruments with more than 10 notes.
    if (sum((((len(v) > 10) and (k != 'Drums')) for (k, v) in notes.items())) < 2):
        return None
    # Rebuild the track list with one track per instrument bucket.
    music.tracks = []
    for name in notes:
        track = muspy.Track(name=name, program=(CONFIG['lmd']['programs'][name] if (name != 'Drums') else 0), is_drum=(name == 'Drums'), notes=notes[name])
        track.sort()
        music.tracks.append(track)
    return music
class Model(nn.Module):
    """Minimal model: a single fully connected (linear) layer."""

    def __init__(self, input_size, output_size):
        """Create the linear layer mapping input_size -> output_size."""
        super(Model, self).__init__()
        self.fc = nn.Linear(input_size, output_size)

    def forward(self, input):
        """Apply the linear layer to `input` and return the result."""
        return self.fc(input)
def get_default_qat_qconfig(backend='fbgemm'):
    """Return the default quantization-aware-training QConfig for `backend`.

    'fbgemm' (server) uses per-channel weight fake-quant with a reduced
    activation range; 'qnnpack' (mobile) uses per-tensor weights with the
    full 8-bit activation range; anything else falls back to the generic
    default QAT qconfig.
    """
    if backend == 'fbgemm':
        activation = FakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255, reduce_range=True)
        return QConfig(activation=activation, weight=default_per_channel_weight_fake_quant)
    if backend == 'qnnpack':
        activation = FakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255, reduce_range=False)
        return QConfig(activation=activation, weight=default_weight_fake_quant)
    return default_qat_qconfig
class KeyManager():
    """Collects keys delivered by lower-layer protocols.

    Tracks the delivered key material in `keys` and the (second-resolution)
    delivery times in `times`.
    """

    def __init__(self, timeline, keysize, num_keys):
        """
        Args:
            timeline: simulation timeline providing now() in picoseconds.
            keysize: size of each requested key.
            num_keys: number of keys requested per push.
        """
        self.timeline = timeline
        self.lower_protocols = []
        self.keysize = keysize
        self.num_keys = num_keys
        self.keys = []
        self.times = []

    def send_request(self):
        """Ask every registered lower protocol for keys."""
        for protocol in self.lower_protocols:
            protocol.push(self.keysize, self.num_keys)

    def pop(self, info):
        """Record a delivered key and its arrival time in seconds."""
        self.keys.append(info)
        # timeline.now() is in picoseconds; convert to seconds.
        self.times.append(self.timeline.now() * 1e-09)
_module()
class SyncRandomSizeHook(Hook):
    """Deprecated hook that randomizes the training image scale every
    `interval` epochs and synchronizes the chosen size across ranks."""
    def __init__(self, ratio_range=(14, 26), img_scale=(640, 640), interval=1, device='cuda'):
        """
        Args:
            ratio_range: inclusive range of size factors (multiplied by 32).
            img_scale: base (h, w) scale used to derive the aspect ratio.
            interval: epoch interval between size changes.
            device: device holding the broadcast tensor.
        """
        warnings.warn("DeprecationWarning: SyncRandomSizeHook is deprecated. Please use Resize pipeline to achieve similar functions. Due to the multi-process dataloader, its behavior is different from YOLOX's official implementation, the official is to change the size every fixed iteration interval and what we achieved is a fixed epoch interval.")
        (self.rank, world_size) = get_dist_info()
        self.is_distributed = (world_size > 1)
        self.ratio_range = ratio_range
        self.img_scale = img_scale
        self.interval = interval
        self.device = device
    def after_train_epoch(self, runner):
        """Pick a new random size on rank 0, broadcast it, and push it to
        the dataset."""
        if ((self.ratio_range is not None) and (((runner.epoch + 1) % self.interval) == 0)):
            # Two-element tensor carries (height, width) to all ranks.
            tensor = torch.LongTensor(2).to(self.device)
            if (self.rank == 0):
                size_factor = ((self.img_scale[1] * 1.0) / self.img_scale[0])
                size = random.randint(*self.ratio_range)
                # Sizes are multiples of 32; width follows the aspect ratio.
                size = (int((32 * size)), (32 * int((size * size_factor))))
                tensor[0] = size[0]
                tensor[1] = size[1]
            if self.is_distributed:
                dist.barrier()
                dist.broadcast(tensor, 0)
            runner.data_loader.dataset.update_dynamic_scale((tensor[0].item(), tensor[1].item()))
class Sentence(object):
    """Parsed sentence holding its tokens, POS tags, and a phrase tree.

    Expects `sentence` to be a list: [text, pos_tags, phrase_line, ...]
    where each phrase line is tab-separated:
    idx, align_idx, start, end, parent_idx, text, label.
    """
    def __init__(self, sentence):
        super(Sentence, self).__init__()
        sent_text = sentence[0]
        sent_ptags = sentence[1]
        self.sent = sent_text
        self.tokens = sent_text.split(' ')
        self.pos = sent_ptags.split(' ')
        self.phrases = {}
        # Phrase -1 is the sentinel root covering the whole sentence ('S').
        self.phrases[(- 1)] = Phrase((- 1), 0, len(self.tokens), len(self.tokens), 'S', sent_text, None, (- 1))
        # parent index -> list of child phrase indices, filled as we parse.
        parents_temp = {}
        for phrase in sentence[2:]:
            phrase_chunks = phrase.split('\t')
            phrase_idx = int(phrase_chunks[0])
            align_idx = int(phrase_chunks[1])
            start_idx = int(phrase_chunks[2])
            end_idx = int(phrase_chunks[3])
            parent_idx = int(phrase_chunks[4])
            text = phrase_chunks[5]
            label = phrase_chunks[6]
            size = (end_idx - start_idx)
            self.phrases[phrase_idx] = Phrase(phrase_idx, start_idx, end_idx, size, label, text, parent_idx, align_idx)
            if (parent_idx not in parents_temp):
                parents_temp[parent_idx] = [phrase_idx]
            else:
                parents_temp[parent_idx].append(phrase_idx)
        # Attach child lists to each non-root phrase.
        # NOTE(review): 'childen' looks like a typo for 'children', but the
        # Phrase class may read this exact attribute name -- confirm before
        # renaming.
        for idx in parents_temp.keys():
            if (idx == (- 1)):
                continue
            else:
                self.phrases[idx].childen = parents_temp[idx]
def parse_args():
    """Build and parse the command-line arguments for the evaluation script."""
    parser = argparse.ArgumentParser()
    # Required options.
    parser.add_argument('--task', required=True, choices=TASKS)
    parser.add_argument('--align', required=True, choices=ALIGNS, help='the align model to use')
    parser.add_argument('--aspect', required=True, help='the aspect to evaluate')
    parser.add_argument('--hypo', required=True, help='a file with all hypothesized texts to evaluate, line-by-line')
    # Behaviour flags.
    parser.add_argument('--remove_stopwords', default=False, action='store_true', help='whether to remove stopwords in aligning')
    parser.add_argument('--scores_save_path', default='/tmp/scores.txt', help='a path to save example-wise scores.')
    # Task-dependent input files (all optional; registration order preserved).
    optional_inputs = [
        ('--fact', 'a file with all facts, line-by-line'),
        ('--dialog_history', 'a file with all dialog histories, line-by-line'),
        ('--doc', 'a file with all documents, line-by-line'),
        ('--refs', 'a file with all references, line-by-line. if each document has more than one reference, divide them by "|||"'),
        ('--input_sent', 'a file with all input sentences, line-by-line'),
        ('--grounding', 'a file with all grounding texts, line-by-line'),
    ]
    for flag, description in optional_inputs:
        parser.add_argument(flag, help=description)
    return parser.parse_args()
def create_model(weight_path):
    """Build a 2-class MobileNetV2 U-Net and load trained weights from *weight_path*."""
    net = smp.Unet(encoder_name='mobilenet_v2', encoder_weights=None, in_channels=3, classes=2)
    state_dict = torch.load(weight_path)
    net.load_state_dict(state_dict)
    return net
def analyze_SelectStmt(node: SelectStmt, cache: dict):
    # Execute a parsed SELECT whose WHERE clause is first converted to DNF.
    # Returns (rows, column_info); raises ValueError if the predicate is not DNF.
    # LIMIT value, or -1 meaning "no cap" (see the break condition below).
    limit = (node.limitCount.val.ival if node.limitCount else (- 1))
    sql_dnf_predicates = convert2dnf(node.whereClause)
    # Top-level OR: run each disjunct separately and concatenate the rows.
    if (isinstance(sql_dnf_predicates, BoolExpr) and (sql_dnf_predicates.boolop == BoolExprType.OR_EXPR)):
        # All-structural disjuncts first — presumably cheaper to execute; TODO confirm.
        choices = sorted(sql_dnf_predicates.args, key=(lambda x: if_all_structural(x)), reverse=True)
        res = []
        for choice in choices:
            # Remaining row budget shrinks as results accumulate.
            (choice_res, column_info) = execute_and(choice, node, (limit - len(res)), cache)
            res.extend(choice_res)
            if ((len(res) >= limit) and (limit != (- 1))):
                break
        # NOTE(review): column_info would be unbound if `choices` were empty —
        # presumably an OR node always has args; confirm.
        return (res, column_info)
    # Top-level AND: a single conjunction.
    elif (isinstance(sql_dnf_predicates, BoolExpr) and (sql_dnf_predicates.boolop == BoolExprType.AND_EXPR)):
        return execute_and(sql_dnf_predicates, node, limit, cache)
    # A lone comparison or a NOT-wrapped predicate is a degenerate conjunction.
    elif (isinstance(sql_dnf_predicates, A_Expr) or (isinstance(sql_dnf_predicates, BoolExpr) and (sql_dnf_predicates.boolop == BoolExprType.NOT_EXPR))):
        return execute_and(sql_dnf_predicates, node, limit, cache)
    else:
        raise ValueError('Expects sql to be in DNF, but is not: {}'.format(RawStream()(sql_dnf_predicates)))
class InfoGainSplitCriterion(SplitCriterion):
    """Information-gain split criterion for Hoeffding-tree style learners.

    The merit of a candidate split is its entropy reduction; splits where fewer
    than two branches carry more than ``min_branch_frac_option`` of the total
    weight are vetoed with -inf.
    """

    def __init__(self, min_branch_frac_option=0.01):
        super().__init__()
        # Minimum fraction of total weight that at least two branches must carry.
        self.min_branch_frac_option = min_branch_frac_option

    def get_merit_of_split(self, pre_split_dist, post_split_dist):
        """Return the information gain of the split, or -inf when vetoed."""
        if self.num_subsets_greater_than_frac(post_split_dist, self.min_branch_frac_option) < 2:
            return -np.inf
        return self.compute_entropy(pre_split_dist) - self.compute_entropy(post_split_dist)

    @staticmethod  # fix: decorator was missing; instance calls misbound arguments
    def get_range_of_merit(pre_split_dist):
        """Upper bound of the merit: log2 of the number of classes (at least 2)."""
        num_classes = len(pre_split_dist)
        num_classes = num_classes if num_classes > 2 else 2
        return np.log2(num_classes)

    def compute_entropy(self, dist):
        """Entropy of one class distribution (dict) or a weighted list of them."""
        if isinstance(dist, dict):
            return self._compute_entropy_dict(dist)
        elif isinstance(dist, list):
            return self._compute_entropy_list(dist)

    @staticmethod  # fix: had no `self`, so self._compute_entropy_dict(dist) raised TypeError
    def _compute_entropy_dict(dist):
        entropy = 0.0
        dis_sums = 0.0
        for _, d in dist.items():
            if d > 0.0:
                # Accumulate -d*log2(d); normalization to probabilities happens below.
                entropy -= d * np.log2(d)
                dis_sums += d
        return ((entropy + (dis_sums * np.log2(dis_sums))) / dis_sums) if dis_sums > 0.0 else 0.0

    def _compute_entropy_list(self, dists):
        """Weighted average entropy over a list of class distributions."""
        total_weight = 0.0
        dist_weights = [0.0] * len(dists)
        for i in range(len(dists)):
            dist_weights[i] = sum(dists[i].values())
            total_weight += dist_weights[i]
        entropy = 0.0
        for i in range(len(dists)):
            entropy += dist_weights[i] * self.compute_entropy(dists[i])
        return entropy / total_weight

    @staticmethod  # fix: decorator was missing; the call in get_merit_of_split misbound arguments
    def num_subsets_greater_than_frac(distributions, min_frac):
        """Count branches whose share of the total weight exceeds *min_frac*."""
        total_weight = 0.0
        dist_sums = [0.0] * len(distributions)
        for i in range(len(dist_sums)):
            dist_sums[i] = sum(distributions[i].values())
            total_weight += dist_sums[i]
        num_greater = 0
        if total_weight > 0:
            for d in dist_sums:
                if (d / total_weight) > min_frac:
                    num_greater += 1
        return num_greater
_grad()
def test(model, x_eval, y_eval, evaluator):
    """Evaluate *model* on one batch and return its accuracy.

    NOTE(review): the bare `_grad()` call above this function appears to be a
    mangled `@torch.no_grad()` decorator — restore before running.
    """
    model.eval()
    predictions = model(x_eval).argmax(dim=-1)
    metrics = evaluator.eval({'y_true': y_eval, 'y_pred': predictions})
    return metrics['acc']
class W2lKenLMDecoder(W2lDecoder):
    # Lexicon-constrained beam-search decoder backed by a KenLM language model
    # (flashlight/wav2letter python bindings).
    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)
        # Use '<ctc_blank>' as the silence token when the dictionary has it, else BOS.
        self.silence = (tgt_dict.index('<ctc_blank>') if ('<ctc_blank>' in tgt_dict.indices) else tgt_dict.bos())
        self.lexicon = load_words(args.lexicon)
        self.word_dict = create_word_dict(self.lexicon)
        self.unk_word = self.word_dict.get_index('<unk>')
        self.lm = KenLM(args.kenlm_model, self.word_dict)
        # Trie over token spellings; each word insertion carries its LM score
        # taken from the LM start state.
        self.trie = Trie(self.vocab_size, self.silence)
        start_state = self.lm.start(False)
        for (i, (word, spellings)) in enumerate(self.lexicon.items()):
            word_idx = self.word_dict.get_index(word)
            (_, score) = self.lm.score(start_state, word_idx)
            for spelling in spellings:
                spelling_idxs = [tgt_dict.index(token) for token in spelling]
                # Every spelling token must exist in the target dictionary.
                assert (tgt_dict.unk() not in spelling_idxs), f'{spelling} {spelling_idxs}'
                self.trie.insert(spelling_idxs, word_idx, score)
        self.trie.smear(SmearingMode.MAX)
        self.decoder_opts = LexiconDecoderOptions(beam_size=args.beam, beam_size_token=int(getattr(args, 'beam_size_token', len(tgt_dict))), beam_threshold=args.beam_threshold, lm_weight=args.lm_weight, word_score=args.word_score, unk_score=args.unk_weight, sil_score=args.sil_weight, log_add=False, criterion_type=self.criterion_type)
        if (self.asg_transitions is None):
            N = 768
            self.asg_transitions = []
        self.decoder = LexiconDecoder(self.decoder_opts, self.trie, self.lm, self.silence, self.blank, self.unk_word, self.asg_transitions, False)

    def decode(self, emissions):
        # Decode a batch of emission matrices (B, T, N); returns, per batch item,
        # the n-best hypotheses as dicts of tokens, score and words.
        (B, T, N) = emissions.size()
        hypos = []
        for b in range(B):
            # Raw pointer into the tensor storage for item b; the factor 4
            # assumes 4-byte (float32) elements — TODO confirm emissions dtype.
            emissions_ptr = (emissions.data_ptr() + ((4 * b) * emissions.stride(0)))
            results = self.decoder.decode(emissions_ptr, T, N)
            nbest_results = results[:self.nbest]
            hypos.append([{'tokens': self.get_tokens(result.tokens), 'score': result.score, 'words': [self.word_dict.get_entry(x) for x in result.words if (x >= 0)]} for result in nbest_results])
        return hypos
def gen_testloss(args):
    # Evaluate the MCR2 criterion on the test set for every saved checkpoint and
    # append the per-batch losses to losses_test.csv in the model directory.
    params = utils.load_params(args.model_dir)
    ckpt_dir = os.path.join(args.model_dir, 'checkpoints')
    # Checkpoint filenames carry the epoch number at a fixed offset ('<prefix>NNN.pt');
    # keep only the numeric part, sorted ascending.
    ckpt_paths = [int(e[11:(- 3)]) for e in os.listdir(ckpt_dir) if (e[(- 3):] == '.pt')]
    ckpt_paths = np.sort(ckpt_paths)
    headers = ['epoch', 'step', 'loss', 'discrimn_loss_e', 'compress_loss_e', 'discrimn_loss_t', 'compress_loss_t']
    csv_path = utils.create_csv(args.model_dir, 'losses_test.csv', headers)
    print('writing to:', csv_path)
    test_transforms = tf.load_transforms('test')
    testset = tf.load_trainset(params['data'], test_transforms, train=False)
    testloader = DataLoader(testset, batch_size=params['bs'], shuffle=False, num_workers=4)
    criterion = MaximalCodingRateReduction(gam1=params['gam1'], gam2=params['gam2'], eps=params['eps'])
    for (epoch, ckpt_path) in enumerate(ckpt_paths):
        # NOTE(review): the enumeration index — not the parsed checkpoint number
        # `ckpt_path` — is what is passed to load_checkpoint, and `epoch` is then
        # rebound by its return value; confirm this is intended.
        (net, epoch) = tf.load_checkpoint(args.model_dir, epoch=epoch, eval_=True)
        for (step, (batch_imgs, batch_lbls)) in enumerate(testloader):
            features = net(batch_imgs.cuda())
            # NOTE(review): len(testset.num_classes) assumes num_classes is a
            # sequence; it would raise TypeError for a plain int — confirm.
            (loss, loss_empi, loss_theo) = criterion(features, batch_lbls, num_classes=len(testset.num_classes))
            utils.save_state(args.model_dir, epoch, step, loss.item(), *loss_empi, *loss_theo, filename='losses_test.csv')
    print('Finished generating test loss.')
def max_pool(inputs, kernel=3):
    """Non-maximum suppression via max pooling: keep only local-maximum voxels.

    Args:
        inputs: 5-D tensor (N, C, D, H, W) — the shape F.max_pool3d expects.
        kernel: odd pooling window size; the derived padding keeps spatial shape.

    Returns:
        Tensor of the same shape where entries that are not the maximum of
        their local window are zeroed.
    """
    padding = (kernel - 1) // 2
    # fix: local was named `max`, shadowing the builtin.
    pooled = F.max_pool3d(inputs, kernel_size=kernel, stride=1, padding=padding)
    keep = (inputs == pooled).float()
    return keep * inputs
def plot_edges_from_adj(adj, coordinates, emph_short_edges=True, format=None, save_to=None, set_title=True, min_weight=0.1, show=True, k_hops_is_short=1, arrows=False, horizon=(- 1), resolution=5):
    # Draw the graph induced by adjacency matrix `adj` on a world map: the full
    # edge set in faint green, with short-range edges optionally emphasized.
    # NOTE(review): `set_title` and `horizon` are accepted but never used here.
    graph = get_graph_from_adj(adj, coordinates, min_weight=min_weight)
    print(f'#Nodes: {graph.number_of_nodes()}, #Edges: {graph.number_of_edges()}')
    # cm: central meridian (degrees) used to shift longitudes for plotting.
    cm = 180
    (fig, ax) = init_world_fig(cm=cm)
    pos = {}
    for (node, (lat_i, lon_i)) in enumerate(coordinates):
        pos[node] = ((lon_i - cm), lat_i)
    # Nodes are drawn nearly invisible; the edges carry the information.
    nx.draw_networkx_nodes(graph, pos, node_color='white', node_size=0.01)
    nx.draw_networkx_edges(graph, pos=pos, edge_color='darkgreen', alpha=0.05, arrows=False)
    if emph_short_edges:
        # Edges within k_hops_is_short * resolution distance: navy.
        short_adj = mask_adj_out(adj, coordinates=coordinates, max_distance=(resolution * k_hops_is_short))
        graph = get_graph_from_adj(short_adj, coordinates, min_weight=min_weight)
        nx.draw_networkx_edges(graph, pos=pos, edge_color='navy', alpha=0.7, arrows=arrows, width=0.8)
        # Edges within a single resolution step: orange, drawn on top.
        short_adj = mask_adj_out(adj, coordinates=coordinates, max_distance=(resolution * 1))
        graph = get_graph_from_adj(short_adj, coordinates, min_weight=min_weight)
        nx.draw_networkx_edges(graph, pos=pos, edge_color='orange', alpha=0.94, arrows=arrows, arrowsize=5, width=(1.2 if arrows else 0.8))
    if (save_to is not None):
        plt.savefig(save_to, bbox_inches='tight', format=format, dpi=500)
    if show:
        plt.show()
    else:
        print('Omitting plotting')
class CubicHeckeDataSection(Enum):
    # Section identifiers for the cubic Hecke algebra database; each member's
    # value is the literal key string (identical to the member name).
    basis = 'basis'
    regular_left = 'regular_left'
    regular_right = 'regular_right'
    split_irred = 'split_irred'
    markov_tr_cfs = 'markov_tr_cfs'
class SimpleCNN(nn.Module):
    """Stack of sparse-conv blocks followed by a 1x1 dense prediction head.

    The first block maps `in_channel` -> 64 with a 7x7 kernel; later blocks keep
    64 channels with 5x5 kernels; all use stride 1.
    """

    def __init__(self, in_channel, pred_dim, num_layers=5):
        super(SimpleCNN, self).__init__()
        chan = 64
        stride = 1
        # fix: was a plain Python list, so the blocks were never registered as
        # submodules and their parameters did not reach .parameters()/optimizers.
        self.layers = nn.ModuleList()
        for layer_num in range(num_layers):
            if layer_num == 0:
                in_dim = in_channel
                kernel_size = 7
                pad = 3
            else:
                in_dim = chan
                kernel_size = 5
                pad = 2
            out_dim = chan
            dilation = 1
            print('in, out, stride, dilation: %d, %d, %d, %d' % (in_dim, out_dim, stride, dilation))
            self.layers.append(self.generate_conv_block(in_dim, out_dim, kernel_size=kernel_size, stride=stride, dilation=dilation, bias=False))
        self.final_layer = nn.Conv2d(in_channels=chan, out_channels=pred_dim, kernel_size=1, stride=1, padding=0)

    def generate_conv_block(self, in_dim, out_dim, kernel_size=3, stride=1, dilation=2, bias=True):
        """Pad -> SparseConv -> BatchNorm -> ReLU block, moved to the GPU.

        NOTE(review): the SparseConv kernel_size is hard-coded to 3 even though a
        kernel_size argument is accepted (and `pad` above is unused) — looks
        intentional given Pad(dilation), but confirm before relying on it.
        """
        block = nn.Sequential(Pad(dilation), SparseConv(in_channels=in_dim, out_channels=out_dim, kernel_size=3, stride=stride, padding=0, dilation=dilation, bias=bias), BatchNorm(out_dim), Relu()).cuda()
        return block

    def forward(self, feat, mask):
        """Run the sparse-conv stack, then the dense 1x1 head; returns (feat, mask)."""
        for layer in self.layers:
            (feat, mask) = layer((feat, mask))
        feat = self.final_layer(feat)
        return (feat, mask)
.gpu
def test_memory_pool_tasklet():
    # Regression test: a pooled GPU transient feeding a tasklet must still be
    # allocated/freed exactly once via cudaMallocAsync/cudaFreeAsync.
    # NOTE(review): the bare `.gpu` line above appears to be a mangled
    # `@pytest.mark.gpu` decorator; `tester` below presumably lost a
    # `@dace.program` decorator too (`tester.to_sdfg()` requires it).
    def tester(A: CudaArray, B: CudaArray):
        tmp = (A + 1)
        # Read `tmp` into the tasklet and write out through B (DaCe memlet syntax).
        with dace.tasklet(dace.Language.CPP):
            (t << tmp)
            (b >> B)
        A[:] = B
    sdfg = tester.to_sdfg()
    # Force every array to GPU-global, pooled storage.
    for arr in sdfg.arrays.values():
        arr.storage = dace.StorageType.GPU_Global
        arr.pool = True
    for (me, _) in sdfg.all_nodes_recursive():
        if isinstance(me, dace.nodes.MapEntry):
            me.schedule = dace.ScheduleType.GPU_Device
    code = sdfg.generate_code()[0].clean_code
    # Exactly one pooled allocation and one release in the generated code.
    assert (code.count('cudaMallocAsync') == 1)
    assert (code.count('cudaFreeAsync') == 1)
    import cupy as cp
    a = cp.random.rand(20)
    b = cp.random.rand(20)
    b_expected = cp.copy(b)
    sdfg(a, b)
    # The program copies B into A and leaves B itself untouched.
    assert cp.allclose(a, b_expected)
    assert cp.allclose(b, b_expected)
def gen_nsml_report(acc_train, aux_out_train, acc_dev, aux_out_dev):
    # Push one epoch's train/dev metrics to the NSML dashboard.
    # NOTE(review): relies on `epoch`, `args` and `nsml` being resolvable at
    # module scope (they are not parameters) — confirm callers set them globally.
    # NOTE(review): `aux_out_dev` is accepted but never used.
    (ave_loss, acc_sc, acc_sa, acc_wn, acc_wc, acc_wo, acc_wvi, acc_wv, acc_lx, acc_x) = acc_train
    (grad_abs_mean_mean, grad_abs_mean_sig, grad_abs_sig_mean) = aux_out_train
    (ave_loss_t, acc_sc_t, acc_sa_t, acc_wn_t, acc_wc_t, acc_wo_t, acc_wvi_t, acc_wv_t, acc_lx_t, acc_x_t) = acc_dev
    nsml.report(step=epoch, epoch=epoch, epochs_total=args.tepoch, train__loss=ave_loss, train__acc_sc=acc_sc, train__acc_sa=acc_sa, train__acc_wn=acc_wn, train__acc_wc=acc_wc, train__acc_wo=acc_wo, train__acc_wvi=acc_wvi, train__acc_wv=acc_wv, train__acc_lx=acc_lx, train__acc_x=acc_x, train_grad_abs_mean_mean=float(grad_abs_mean_mean), train_grad_abs_mean_sig=float(grad_abs_mean_sig), train_grad_abs_sig_mean=float(grad_abs_sig_mean), dev__loss=ave_loss_t, dev__acc_sc_t=acc_sc_t, dev__acc_sa_t=acc_sa_t, dev__acc_wn_t=acc_wn_t, dev__acc_wc_t=acc_wc_t, dev__acc_wo_t=acc_wo_t, dev__acc_wvi_t=acc_wvi_t, dev__acc_wv_t=acc_wv_t, dev__acc_lx_t=acc_lx_t, dev__acc_x=acc_x_t, scope=locals())
def plot_dist_for_two_four_room_tasks(**kwargs):
    """Overlay the state-distribution plots of the two four-room task variants.

    Saves one PDF per policy under pdf_plots/Misc/CompareDistsFR and shows each
    figure. Expects kwargs to contain 'fig_size'.
    """
    task1 = 'LearnEightPoliciesTileCodingFeat'
    task2 = 'HighVarianceLearnEightPoliciesTileCodingFeat'
    save_dir = os.path.join('pdf_plots', 'Misc', 'CompareDistsFR')
    d_mu1, d_mu2 = load_d_mu(task1), load_d_mu(task2)
    state_values1, state_values2 = load_state_values(task1), load_state_values(task2)
    for policy_no in range(state_values1.shape[0]):
        fig, ax = plt.subplots(figsize=kwargs['fig_size'])
        # Draw each task's distribution on the same axes, task1 first.
        for task, d_mu, state_values in ((task1, d_mu1, state_values1), (task2, d_mu2, state_values2)):
            active_states = find_active_states(task, d_mu, state_values, policy_no)
            active_d_mu = get_active_d_mu(task, d_mu, active_states, policy_no)
            plot_d_mu(ax, active_d_mu, active_states)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir, exist_ok=True)
        fig.savefig(os.path.join(save_dir, f'dist_policy_{policy_no}.pdf'), format='pdf', dpi=1000, bbox_inches='tight')
        plt.show()
def register_Ns3LteGlobalPathlossDatabase_methods(root_module, cls):
    # PyBindGen-style registration for ns3::LteGlobalPathlossDatabase
    # (presumably auto-generated by the ns-3 binding generator — keep the call
    # order and type strings exactly as emitted).
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::LteGlobalPathlossDatabase const &', 'arg0')])
    # double GetPathloss(uint16_t cellId, uint64_t imsi)
    cls.add_method('GetPathloss', 'double', [param('uint16_t', 'cellId'), param('uint64_t', 'imsi')])
    # void Print()
    cls.add_method('Print', 'void', [])
    # Pure-virtual trace sink: UpdatePathloss(context, txPhy, rxPhy, lossDb).
    cls.add_method('UpdatePathloss', 'void', [param('std::string', 'context'), param('ns3::Ptr< ns3::SpectrumPhy const >', 'txPhy'), param('ns3::Ptr< ns3::SpectrumPhy const >', 'rxPhy'), param('double', 'lossDb')], is_pure_virtual=True, is_virtual=True)
    return
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Inception-ResNet-v1 17x17 block: two branches, 1x1 projection, scaled residual add."""
    with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
        # Branch 0: plain 1x1 convolution.
        with tf.variable_scope('Branch_0'):
            branch0 = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        # Branch 1: 1x1 -> 1x7 -> 7x1 factorized convolutions.
        with tf.variable_scope('Branch_1'):
            branch1 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
            branch1 = slim.conv2d(branch1, 160, [1, 7], scope='Conv2d_0b_1x7')
            branch1 = slim.conv2d(branch1, 192, [7, 1], scope='Conv2d_0c_7x1')
        mixed = tf.concat([branch0, branch1], 3)
        # Project back to the input depth, no BN/activation on the residual path.
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None, activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
_grad()
def test(model, predictor, data, split_edge, evaluator, batch_size):
    """Score all edge splits and return Hits@K for K in {10, 50, 100}.

    NOTE(review): the bare `_grad()` call above appears to be a mangled
    `@torch.no_grad()` decorator — restore before running.

    Returns:
        dict mapping str(K) -> (train_hits, valid_hits, test_hits).
    """
    model.eval()
    h = model(data.x, data.adj_t)
    dev = h.device
    # One batched prediction pass per split (the five copies of this loop were
    # deduplicated into _batched_edge_preds).
    pos_train_pred = _batched_edge_preds(predictor, h, split_edge['train']['edge'].to(dev), batch_size)
    pos_valid_pred = _batched_edge_preds(predictor, h, split_edge['valid']['edge'].to(dev), batch_size)
    neg_valid_pred = _batched_edge_preds(predictor, h, split_edge['valid']['edge_neg'].to(dev), batch_size)
    pos_test_pred = _batched_edge_preds(predictor, h, split_edge['test']['edge'].to(dev), batch_size)
    neg_test_pred = _batched_edge_preds(predictor, h, split_edge['test']['edge_neg'].to(dev), batch_size)
    results = {}
    for K in [10, 50, 100]:
        evaluator.K = K
        # NOTE(review): train hits are scored against the *valid* negatives (no
        # train negatives are sampled) — matches the original code; confirm intended.
        train_hits = evaluator.eval({'y_pred_pos': pos_train_pred, 'y_pred_neg': neg_valid_pred})[f'{K}']
        valid_hits = evaluator.eval({'y_pred_pos': pos_valid_pred, 'y_pred_neg': neg_valid_pred})[f'{K}']
        test_hits = evaluator.eval({'y_pred_pos': pos_test_pred, 'y_pred_neg': neg_test_pred})[f'{K}']
        results[f'{K}'] = (train_hits, valid_hits, test_hits)
    return results

def _batched_edge_preds(predictor, h, edge_tensor, batch_size):
    """Run `predictor` over all edges in mini-batches; return concatenated CPU scores."""
    preds = []
    for perm in DataLoader(range(edge_tensor.size(0)), batch_size):
        edge = edge_tensor[perm].t()
        preds.append(predictor(h[edge[0]], h[edge[1]]).squeeze().cpu())
    return torch.cat(preds, dim=0)
_numpy_output(check_dtype=True)
def test_ufunc_bitwise_and_uu(A: dace.uint32[10], B: dace.uint32[10]):
    # DaCe program exercising the np.bitwise_and ufunc on two uint32 vectors.
    # NOTE(review): the bare `_numpy_output(check_dtype=True)` line above appears
    # to be a mangled `@compare_numpy_output(check_dtype=True)` decorator from
    # the DaCe test suite — restore before running.
    return np.bitwise_and(A, B)
def get_logger(log_filename='multiproc_mpi.log'):
    """Create a per-rank logger that writes to a shared MPI log file.

    The log file is truncated first; the logger is named 'master' on rank 0 and
    'slave<rank>' elsewhere. Note: each call attaches another handler to the
    same named logger.
    """
    # Truncate any previous log content.
    open(log_filename, 'w').close()
    name = 'master' if mpi_rank == 0 else 'slave%d' % mpi_comm.rank
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    handler = MPIFileHandler(log_filename)
    handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(name)s: %(message)s'))
    logger.addHandler(handler)
    return logger
def main():
    """Entry point: fold command-line overrides into cfg, then build the network."""
    args = get_args_from_command_line()
    # Apply each CLI override only when the flag was actually supplied.
    if args.gpu_id is not None:
        cfg.CONST.DEVICE = args.gpu_id
    if args.phase is not None:
        cfg.NETWORK.PHASE = args.phase
    if args.weights is not None:
        cfg.CONST.WEIGHTS = args.weights
    if args.data_path is not None:
        cfg.DIR.DATASET_ROOT = args.data_path
        if cfg.DATASET.DATASET_NAME == 'VideoDeblur':
            cfg.DIR.IMAGE_BLUR_PATH = os.path.join(args.data_path, '%s/%s/input/%s.jpg')
            cfg.DIR.IMAGE_CLEAR_PATH = os.path.join(args.data_path, '%s/%s/GT/%s.jpg')
        if cfg.DATASET.DATASET_NAME == 'VideoDeblurReal':
            # Real data has no ground truth; blur and "clear" paths coincide.
            cfg.DIR.IMAGE_BLUR_PATH = os.path.join(args.data_path, '%s/%s/input/%s.jpg')
            cfg.DIR.IMAGE_CLEAR_PATH = os.path.join(args.data_path, '%s/%s/input/%s.jpg')
    if args.out_path is not None:
        cfg.DIR.OUT_PATH = args.out_path
    print('Use config:')
    pprint(cfg)
    # Restrict visible CUDA devices unless 'all' was requested.
    if type(cfg.CONST.DEVICE) == str and cfg.CONST.DEVICE != 'all':
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.CONST.DEVICE
    print('CUDA DEVICES NUMBER: ' + str(torch.cuda.device_count()))
    # NOTE: 'bulid_net' (sic) is the project's actual function name.
    bulid_net(cfg)
def extend_and_repeat(tensor, axis, repeat):
    """Insert a new axis at *axis* and tile the array *repeat* times along it."""
    expanded = jnp.expand_dims(tensor, axis)
    return jnp.repeat(expanded, repeat, axis=axis)
def mk_state(car, value):
    """Build a state where *car*'s slot holds num(value) and every other slot is bound(i)."""
    slots = []
    for i in range(num_cars):
        slots.append(num(value) if cars[i] == car else bound(i))
    return state(slots)
class Distribution(object):
    """Wrap an actual or potential sys.path entry with its metadata.

    Decorator-restored pkg_resources-style distribution object: this chunk had
    `@classmethod` (from_location, from_filename), `@property` (hashcmp, key,
    parsed_version, version, _dep_map, extras) and `@staticmethod`
    (_filter_extras) stripped, which broke every attribute-style use such as
    `self.hashcmp < other.hashcmp`.
    """

    PKG_INFO = 'PKG-INFO'

    def __init__(self, location=None, metadata=None, project_name=None, version=None, py_version=PY_MAJOR, platform=None, precedence=EGG_DIST):
        self.project_name = safe_name(project_name or 'Unknown')
        if version is not None:
            self._version = safe_version(version)
        self.py_version = py_version
        self.platform = platform
        self.location = location
        self.precedence = precedence
        self._provider = metadata or empty_provider

    @classmethod  # fix: takes `cls`; alternate constructor
    def from_location(cls, location, basename, metadata=None, **kw):
        """Build a Distribution by parsing an egg-style basename."""
        project_name, version, py_version, platform = [None] * 4
        basename, ext = os.path.splitext(basename)
        if ext.lower() in _distributionImpl:
            cls = _distributionImpl[ext.lower()]
            match = EGG_NAME(basename)
            if match:
                project_name, version, py_version, platform = match.group('name', 'ver', 'pyver', 'plat')
        return cls(location, metadata, project_name=project_name, version=version, py_version=py_version, platform=platform, **kw)._reload_version()

    def _reload_version(self):
        return self

    @property  # fix: accessed attribute-style by the comparison methods below
    def hashcmp(self):
        return (self.parsed_version, self.precedence, self.key, _remove_md5_fragment(self.location), self.py_version or '', self.platform or '')

    def __hash__(self):
        return hash(self.hashcmp)

    def __lt__(self, other):
        return self.hashcmp < other.hashcmp

    def __le__(self, other):
        return self.hashcmp <= other.hashcmp

    def __gt__(self, other):
        return self.hashcmp > other.hashcmp

    def __ge__(self, other):
        return self.hashcmp >= other.hashcmp

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # Not a Distribution, so not equal.
            return False
        return self.hashcmp == other.hashcmp

    def __ne__(self, other):
        return not (self == other)

    @property  # fix: read attribute-style (hashcmp, check_version_conflict, ...)
    def key(self):
        try:
            return self._key
        except AttributeError:
            self._key = key = self.project_name.lower()
            return key

    @property  # fix: read attribute-style (as_requirement, sorting)
    def parsed_version(self):
        if not hasattr(self, '_parsed_version'):
            self._parsed_version = parse_version(self.version)
        return self._parsed_version

    def _warn_legacy_version(self):
        LV = packaging.version.LegacyVersion
        is_legacy = isinstance(self._parsed_version, LV)
        if not is_legacy:
            return
        if not self.version:
            return
        tmpl = textwrap.dedent("\n '{project_name} ({version})' is being parsed as a legacy,\n non PEP 440,\n version. You may find odd behavior and sort order.\n In particular it will be sorted as less than 0.0. It\n is recommended to migrate to PEP 440 compatible\n versions.\n ").strip().replace('\n', ' ')
        warnings.warn(tmpl.format(**vars(self)), PEP440Warning)

    @property  # fix: read attribute-style throughout (egg_name, has_version, ...)
    def version(self):
        try:
            return self._version
        except AttributeError:
            # Fall back to the Version: header in the PKG-INFO metadata.
            version = _version_from_file(self._get_metadata(self.PKG_INFO))
            if version is None:
                tmpl = "Missing 'Version:' header and/or %s file"
                raise ValueError(tmpl % self.PKG_INFO, self)
            return version

    @property  # fix: read attribute-style by requires() and extras
    def _dep_map(self):
        try:
            return self.__dep_map
        except AttributeError:
            self.__dep_map = self._filter_extras(self._build_dep_map())
            return self.__dep_map

    @staticmethod  # fix: takes no `self`; called as self._filter_extras(...)
    def _filter_extras(dm):
        """Drop requirement groups whose environment marker evaluates false."""
        for extra in list(filter(None, dm)):
            new_extra = extra
            reqs = dm.pop(extra)
            new_extra, _, marker = extra.partition(':')
            fails_marker = marker and (invalid_marker(marker) or not evaluate_marker(marker))
            if fails_marker:
                reqs = []
            new_extra = safe_extra(new_extra) or None
            dm.setdefault(new_extra, []).extend(reqs)
        return dm

    def _build_dep_map(self):
        dm = {}
        for name in ('requires.txt', 'depends.txt'):
            for extra, reqs in split_sections(self._get_metadata(name)):
                dm.setdefault(extra, []).extend(parse_requirements(reqs))
        return dm

    def requires(self, extras=()):
        """List the Requirements needed for this distro if `extras` are used."""
        dm = self._dep_map
        deps = []
        deps.extend(dm.get(None, ()))
        for ext in extras:
            try:
                deps.extend(dm[safe_extra(ext)])
            except KeyError:
                raise UnknownExtra('%s has no such extra feature %r' % (self, ext))
        return deps

    def _get_metadata(self, name):
        # Yield the lines of metadata file `name`, or nothing if absent.
        if self.has_metadata(name):
            for line in self.get_metadata_lines(name):
                yield line

    def activate(self, path=None, replace=False):
        """Ensure this distribution is importable on `path` (default sys.path)."""
        if path is None:
            path = sys.path
        self.insert_on(path, replace=replace)
        if path is sys.path:
            fixup_namespace_packages(self.location)
            for pkg in self._get_metadata('namespace_packages.txt'):
                if pkg in sys.modules:
                    declare_namespace(pkg)

    def egg_name(self):
        """Return what this distribution's standard .egg filename should be."""
        filename = '%s-%s-py%s' % (to_filename(self.project_name), to_filename(self.version), self.py_version or PY_MAJOR)
        if self.platform:
            filename += '-' + self.platform
        return filename

    def __repr__(self):
        if self.location:
            return '%s (%s)' % (self, self.location)
        else:
            return str(self)

    def __str__(self):
        try:
            version = getattr(self, 'version', None)
        except ValueError:
            version = None
        version = version or '[unknown version]'
        return '%s %s' % (self.project_name, version)

    def __getattr__(self, attr):
        """Delegate all unrecognized public attributes to the metadata provider."""
        if attr.startswith('_'):
            raise AttributeError(attr)
        return getattr(self._provider, attr)

    def __dir__(self):
        return list(set(super(Distribution, self).__dir__()) | set(attr for attr in self._provider.__dir__() if not attr.startswith('_')))

    if not hasattr(object, '__dir__'):
        # python 2.7 has no object.__dir__ to delegate to
        del __dir__

    @classmethod  # fix: takes `cls`; alternate constructor
    def from_filename(cls, filename, metadata=None, **kw):
        return cls.from_location(_normalize_cached(filename), os.path.basename(filename), metadata, **kw)

    def as_requirement(self):
        """Return a ``Requirement`` that matches this distribution exactly."""
        if isinstance(self.parsed_version, packaging.version.Version):
            spec = '%s==%s' % (self.project_name, self.parsed_version)
        else:
            spec = '%s===%s' % (self.project_name, self.parsed_version)
        return Requirement.parse(spec)

    def load_entry_point(self, group, name):
        """Return the `name` entry point of `group` or raise ImportError."""
        ep = self.get_entry_info(group, name)
        if ep is None:
            raise ImportError('Entry point %r not found' % ((group, name),))
        return ep.load()

    def get_entry_map(self, group=None):
        """Return the entry point map for `group`, or the full entry map."""
        try:
            ep_map = self._ep_map
        except AttributeError:
            ep_map = self._ep_map = EntryPoint.parse_map(self._get_metadata('entry_points.txt'), self)
        if group is not None:
            return ep_map.get(group, {})
        return ep_map

    def get_entry_info(self, group, name):
        """Return the EntryPoint object for `group`+`name`, or ``None``."""
        return self.get_entry_map(group).get(name)

    def insert_on(self, path, loc=None, replace=False):
        """Ensure self.location is on `path`, honoring `replace` semantics."""
        loc = loc or self.location
        if not loc:
            return
        nloc = _normalize_cached(loc)
        bdir = os.path.dirname(nloc)
        npath = [(p and _normalize_cached(p)) or p for p in path]
        for p, item in enumerate(npath):
            if item == nloc:
                if replace:
                    break
                else:
                    # don't modify path (even removing duplicates) when already
                    # present and not replacing
                    return
            elif item == bdir and self.precedence == EGG_DIST:
                # an .egg gets precedence over its containing directory
                if (not replace) and (nloc in npath[p:]):
                    return
                if path is sys.path:
                    self.check_version_conflict()
                path.insert(p, loc)
                npath.insert(p, nloc)
                break
        else:
            # location not found at all: append (or prepend when replacing)
            if path is sys.path:
                self.check_version_conflict()
            if replace:
                path.insert(0, loc)
            else:
                path.append(loc)
            return
        # p is where loc was found/inserted; remove any later duplicates
        while True:
            try:
                np = npath.index(nloc, p + 1)
            except ValueError:
                break
            else:
                del npath[np], path[np]
                p = np
        return

    def check_version_conflict(self):
        # Warn when a module of this distribution was already imported from
        # somewhere else on sys.path.
        if self.key == 'setuptools':
            return
        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
        loc = normalize_path(self.location)
        for modname in self._get_metadata('top_level.txt'):
            if (modname not in sys.modules) or (modname in nsp) or (modname in _namespace_packages):
                continue
            if modname in ('pkg_resources', 'setuptools', 'site'):
                continue
            fn = getattr(sys.modules[modname], '__file__', None)
            if fn and (normalize_path(fn).startswith(loc) or fn.startswith(self.location)):
                continue
            issue_warning('Module %s was already imported from %s, but %s is being added to sys.path' % (modname, fn, self.location))

    def has_version(self):
        try:
            self.version
        except ValueError:
            issue_warning('Unbuilt egg for ' + repr(self))
            return False
        return True

    def clone(self, **kw):
        """Copy this distribution, substituting in any changed keyword args."""
        names = 'project_name version py_version platform location precedence'
        for attr in names.split():
            kw.setdefault(attr, getattr(self, attr, None))
        kw.setdefault('metadata', self._provider)
        return self.__class__(**kw)

    @property  # fix: read attribute-style by consumers of the dependency map
    def extras(self):
        return [dep for dep in self._dep_map if dep]
_module()
class LoadPanopticAnnotations(object):
    """Load a panoptic segmentation map into the results dict as float32."""

    def __init__(self, reduce_zero_label=False, file_client_args=dict(backend='disk'), imdecode_backend='pillow'):
        self.reduce_zero_label = reduce_zero_label
        # Copy so mutating the stored args never touches the shared default dict.
        self.file_client_args = file_client_args.copy()
        self.file_client = None  # created lazily elsewhere, presumably
        self.imdecode_backend = imdecode_backend

    def __call__(self, results):
        """Read the seg map named in results['ann_info'] and attach it to results."""
        ann_file = results['ann_info']['seg_map']
        prefix = results.get('seg_prefix', None)
        seg_path = osp.join(prefix, ann_file) if prefix is not None else ann_file
        seg_map = np.asarray(Image.open(seg_path), dtype=np.float32)
        results['gt_panoptic_seg'] = seg_map
        results['seg_fields'].append('gt_panoptic_seg')
        return results

    def __repr__(self):
        out = self.__class__.__name__
        out += f'(reduce_zero_label={self.reduce_zero_label},'
        out += f"imdecode_backend='{self.imdecode_backend}')"
        return out
class TestModelFromArtisDensityAbundancesAllAscii():
    # Regression tests for a SimulationState built from ARTIS-style ASCII density
    # and abundance files (tardis_configv1_ascii_density_abund.yml).
    # NOTE(review): the bare `(autouse=True)` line below appears to be a mangled
    # decorator — presumably `@pytest.fixture(autouse=True)`; restore before running.
    (autouse=True)
    def setup(self, example_model_file_dir, atomic_dataset):
        # Build the simulation state from the ASCII fixture files.
        self.config = Configuration.from_yaml((example_model_file_dir / 'tardis_configv1_ascii_density_abund.yml'))
        self.config.model.structure.filename = 'density.dat'
        self.config.model.abundances.filename = 'abund.dat'
        self.simulation_state = SimulationState.from_config(self.config, atom_data=atomic_dataset)
    def test_velocities(self):
        # Inner boundary velocity must carry units and equal 11000 km/s.
        assert hasattr(self.simulation_state.v_inner, 'unit')
        assert_almost_equal(self.simulation_state.v_inner[0].to((u.km / u.s)).value, 11000)
    def test_abundances(self):
        # Mass fractions indexed by (atomic_number, shell): Si (Z=14) across
        # shells 0-6, and C (Z=6) concentrated in shell 6.
        assert_almost_equal(self.simulation_state.abundance.loc[(14, 0)], 0.1)
        assert_almost_equal(self.simulation_state.abundance.loc[(14, 1)], 0.2)
        assert_almost_equal(self.simulation_state.abundance.loc[(14, 2)], 0.2)
        assert_almost_equal(self.simulation_state.abundance.loc[(14, 3)], 0.2)
        assert_almost_equal(self.simulation_state.abundance.loc[(14, 4)], 0.2)
        assert_almost_equal(self.simulation_state.abundance.loc[(14, 5)], 0.2)
        assert_almost_equal(self.simulation_state.abundance.loc[(14, 6)], 0.0)
        assert_almost_equal(self.simulation_state.abundance.loc[(6, 0)], 0.0)
        assert_almost_equal(self.simulation_state.abundance.loc[(6, 1)], 0.0)
        assert_almost_equal(self.simulation_state.abundance.loc[(6, 2)], 0.0)
        assert_almost_equal(self.simulation_state.abundance.loc[(6, 3)], 0.0)
        assert_almost_equal(self.simulation_state.abundance.loc[(6, 4)], 0.0)
        assert_almost_equal(self.simulation_state.abundance.loc[(6, 5)], 0.0)
        assert_almost_equal(self.simulation_state.abundance.loc[(6, 6)], 0.5)
    def test_densities(self):
        # Expected densities are the file values scaled by 13.0**3 — presumably
        # the (t/t0)^3 homologous-expansion factor; confirm against the fixture.
        assert_almost_equal(self.simulation_state.density[0].to(u.Unit('g/cm3')).value, (9.7656229e-11 / (13.0 ** 3)))
        assert_almost_equal(self.simulation_state.density[1].to(u.Unit('g/cm3')).value, (4.8170911e-11 / (13.0 ** 3)))
        assert_almost_equal(self.simulation_state.density[2].to(u.Unit('g/cm3')).value, (2.56e-11 / (13.0 ** 3)))
        assert_almost_equal(self.simulation_state.density[3].to(u.Unit('g/cm3')).value, (1.4450533e-11 / (13.0 ** 3)))
        assert_almost_equal(self.simulation_state.density[4].to(u.Unit('g/cm3')).value, (8.5733893e-11 / (13.0 ** 3)))
        assert_almost_equal(self.simulation_state.density[5].to(u.Unit('g/cm3')).value, (5.3037103e-11 / (13.0 ** 3)))
        assert_almost_equal(self.simulation_state.density[6].to(u.Unit('g/cm3')).value, (3.3999447e-11 / (13.0 ** 3)))
class Swin2SRImageProcessor(metaclass=DummyObject):
    # Placeholder following the transformers dummy-object pattern: stands in for
    # the real image processor when the 'vision' backend is unavailable.
    _backends = ['vision']
    def __init__(self, *args, **kwargs):
        # Raises with an instructive message if the 'vision' backend is missing.
        requires_backends(self, ['vision'])
def get_output(input_text, input_len=128, output_len=128):
    """Tokenize each prompt to a fixed length, generate with the GPU model, and decode.

    Relies on module-level `tokenizer` and `model` objects.
    """
    encoded = [tokenizer(prompt, padding='max_length', max_length=input_len, return_tensors='pt').input_ids.to('cuda') for prompt in input_text]
    batch = torch.cat(encoded)
    generated = model.generate(batch, max_length=output_len)
    return tokenizer.batch_decode(generated, skip_special_tokens=True)
def cdd_Hrepresentation(cdd_type, ieqs, eqns, file_output=None):
    """Return (or write) a cdd H-representation for the given inequalities/equations.

    Args:
        cdd_type: 'real' (rows coerced through RDF) or an exact cdd number type.
        ieqs: iterable of inequality rows (constant term first), possibly empty.
        eqns: iterable of equation rows, possibly empty.
        file_output: if given, write the representation to this path and return None.

    Returns:
        The H-representation string, or None when file_output is supplied.
    """
    ieqs = _set_to_None_if_empty(ieqs)
    eqns = _set_to_None_if_empty(eqns)
    num, ambient_dim = _common_length_of(ieqs, eqns)
    # Each row carries a leading constant term, so the ambient dimension is one less.
    ambient_dim -= 1
    if cdd_type == 'real':
        from sage.rings.real_double import RDF
        base_ring = RDF
    else:
        base_ring = None
    s = 'H-representation\n'
    if eqns is not None:
        assert len(eqns) > 0
        n = len(eqns)
        # Equations are emitted first, so they occupy rows 1..n ("linearity").
        s += 'linearity ' + repr(n) + ' '
        s += _to_space_separated_string(range(1, n + 1)) + '\n'
    s += 'begin\n'
    s += ' ' + repr(num) + ' ' + repr(ambient_dim + 1) + ' ' + cdd_type + '\n'
    if eqns is not None:
        for e in eqns:
            s += ' ' + _to_space_separated_string(e, base_ring) + '\n'
    if ieqs is not None:
        for i in ieqs:
            s += ' ' + _to_space_separated_string(i, base_ring) + '\n'
    s += 'end\n'
    if file_output is not None:
        # fix: use a context manager so the file is closed even if the write fails
        with open(file_output, 'w') as out_file:
            out_file.write(s)
    else:
        return s
def _sizeof_fmt(num):
units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']
decimals = [0, 0, 1, 2, 2, 2]
if (num > 1):
exponent = min(int(log(num, 1024)), (len(units) - 1))
quotient = (float(num) / (1024 ** exponent))
unit = units[exponent]
num_decimals = decimals[exponent]
format_string = ('{0:.%sf} {1}' % num_decimals)
return format_string.format(quotient, unit)
return ('0 bytes' if (num == 0) else '1 byte') |
def _read_structdesc(f):
    """Read a structure descriptor record from the open stream *f*.

    Fully-defined descriptors are cached in the module-level ``STRUCT_DICT``
    so that later records flagged as predefined can refer back to them by name.
    """
    structdesc = {}
    structstart = _read_long(f)
    if (structstart != 9):
        raise Exception('STRUCTSTART should be 9')
    structdesc['name'] = _read_string(f)
    predef = _read_long(f)
    structdesc['ntags'] = _read_long(f)
    structdesc['nbytes'] = _read_long(f)
    # PREDEF is a bit field: bit 0 = predefined, bit 1 = inherits, bit 2 = superclass.
    structdesc['predef'] = (predef & 1)
    structdesc['inherits'] = (predef & 2)
    structdesc['is_super'] = (predef & 4)
    if (not structdesc['predef']):
        structdesc['tagtable'] = [_read_tagdesc(f) for _ in range(structdesc['ntags'])]
        # Tag names are stored after all tag descriptors, in the same order.
        for tag in structdesc['tagtable']:
            tag['name'] = _read_string(f)
        structdesc['arrtable'] = {tag['name']: _read_arraydesc(f) for tag in structdesc['tagtable'] if tag['array']}
        structdesc['structtable'] = {tag['name']: _read_structdesc(f) for tag in structdesc['tagtable'] if tag['structure']}
        if (structdesc['inherits'] or structdesc['is_super']):
            structdesc['classname'] = _read_string(f)
            structdesc['nsupclasses'] = _read_long(f)
            structdesc['supclassnames'] = [_read_string(f) for _ in range(structdesc['nsupclasses'])]
            structdesc['supclasstable'] = [_read_structdesc(f) for _ in range(structdesc['nsupclasses'])]
        # Cache the fully-read definition for later PREDEF references.
        STRUCT_DICT[structdesc['name']] = structdesc
    else:
        if (structdesc['name'] not in STRUCT_DICT):
            raise Exception("PREDEF=1 but can't find definition")
        # Predefined: replace the stub with the cached full definition.
        structdesc = STRUCT_DICT[structdesc['name']]
    return structdesc
class LinearAttention(nn.Module):
    """Multi-head linear (kernelized) attention.

    Queries and keys pass through a feature map so attention is computed
    without materializing the full attention matrix. Only all-ones or
    lower-triangular (causal) attention masks are supported.
    """

    def __init__(self, d_model, n_heads, feature_map_cfg=None, eps=1e-06, dropout=0.0):
        # NOTE(review): ``dropout`` is accepted but never used below — confirm intent.
        super().__init__()
        query_dims = (d_model // n_heads)
        self.n_heads = n_heads
        # Instantiate the configured feature map, defaulting to elu-based map.
        self.feature_map = (hydra.utils.instantiate(feature_map_cfg, query_dims) if (feature_map_cfg is not None) else elu_feature_map(query_dims))
        self.eps = eps

    def forward(self, query, key, value, attn_mask=None, key_padding_mask=None, need_weights=False):
        """Compute linear attention; returns ``(out, attn)`` with heads re-merged."""
        # Split the embedding dim into heads: (b, seq, h*e) -> (b, h, seq, e).
        query = rearrange(query, 'b t (h e) -> b h t e', h=self.n_heads)
        key = rearrange(key, 'b s (h e) -> b h s e', h=self.n_heads)
        value = rearrange(value, 'b s (h d) -> b h s d', h=self.n_heads)
        self.feature_map.new_feature_map(query.device)
        Q = self.feature_map.forward_queries(query)
        K = self.feature_map.forward_keys(key)
        causal = ((attn_mask is not None) and attn_mask.lower_triangular)
        if (not ((attn_mask is None) or attn_mask.all_ones or causal)):
            raise RuntimeError('LinearAttention does not support arbitrary attention masks')
        if causal:
            # NOTE(review): after the rearranges above, shape[1] is the head
            # dimension (identical by construction); a sequence-length check
            # would be shape[2] — confirm the intended axis.
            assert (query.shape[1] == key.shape[1]), 'query and key must have the same sequence length'
        if (key_padding_mask is not None):
            # Zero padded key positions so they contribute nothing to the sums.
            K.masked_fill_((~ rearrange(key_padding_mask.bool_matrix, 'b s -> b 1 s 1')), 0.0)
        attn_fn = (causal_linear_attention if causal else linear_attention)
        (out, attn) = attn_fn(Q, K, value, eps=self.eps, need_weights=need_weights)
        # Merge heads back: (b, h, seq, d) -> (b, seq, h*d).
        out = rearrange(out, 'b h s d -> b s (h d)')
        return (out, attn)
def _restore_leading_dim(x: TensorType, leading_dim: TensorType) -> TensorType:
    """Reshape ``x`` so its flattened first axis becomes the ``leading_dim`` axes."""
    # Shape of one element (everything after the leading axis), then prepend
    # the desired leading dimensions and reshape in a single call.
    element_shape = tf.shape(x[0])
    target_shape = tf.concat([leading_dim, element_shape], axis=0)
    return tf.reshape(x, target_shape)
def freeze_bn_func(m):
    """Disable gradient updates for the affine parameters of batch-norm layers.

    Intended for ``Module.apply``; modules that are not batch-norm are untouched.
    """
    if isinstance(m, nn.BatchNorm2d) or 'BatchNorm' in m.__class__.__name__:
        m.weight.requires_grad = False
        m.bias.requires_grad = False
def sort(packed, ref, reverse=True):
    """Sort several parallel sequences by the values in ``ref``.

    Returns a tuple whose first element is the permutation of original indices
    and whose remaining elements are the sequences from ``packed`` reordered
    the same way.
    """
    assert ((isinstance(packed, tuple) or isinstance(packed, list)) and isinstance(ref, list))
    # Prepend the sort key and the original indices, then sort all rows together.
    columns = [ref, range(len(ref))] + list(packed)
    rows = sorted(zip(*columns), reverse=reverse)
    unzipped = [list(col) for col in zip(*rows)]
    # Drop the key column; keep the index permutation plus reordered payloads.
    return tuple(unzipped[1:])
def save_graph_edgelist(G, dst_dir):
    """Write a node-id mapping and an integer-indexed edge list for graph ``G``.

    Creates two files in ``dst_dir``:
      - ``graph_node_id2idx.txt``: one "<node_id>, <index>" line per node;
      - ``graph_edge.edgelist``: "<src_idx> <dst_idx> <weight>" per edge
        (the weight column is omitted for edges without a 'weight' attribute).
    """
    nodelist = G.nodes()
    node_id2idx = {node: idx for (idx, node) in enumerate(nodelist)}
    with open(os.path.join(dst_dir, 'graph_node_id2idx.txt'), 'w') as f:
        for (i, node) in enumerate(nodelist):
            print(f'{node}, {i}', file=f)
    with open(os.path.join(dst_dir, 'graph_edge.edgelist'), 'w') as f:
        for edge in nx.generate_edgelist(G, data=['weight']):
            # BUGFIX: generate_edgelist omits the weight field when the edge has
            # no 'weight' attribute; the original fixed 3-way unpack crashed there.
            parts = edge.split(' ')
            (src_node, dst_node) = (parts[0], parts[1])
            suffix = f' {parts[2]}' if len(parts) == 3 else ''
            print(f'{node_id2idx[src_node]} {node_id2idx[dst_node]}{suffix}', file=f)
def up_stage(inputs, skip, filters, prior_fn, kernel_size=3, activation='relu', padding='SAME'):
    """Decoder stage: upsample, merge with the skip connection, then apply two
    Bayesian (Flipout) 3-D convolutions, each followed by group normalization."""
    x = UpSampling3D()(inputs)
    x = tfp.layers.Convolution3DFlipout(filters, 2, activation=activation, padding=padding, kernel_prior_fn=prior_fn)(x)
    x = GroupNormalization()(x)
    # Concatenate encoder skip features with the upsampled path, then normalize.
    x = concatenate([skip, x])
    x = GroupNormalization()(x)
    x = tfp.layers.Convolution3DFlipout(filters, kernel_size, activation=activation, padding=padding, kernel_prior_fn=prior_fn)(x)
    x = GroupNormalization()(x)
    x = tfp.layers.Convolution3DFlipout(filters, kernel_size, activation=activation, padding=padding, kernel_prior_fn=prior_fn)(x)
    x = GroupNormalization()(x)
    return x
def register_all_objects365(root):
    """Register every predefined Objects365 split with the COCO-style loader."""
    for (key, (image_root, json_file)) in _PREDEFINED_SPLITS_OBJECTS365.items():
        # Remote annotation files (paths containing '://') are used verbatim;
        # local ones are resolved relative to ``root``.
        if '://' in json_file:
            json_path = json_file
        else:
            json_path = os.path.join(root, json_file)
        register_coco_instances(key, _get_builtin_metadata(key), json_path, os.path.join(root, image_root))
def levenshtein_matrix(first, second, cost_ins=1, cost_del=1, cost_sub=2):
    """Compute the weighted edit-distance DP table and its backpointers.

    Returns ``(distance_matrix, backpointers)``. ``backpointers`` maps a cell
    ``(i, j)`` to every predecessor cell achieving the minimum cost, paired
    with an edit tuple ``(op, start, end, deleted, inserted, match_flag)``.
    """
    rows = len(first) + 1
    cols = len(second) + 1
    dist = [[None] * cols for _ in range(rows)]
    back = {}
    dist[0][0] = 0
    # First column: delete every prefix character of ``first``.
    for i in range(1, rows):
        dist[i][0] = i
        back[(i, 0)] = [((i - 1, 0), ('del', i - 1, i, first[i - 1], '', 0))]
    # First row: insert every prefix character of ``second``.
    for j in range(1, cols):
        dist[0][j] = j
        back[(0, j)] = [((0, j - 1), ('ins', 0, 0, '', second[j - 1], 0))]
    for i in range(1, rows):
        for j in range(1, cols):
            deletion = dist[i - 1][j] + cost_del
            insertion = dist[i][j - 1] + cost_ins
            match = first[i - 1] == second[j - 1]
            substitution = dist[i - 1][j - 1] + (0 if match else cost_sub)
            best = min(substitution, deletion, insertion)
            # Record every operation that attains the minimum (ties keep all paths).
            if substitution == best:
                dist[i][j] = substitution
                if match:
                    edit = ('noop', i - 1, i, first[i - 1], second[j - 1], 1)
                else:
                    edit = ('sub', i - 1, i, first[i - 1], second[j - 1], 0)
                back.setdefault((i, j), []).append(((i - 1, j - 1), edit))
            if deletion == best:
                dist[i][j] = deletion
                back.setdefault((i, j), []).append(((i - 1, j), ('del', i - 1, i, first[i - 1], '', 0)))
            if insertion == best:
                dist[i][j] = insertion
                back.setdefault((i, j), []).append(((i, j - 1), ('ins', i, i, '', second[j - 1], 0)))
    return (dist, back)
def read_tfrecord(example, train):
    """Parse one serialized TFRecord example into an ``(image, label)`` pair.

    Training examples go through ``augment``; evaluation examples get a
    central crop of fraction ``CENTRAL_FRAC``. All images are resized to
    ``RESIZE`` x ``RESIZE`` and scaled to [0, 1] floats.
    """
    features = {'image': tf.io.FixedLenFeature([], tf.string), 'class': tf.io.FixedLenFeature([], tf.int64)}
    example = tf.io.parse_single_example(example, features)
    image = tf.image.decode_jpeg(example['image'], channels=3)
    if train:
        image = augment(image)
    else:
        image = tf.image.central_crop(image, central_fraction=CENTRAL_FRAC)
    image = tf.image.resize(image, (RESIZE, RESIZE))
    # The reshape pins a fully static shape for downstream batching.
    image = tf.reshape(image, (RESIZE, RESIZE, 3))
    image = (tf.cast(image, tf.float32) / 255.0)
    class_label = tf.cast(example['class'], tf.int32)
    return (image, class_label)
class TestFilelist(torch.utils.data.Dataset):
    """Test-time dataset over a list of image file names.

    Each item is ``(image, file_name)``; the image is loaded from
    ``root/file_name`` and optionally transformed.
    """

    def __init__(self, root, flist, transform=None, flist_reader=default_flist_reader, loader=default_loader):
        self.root = root
        self.imlist = flist_reader(flist)
        # BUGFIX: attribute was misspelled 'transfrom'; renamed to 'transform'
        # (external readers of the old misspelled attribute would need updating).
        self.transform = transform
        self.loader = loader

    def __getitem__(self, index):
        img_name = self.imlist[index]
        img = self.loader(os.path.join(self.root, img_name))
        if self.transform is not None:
            img = self.transform(img)
        return (img, img_name)

    def __len__(self):
        return len(self.imlist)
class Rdist(Module, metaclass=abc.ABCMeta):
    """Abstract base for distributions parameterized over a manifold.

    Subclasses are expected to implement ``sample`` plus the two parameter
    accessors below.
    """

    def __init__(self, manif: Manifold, kmax: int):
        super(Rdist, self).__init__()
        self.manif = manif
        # d mirrors the manifold's dimensionality attribute.
        self.d = manif.d
        # kmax: presumably a truncation/sample-count bound used by subclasses
        # — TODO confirm against the subclass implementations.
        self.kmax = kmax

    def sample(self, size, Y, batch_idxs, sample_idxs, kmax, analytic_kl, prior) -> Tuple[(Tensor, Tensor)]:
        # Abstract hook: per the annotation, returns a (Tensor, Tensor) pair.
        # NOTE(review): not decorated @abstractmethod despite the ABCMeta metaclass,
        # so instantiation of incomplete subclasses is not prevented — confirm intent.
        pass

    def gmu_parameters(self):
        # Abstract hook for one group of learnable parameters.
        pass

    def concentration_parameters(self):
        # Abstract hook for the concentration-related parameters.
        pass
class FunctionLoader(BaseLoader):
    """Template loader that delegates lookup to a user-supplied callable.

    The callable receives the template name and returns ``None`` (not found),
    a bare source string, or a full ``(source, filename, uptodate)`` tuple.
    """

    def __init__(self, load_func):
        self.load_func = load_func

    def get_source(self, environment, template):
        result = self.load_func(template)
        if result is None:
            raise TemplateNotFound(template)
        if isinstance(result, string_types):
            # Bare source string: no filename, always considered up to date.
            return (result, None, None)
        return result
def assert_cache_file_is_ok(url, file_path):
    """Verify that the cached download of ``url`` at ``file_path`` matches the
    reference md5 checksum.

    Raises AssertionError on mismatch. Raising explicitly (instead of using an
    ``assert`` statement) keeps the check active under ``python -O``, while
    preserving the exception type callers may already catch.
    """
    cache_file_md5sum = _get_file_md5sum(file_path)
    ref_md5sum = _get_reference_md5sum(url)
    if cache_file_md5sum != ref_md5sum:
        raise AssertionError('Target URL {} appears to be downloaded to the local cache file {}, but the md5 hash of the local file does not match the reference (actual: {} vs. expected: {}). You may wish to delete the cached file and try again to trigger automatic download.'.format(url, file_path, cache_file_md5sum, ref_md5sum))
def do_flop(cfg):
    """Estimate model FLOPs over ``args.num_inputs`` samples from the test loader.

    Supports both yacs (CfgNode) and LazyConfig configs. Logs a per-operator
    breakdown (from the first sample) and mean/std GFlops over all samples.
    Reads the module-level ``args`` for num_inputs / use_fixed_input_size.
    """
    if isinstance(cfg, CfgNode):
        data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    else:
        data_loader = instantiate(cfg.dataloader.test)
        model = instantiate(cfg.model)
        model.to(cfg.train.device)
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
    model.eval()
    counts = Counter()
    total_flops = []
    for (idx, data) in zip(tqdm.trange(args.num_inputs), data_loader):
        if (args.use_fixed_input_size and isinstance(cfg, CfgNode)):
            import torch
            crop_size = cfg.INPUT.CROP.SIZE[0]
            data[0]['image'] = torch.zeros((3, crop_size, crop_size))
        flops = FlopCountAnalysis(model, data)
        if (idx > 0):
            # Only warn once (for the first sample) about unsupported ops.
            flops.unsupported_ops_warnings(False).uncalled_modules_warnings(False)
        counts += flops.by_operator()
        total_flops.append(flops.total())
    logger.info(('Flops table computed from only one input sample:\n' + flop_count_table(flops)))
    # BUGFIX: the divisors were a bare ``.0`` (i.e. 0.0), raising
    # ZeroDivisionError; flops convert to GFlops by dividing by 1e9.
    logger.info(('Average GFlops for each type of operators:\n' + str([(k, ((v / (idx + 1)) / 1e9)) for (k, v) in counts.items()])))
    logger.info('Total GFlops: {:.1f}±{:.1f}'.format((np.mean(total_flops) / 1e9), (np.std(total_flops) / 1e9)))
def ref_mean_subtraction(x, rmean, t, base_axis, batch_stat):
    """Reference implementation of mean subtraction.

    In batch-stat mode, computes the mean over all axes before ``base_axis``,
    folds it into the running mean ``rmean`` in place (incremental average
    with step count ``t``), then subtracts the updated running mean.
    Otherwise just subtracts the stored running mean.
    """
    if batch_stat:
        # A negative base_axis counts from the end, as with normal indexing.
        axis = base_axis if base_axis >= 0 else len(x.shape) + base_axis
        mean = x.mean(tuple(range(0, axis)))
        # In-place incremental update of the running mean.
        rmean[...] = rmean + (mean - rmean) / (t + 1)
        # BUGFIX(cleanup): removed the original ``t += 1`` — ints are immutable,
        # so the increment was dead code with no effect outside this function.
    return x - rmean
def print_results(results, num_print):
print()
values = list(results.values())
num_examples = len(values[0])
start = int((num_examples / 4))
end = (start + int((num_print / 2)))
first_list = [val[start:end] for val in values]
start = int(((3 * num_examples) / 4))
end = ((start + num_print) - int((num_print / 2)))
second_list = [val[start:end] for val in values]
processed_values = [(first + second) for (first, second) in zip(first_list, second_list)]
for ex_idx in range(len(processed_values[0])):
for (key_idx, key) in enumerate(results.keys()):
value = processed_values[key_idx][ex_idx]
v = (value[0] if isinstance(value, list) else value)
key_width = max((len(key) for key in results))
print(f'{key:>{key_width}}: {repr(v)}')
print()
sys.stdout.flush() |
class NeuralIR_Encoder(nn.Module):
    """Wrapper that embeds raw token tensors and delegates scoring to an
    inner neural IR model.

    The embedding + scoring path runs under ``torch.cuda.amp.autocast`` so
    the inner model can operate in fp16 when ``use_fp16`` is set.
    """

    def __init__(self, word_embeddings: TextFieldEmbedder, neural_ir_model: nn.Module):
        super(NeuralIR_Encoder, self).__init__()
        self.word_embeddings = word_embeddings
        self.neural_ir_model = neural_ir_model

    def forward(self, query: Dict[(str, torch.Tensor)], document: Dict[(str, torch.Tensor)], use_fp16: bool=True, output_secondary_output: bool=False) -> torch.Tensor:
        """Embed query/document token dicts and score them with the inner model."""
        with torch.cuda.amp.autocast(enabled=use_fp16):
            (query_embeddings, document_embeddings, query_mask, document_mask) = get_vectors_n_masks(self.word_embeddings, query, document)
            inner_model_result = self.neural_ir_model.forward(query_embeddings, document_embeddings, query_mask, document_mask, output_secondary_output)
            return inner_model_result

    def forward_representation(self, sequence: Dict[(str, torch.Tensor)], sequence_type: str) -> torch.Tensor:
        """Embed a single sequence and return the inner model's representation."""
        (seq, mask) = get_single_vectors_n_masks(self.word_embeddings, sequence)
        return self.neural_ir_model.forward_representation(seq, mask, sequence_type)

    def get_param_stats(self):
        # Delegated straight to the wrapped model.
        return self.neural_ir_model.get_param_stats()

    def get_param_secondary(self):
        # Delegated straight to the wrapped model.
        return self.neural_ir_model.get_param_secondary()
class SiblingsPolicy(InclusivePolicy):
    """Negative-sampling policy: negatives are all classes in the subtrees
    (inclusive) rooted at the node's siblings."""

    def negative_examples(self, node) -> np.ndarray:
        """Boolean mask over rows of ``self.y`` labeled with any sibling-subtree class."""
        negative_classes = set()
        for sib in self._get_siblings(node):
            negative_classes.update(self._get_descendants(sib, inclusive=True))
        # A row is negative if any of its labels falls in the sibling subtrees.
        membership = np.isin(self.y, list(negative_classes))
        return membership.any(axis=1)
class GaussianMLPTwoHeadedModule(GaussianMLPBaseModule):
    """Gaussian MLP whose mean and log-std heads share one MLP body.

    All constructor arguments mirror ``GaussianMLPBaseModule`` and are passed
    straight through to it.
    """

    def __init__(self, input_dim, output_dim, hidden_sizes=(32, 32), hidden_nonlinearity=torch.tanh, hidden_w_init=nn.init.xavier_uniform_, hidden_b_init=nn.init.zeros_, output_nonlinearity=None, output_w_init=nn.init.xavier_uniform_, output_b_init=nn.init.zeros_, learn_std=True, init_std=1.0, min_std=1e-06, max_std=None, std_parameterization='exp', layer_normalization=False, normal_distribution_cls=Normal):
        super(GaussianMLPTwoHeadedModule, self).__init__(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init, output_nonlinearity=output_nonlinearity, output_w_init=output_w_init, output_b_init=output_b_init, learn_std=learn_std, init_std=init_std, min_std=min_std, max_std=max_std, std_parameterization=std_parameterization, layer_normalization=layer_normalization, normal_distribution_cls=normal_distribution_cls)

        def _init_log_std_bias(b):
            # BUGFIX: the original 'softplus_real' branch returned a nested,
            # never-invoked lambda, leaving the log-std bias uninitialized.
            # Both branches now actually apply constant_ to the bias tensor.
            if self._std_parameterization not in ['softplus_real']:
                return nn.init.constant_(b, self._init_std.item())
            return nn.init.constant_(b, self._init_std.exp().exp().add(-1.0).log().item())

        # Head 0 = mean (zero-init bias), head 1 = log-std (init from init_std).
        self._shared_mean_log_std_network = MultiHeadedMLPModule(n_heads=2, input_dim=self._input_dim, output_dims=self._action_dim, hidden_sizes=self._hidden_sizes, hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self._hidden_w_init, hidden_b_init=self._hidden_b_init, output_nonlinearities=self._output_nonlinearity, output_w_inits=self._output_w_init, output_b_inits=[nn.init.zeros_, _init_log_std_bias], layer_normalization=self._layer_normalization)

    def _get_mean_and_log_std(self, *inputs):
        """Return the (mean, log_std) pair from the shared two-headed network."""
        return self._shared_mean_log_std_network(*inputs)

    def get_last_linear_layers(self):
        return {'mean': self._shared_mean_log_std_network.get_last_linear_layer()}
# NOTE(review): bare call — in numpy's recfunctions this line is the decorator
# ``@array_function_dispatch(_recursive_fill_fields_dispatcher)`` applied to
# ``recursive_fill_fields`` below; the leading '@' (and possibly part of the
# name) appears to have been lost in extraction. Confirm the intended form.
_function_dispatch(_recursive_fill_fields_dispatcher)
def recursive_fill_fields(input, output):
    """Copy fields from structured array ``input`` into ``output`` by name,
    descending into nested structured fields.

    Fields absent from ``input`` are skipped; ``output`` is returned.
    """
    for field in output.dtype.names:
        try:
            source = input[field]
        except ValueError:
            # Field not present in the input: leave the output field untouched.
            continue
        if source.dtype.names is None:
            # Leaf field: copy only as many rows as the input provides.
            output[field][:len(source)] = source
        else:
            recursive_fill_fields(source, output[field])
    return output
class RichardsTests1D(BaseRichardsTest):
    """1-D Richards-equation regression tests on a 20-cell tensor mesh."""

    def get_mesh(self):
        mesh = discretize.TensorMesh([np.ones(20)])
        mesh.set_cell_gradient_BC('dirichlet')
        # BUGFIX(cleanup): removed leftover debug ``print(mesh.dim)``.
        return mesh

    def get_rx_list(self, times):
        # Receivers at three locations, for both saturation and pressure.
        locs = np.array([[5.0], [10], [15]])
        rxSat = richards.receivers.Saturation(locations=locs, times=times)
        rxPre = richards.receivers.Pressure(locations=locs, times=times)
        return [rxSat, rxPre]

    def get_conditions(self, mesh):
        # Boundary values; the initial head is uniform at the first bc value.
        bc = np.array([(- 61.5), (- 20.7)])
        h = (np.zeros(mesh.nC) + bc[0])
        return (bc, h)

    def setup_maps(self, mesh, k_fun, theta_fun):
        k_fun.KsMap = maps.ExpMap(nP=mesh.nC)

    def setup_model(self):
        self.mtrue = np.log(self.Ks)

    def _test_Richards_getResidual_Newton(self):
        # Leading underscore keeps these residual checks out of the default run.
        self._dotest_getResidual(True)

    def _test_Richards_getResidual_Picard(self):
        self._dotest_getResidual(False)

    def test_adjoint(self):
        self._dotest_adjoint()

    def test_sensitivity(self):
        self._dotest_sensitivity()

    def test_sensitivity_full(self):
        self._dotest_sensitivity_full()
def main(args):
    """Print summary statistics for a dataset stored as train/dev/test JSONL
    splits (length medians, label balance) and emit a one-row LaTeX table.
    """
    keys = ['train', 'dev', 'test']
    dfs = []
    for k in keys:
        df = pd.read_json(os.path.join(args.dataset, 'data', (k + '.jsonl')), lines=True)
        df['split'] = k
        dfs.append(df)
    dfs = pd.concat(dfs).reset_index(drop=True)
    if ('annotation_id' in dfs.columns):
        dfs = dfs.drop(columns=['annotation_id'])
    # Replace raw text columns with whitespace-token counts to keep memory low.
    dfs['document_length'] = dfs['document'].apply((lambda x: len(x.split())))
    dfs = dfs.drop(columns=['document'])
    if ('query' in dfs.columns):
        dfs['query_length'] = dfs['query'].apply((lambda x: len(x.split())))
        dfs = dfs.drop(columns=['query'])
    else:
        dfs['query_length'] = 0.0
    if ('rationale' in dfs.columns):
        # Rationale length = summed span lengths, as a fraction of document length.
        dfs['rationale_length'] = (dfs['rationale'].apply((lambda x: sum([(y[1] - y[0]) for y in x]))) / dfs['document_length'])
        dfs = dfs.drop(columns=['rationale'])
    else:
        dfs['rationale_length'] = 0.0

    def aggregate(rows):
        # Summarize each column: medians for *_length columns (also printing
        # max and 90th percentile), class balance for 'label'.
        new_data = {}
        for col in rows.columns:
            if col.endswith('length'):
                desc = np.median(rows[col].values)
                print(col, np.max(rows[col].values))
                print(col, np.percentile(rows[col].values, 90))
                new_data[col] = desc
            elif (col == 'label'):
                label = rows[col].value_counts(normalize=True)[sorted(rows[col].unique())]
                new_data[col] = ' / '.join([str(x) for x in label.round(2).values])
                # 'label_' holds the class names matching the proportions above.
                new_data[(col + '_')] = ' / '.join([str(x) for x in label.round(2).index])
        new_data['N'] = len(rows)
        return pd.Series(new_data)

    agg = aggregate(dfs)
    print(agg)
    agg.name = args.dataset
    agg = pd.DataFrame(agg).T
    # The helper 'label_' column is only for inspection, not the LaTeX output.
    agg = agg.drop(columns=['label_'])
    print(agg.to_latex())
def _get_treatment_role(roles: Dict[(Union[(ColumnRole, str)], Union[(str, Sequence[str])])]) -> Tuple[(Union[(TreatmentRole, str)], str)]:
    """Find the (role, column) pair marking the treatment column in ``roles``.

    Raises RuntimeError if no treatment role exists, or if the treatment role
    maps to more than one column.
    """
    for (role, column) in roles.items():
        is_treatment = isinstance(role, TreatmentRole) or (isinstance(role, str) and role == 'treatment')
        if not is_treatment:
            continue
        # A non-string sequence means several columns were assigned: invalid.
        if isinstance(column, Sequence) and not isinstance(column, str):
            raise RuntimeError('Treatment column must be unique')
        return (role, column)
    raise RuntimeError('Treatment role is absent')
def test():
    """Run the nested-stream SDFG and check its output against expected values."""
    print('Nested stream test')
    out = np.zeros([2], np.int32)
    sdfg(B=out)
    expected = np.array([2, 0], dtype=np.int32)
    diff = np.linalg.norm(expected - out)
    print('Difference:', diff)
    assert (diff == 0)
class FiniteWordPath_square_grid_list(WordDatatype_list, FiniteWordPath_square_grid, FiniteWord_class):
    """Finite word path on the square grid backed by list storage.

    Pure mixin combination: all behavior comes from the three parent classes.
    """
    pass
def set_time_limit_in_seconds(parser, args, component):
    """Normalize ``args.<component>_time_limit`` to seconds, in place.

    Does nothing when the limit is unset (None).
    """
    attr = component + '_time_limit'
    raw_limit = getattr(args, attr)
    if raw_limit is None:
        return
    setattr(args, attr, _get_time_limit_in_seconds(raw_limit, parser))
def COM(self, marker):
    """Handle a JPEG COM (comment) marker segment.

    Reads the segment payload from ``self.fp`` and stores it under
    ``info['comment']``, ``app['COM']``, and appends it to ``applist``.
    """
    # The stored segment length includes its own two length bytes; subtract them.
    n = (i16(self.fp.read(2)) - 2)
    s = ImageFile._safe_read(self.fp, n)
    self.info['comment'] = s
    self.app['COM'] = s
    self.applist.append(('COM', s))
def select_policies(runs, metric_np, K):
    """Greedily pick ``K`` maximally-diverse runs (farthest-point selection).

    Starts from one random run, then repeatedly adds the run whose summed L1
    distance (over rows of ``metric_np``) to the already-selected set is
    largest. Returns the selected run names, sorted.
    """
    selected = []
    n = len(runs)
    selected.append(np.random.randint(0, n))
    for _ in range(1, K):
        score = np.zeros((n,), dtype=np.float32)
        for i in range(n):
            if i in selected:
                # BUGFIX: the sentinel was ``-.0`` (negative zero), which ties
                # with genuine zero distances and could re-select an already
                # chosen run; -inf guarantees selected runs are never re-picked.
                score[i] = -np.inf
            else:
                for j in selected:
                    score[i] += abs(metric_np[i] - metric_np[j]).sum()
        selected.append(score.argmax())
    return sorted([runs[i] for i in selected])
class Conv1DTranspose(tf.keras.layers.Layer):
    """1-D transposed convolution implemented via Conv2DTranspose on a dummy axis."""

    def __init__(self, filters, kernel_size, strides=1, padding='valid', **kwargs):
        super().__init__()
        # A width-1 second spatial axis makes the 2-D op behave like a 1-D one.
        self.conv2dtranspose = tf.keras.layers.Conv2DTranspose(filters, (kernel_size, 1), (strides, 1), padding, **kwargs)

    def call(self, x):
        # (batch, time, ch) -> (batch, time, 1, ch) -> conv -> squeeze back.
        expanded = tf.expand_dims(x, axis=2)
        convolved = self.conv2dtranspose(expanded)
        return tf.squeeze(convolved, axis=2)
def test_detection_list_select():
    """DetectionList.select filters by confidence threshold and trace-id regex.

    The asserts below show that detections with ``peak_value=None`` are
    excluded by any confidence filter, and that ``trace_id`` is matched as a
    regular expression.
    """
    detections = seisbench.util.DetectionList([seisbench.util.Detection('CX.PB01.', None, None, peak_value=0.5), seisbench.util.Detection('CX.PB02.', None, None, peak_value=0.3), seisbench.util.Detection('CX.PB03.', None, None, peak_value=None)])
    assert (len(detections.select(min_confidence=0.1)) == 2)
    assert (len(detections.select(min_confidence=0.4)) == 1)
    assert (len(detections.select(trace_id='CX\\.PB0[12]\\.')) == 2)
# NOTE(review): bare call — this matches the backbone-registration decorator
# pattern ``@BACKBONE_REGISTRY.register()`` used above the builder function;
# the leading '@' (and possibly part of the registry name) appears to have
# been lost in extraction. Confirm the intended form.
_REGISTRY.register()
def build_effnet_backbone(cfg):
    """Build an EfficientNet backbone from a fastreid-style config.

    Selects the EN-B0..B5 variant via ``MODEL.BACKBONE.DEPTH`` and optionally
    loads pretrained weights from a local path or a downloaded checkpoint.
    """
    pretrain = cfg.MODEL.BACKBONE.PRETRAIN
    pretrain_path = cfg.MODEL.BACKBONE.PRETRAIN_PATH
    last_stride = cfg.MODEL.BACKBONE.LAST_STRIDE
    bn_norm = cfg.MODEL.BACKBONE.NORM
    depth = cfg.MODEL.BACKBONE.DEPTH
    # Per-variant architecture definition files, keyed by depth string.
    cfg_files = {'b0': 'fastreid/modeling/backbones/regnet/effnet/EN-B0_dds_8gpu.yaml', 'b1': 'fastreid/modeling/backbones/regnet/effnet/EN-B1_dds_8gpu.yaml', 'b2': 'fastreid/modeling/backbones/regnet/effnet/EN-B2_dds_8gpu.yaml', 'b3': 'fastreid/modeling/backbones/regnet/effnet/EN-B3_dds_8gpu.yaml', 'b4': 'fastreid/modeling/backbones/regnet/effnet/EN-B4_dds_8gpu.yaml', 'b5': 'fastreid/modeling/backbones/regnet/effnet/EN-B5_dds_8gpu.yaml'}[depth]
    effnet_cfg.merge_from_file(cfg_files)
    model = EffNet(last_stride, bn_norm)
    if pretrain:
        if pretrain_path:
            try:
                # Load on CPU; 'model_state' is the expected checkpoint key.
                state_dict = torch.load(pretrain_path, map_location=torch.device('cpu'))['model_state']
                logger.info(f'Loading pretrained model from {pretrain_path}')
            except FileNotFoundError as e:
                logger.info(f'{pretrain_path} is not found! Please check this path.')
                raise e
            except KeyError as e:
                logger.info('State dict keys error! Please check the state dict.')
                raise e
        else:
            # No explicit path: fetch weights keyed by the variant name.
            key = depth
            state_dict = init_pretrained_weights(key)
        # strict=False: log mismatched keys instead of failing outright.
        incompatible = model.load_state_dict(state_dict, strict=False)
        if incompatible.missing_keys:
            logger.info(get_missing_parameters_message(incompatible.missing_keys))
        if incompatible.unexpected_keys:
            logger.info(get_unexpected_parameters_message(incompatible.unexpected_keys))
    return model
def flaky_xfail_mark(exception, issue_numbers):
    """Build a pytest xfail mark for a known-flaky test.

    Parameters
    ----------
    exception : type
        The exception type the flaky failure raises.
    issue_numbers : int or iterable of int
        Tracker issue number(s) documenting the flake.

    Raises ValueError when no issue number is given.
    """
    if isinstance(issue_numbers, int):
        issue_numbers = [issue_numbers]
    if not issue_numbers:
        raise ValueError('at least one issue must be specified when marking a test as flaky')
    # BUGFIX: the original f-string literal was truncated mid-token (`f'<`)
    # and did not parse; reconstructed to render each issue as '<N>'.
    # NOTE(review): the original may have embedded a full issue-tracker URL —
    # confirm against the project's conventions.
    issues = ' '.join(f'<{num}>' for num in issue_numbers)
    return pytest.mark.xfail(raises=exception, reason=f'flaky: {issues}')
def colorx(clip, factor):
    """Return a clip whose pixel values are multiplied by ``factor``, saturating at 255."""
    def scale_frame(frame):
        # Scale in float via ``factor``, clamp to the valid uint8 range.
        return np.minimum(255, factor * frame).astype('uint8')
    return clip.fl_image(scale_frame)
def count_used_parameters(model):
    """Count elements of parameters that received a gradient (``p.grad`` is set)."""
    total = 0
    for param in model.parameters():
        if param.grad is not None:
            total += param.numel()
    return total
def test_short_decorator():
    """The decorated function should double its input elementwise."""
    data = np.random.rand(20)
    expected = data + data
    assert np.allclose(short_decorator(data), expected)
def get_all_int_dtypes() -> List[torch.dtype]:
    """Return every integer torch dtype (unsigned then signed, by width)."""
    names = ('uint8', 'int8', 'int16', 'int32', 'int64')
    return [getattr(torch, name) for name in names]
@torch.no_grad()
def tensor2np(x: torch.Tensor) -> np.array:
    """Convert a [-1, 1] image tensor of shape (1, C, H, W) to a uint8 HWC array.

    BUGFIX: the source had a corrupted bare ``_grad()`` call above the def
    (a NameError as written); restored as the intended ``@torch.no_grad()``
    inference decorator.
    """
    x = 127.5 * (x + 1)  # map [-1, 1] -> [0, 255]
    x = x.round().clamp(min=0, max=255).byte()
    x = x.squeeze(0)  # drop the leading batch axis
    x = x.cpu().numpy()
    x = np.transpose(x, (1, 2, 0))  # CHW -> HWC
    return np.ascontiguousarray(x)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.