code stringlengths 101 5.91M |
|---|
def nn(input, layers_sizes, reuse=None, flatten=False, name=''):
    """Build a fully-connected network over `input`.

    Each hidden layer uses ReLU; the final layer is linear. Layers are named
    '<name>_<i>' so variables can be reused via `reuse`. When `flatten` is
    set, the final layer must have a single unit and the output is reshaped
    to a 1-D tensor.
    """
    last = len(layers_sizes) - 1
    for i, size in enumerate(layers_sizes):
        input = tf.compat.v1.layers.dense(
            inputs=input,
            units=size,
            kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(
                scale=1.0, mode='fan_avg', distribution='uniform'),
            reuse=reuse,
            name='{}_{}'.format(name, i),
        )
        # ReLU on every layer except the last (the output stays linear).
        if i < last:
            input = tf.nn.relu(input)
    if flatten:
        assert layers_sizes[-1] == 1
        input = tf.reshape(input, [-1])
    return input
def p_comp_iter(s, body):
    """Parse the next comprehension clause, if any.

    Dispatches on the current token: 'for'/'async' continue with a
    comprehension-for clause, 'if' with a comprehension-if clause; any other
    token ends the comprehension and `body` is returned unchanged.
    """
    token = s.sy
    if token == 'if':
        return p_comp_if(s, body)
    if token in ('for', 'async'):
        return p_comp_for(s, body)
    return body
def evaluate(model, test_idxs):
    """Evaluate `model` on the rows of the global text dataset selected by
    `test_idxs`, print accuracy/precision/recall/F1, and return
    (precision, recall, f1_score).

    Depends on module-level globals: `config`, `text_features`,
    `text_targets`, `model_performance`, `np`, `torch`, `Variable`.
    """
    model.eval()
    batch_idx = 1  # NOTE(review): never used after initialization
    total_loss = 0  # NOTE(review): never accumulated; loss is not reported here
    # `pred` starts as an *uninitialized* (batch_size, 1) long tensor; these
    # placeholder rows are sliced off below via pred[config['batch_size']:].
    pred = torch.empty(config['batch_size'], 1).type(torch.LongTensor)
    X_test = text_features[test_idxs]
    Y_test = text_targets[test_idxs]
    global max_train_acc, max_acc, max_f1
    for i in range(0, X_test.shape[0], config['batch_size']):
        # The final batch may be smaller than batch_size.
        if ((i + config['batch_size']) > X_test.shape[0]):
            (x, y) = (X_test[i:], Y_test[i:])
        else:
            (x, y) = (X_test[i:(i + config['batch_size'])], Y_test[i:(i + config['batch_size'])])
        if config['cuda']:
            (x, y) = (Variable(torch.from_numpy(x).type(torch.FloatTensor), requires_grad=True).cuda(), Variable(torch.from_numpy(y)).cuda())
        else:
            (x, y) = (Variable(torch.from_numpy(x).type(torch.FloatTensor), requires_grad=True), Variable(torch.from_numpy(y)))
        with torch.no_grad():
            # assumes x is (batch, features, 1) so squeeze(2) yields 2-D input — TODO confirm
            output = model(x.squeeze(2))
        # Append the argmax class index for each row of this batch.
        pred = torch.cat((pred, output.data.max(1, keepdim=True)[1]))
    # Drop the uninitialized seed rows before scoring.
    (y_test_pred, conf_matrix) = model_performance(Y_test, pred[config['batch_size']:])
    print('Calculating additional test metrics...')
    # Metrics below treat row/column 0 of the confusion matrix as the positive
    # class — presumably matches model_performance's layout; verify.
    accuracy = (float((conf_matrix[0][0] + conf_matrix[1][1])) / np.sum(conf_matrix))
    precision = (float(conf_matrix[0][0]) / (conf_matrix[0][0] + conf_matrix[0][1]))
    recall = (float(conf_matrix[0][0]) / (conf_matrix[0][0] + conf_matrix[1][0]))
    f1_score = ((2 * (precision * recall)) / (precision + recall))
    print('Accuracy: {}'.format(accuracy))
    print('Precision: {}'.format(precision))
    print('Recall: {}'.format(recall))
    print('F1-Score: {}\n'.format(f1_score))
    print(('=' * 89))
    return (precision, recall, f1_score)
def test_method_get_variable_references(method_mock, default_test_case):
    """A method statement's variable references are its argument values, its
    callee, and its own return value."""
    arg_value = stmt.FloatPrimitiveStatement(default_test_case, 5.0)
    callee = stmt.FloatPrimitiveStatement(default_test_case, 10.0)
    method_stmt = stmt.MethodStatement(
        default_test_case, method_mock, callee.ret_val, args={'test': arg_value.ret_val}
    )
    for statement in (arg_value, callee, method_stmt):
        default_test_case.add_statement(statement)
    expected = {arg_value.ret_val, callee.ret_val, method_stmt.ret_val}
    assert method_stmt.get_variable_references() == expected
def sanitize(x: Any) -> Any:
    """Recursively convert `x` into JSON-serializable Python builtins.

    Primitives pass through; torch tensors and numpy arrays become (nested)
    lists, numpy scalars become Python scalars, and dicts/lists/tuples are
    sanitized element-wise.

    Raises:
        ValueError: if `x` (or any nested value) has an unsupported type.
    """
    if isinstance(x, (str, float, int, bool)):
        return x
    elif isinstance(x, torch.Tensor):
        # Fix: the original checked the removed private API `torch._TensorBase`,
        # and checked `torch.autograd.Variable` first — on modern torch Variable
        # is an alias of Tensor and `.data` is itself a Tensor, so that branch
        # recursed forever. Tensor must be handled here, first.
        return x.cpu().tolist()
    elif isinstance(x, torch.autograd.Variable):
        # Legacy Variable wrapper (pre-0.4 torch): unwrap and recurse.
        return sanitize(x.data)
    elif isinstance(x, numpy.ndarray):
        return x.tolist()
    elif isinstance(x, numpy.number):
        return x.item()
    elif isinstance(x, dict):
        return {key: sanitize(value) for (key, value) in x.items()}
    elif isinstance(x, (list, tuple)):
        return [sanitize(x_i) for x_i in x]
    else:
        raise ValueError('cannot sanitize {} of type {}'.format(x, type(x)))
class ThreeInterpolate(Function):
    """Autograd wrapper around the `_ext.three_interpolate` native kernels.

    Use via ``ThreeInterpolate.apply(features, idx, weight)``.

    Fix: modern torch requires `forward`/`backward` to be static methods on
    autograd Functions (the old instance-method style was removed); without
    the decorators `.apply` raises at call time.
    """

    @staticmethod
    def forward(ctx, features, idx, weight):
        # `features` is saved only so backward can read its size along dim 2.
        ctx.save_for_backward(idx, weight, features)
        return _ext.three_interpolate(features, idx, weight)

    @staticmethod
    def backward(ctx, grad_out):
        (idx, weight, features) = ctx.saved_tensors
        m = features.size(2)
        grad_features = _ext.three_interpolate_grad(grad_out.contiguous(), idx, weight, m)
        # idx and weight are non-differentiable inputs; return zero grads for them.
        return (grad_features, torch.zeros_like(idx), torch.zeros_like(weight))
class DPRDoc_Retrieval():
    """DPR question encoding + nearest-passage lookup against the module-level
    `wiki` index (a datasets object with FAISS 'embeddings').

    Relies on module-level names: DPRQuestionEncoderTokenizer,
    DPRQuestionEncoder, wiki.
    """

    def __init__(self, topk=100, model_type='ftwctx'):
        # Prefer GPU when available; the encoder is moved there below.
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cpu')
        self.topk = topk
        self.model_type = model_type
        self.q_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        if (self.model_type == 'original'):
            self.q_encoder = DPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        elif (self.model_type == 'ftnoctx'):
            self.q_encoder = DPRQuestionEncoder.from_pretrained('nqt2e13/')
        else:
            print('model wctx')
            self.q_encoder = DPRQuestionEncoder.from_pretrained('wctxt1e21/')
        self.q_encoder = self.q_encoder.to(self.device)

    def get_top(self, question, topk=5):
        """Return up to ~450 words of concatenated '(title) text' passages."""
        # NOTE(review): unlike get_top_passages, the tokenized inputs are not
        # moved to self.device here, so this path will fail when the encoder
        # is on CUDA — confirm intent.
        question_emb = self.q_encoder(**self.q_tokenizer(question, return_tensors='pt'))[0].detach().numpy()
        # BUG FIX: the original passed `k=selftopk`, an undefined name
        # (NameError at runtime); the `topk` parameter is what was intended.
        (passages_scores, passages) = wiki.get_nearest_examples('embeddings', question_emb, k=topk)
        all_passages = ''
        for (score, title, text) in zip(passages_scores, passages['title'], passages['text']):
            # Stop appending once roughly 450 words have been collected.
            if (len(all_passages.split(' ')) < 450):
                all_passages += f' ({title}) {text}'
        return all_passages

    def get_top_passages(self, question, topk=None):
        """Return (scores, passages) for the top-k nearest passages."""
        if (topk is None):
            topk = self.topk
        question_emb = self.q_encoder(**self.q_tokenizer(question, return_tensors='pt').to(self.device))[0].cpu().detach().numpy()
        (passages_scores, passages) = wiki.get_nearest_examples('embeddings', question_emb, k=topk)
        return (passages_scores, passages)
class ARBatchSampler(Sampler):
    """Batch sampler that shuffles images with an epoch-seeded RNG and yields
    batches of image ids sorted (within each batch) by aspect ratio.
    """

    def __init__(self, data_source, batch_size, drop_last=False, epoch=0):
        super(ARBatchSampler, self).__init__(data_source)
        self.data_source = data_source
        self.batch_size = batch_size
        self.drop_last = drop_last
        self._epoch = epoch  # seeds the shuffle; see set_epoch()
        # NOTE(review): no call parentheses — presumably `get_images` is a
        # property/attribute on the data source; verify it is not a method
        # (otherwise this stores the bound method object).
        self.img_sets = self.data_source.get_images

    def _split_images(self, indices):
        """Partition `indices` into [portrait (ar < 1), landscape-or-square] buckets.

        NOTE(review): not called by the visible methods — possibly used by
        subclasses or code outside this chunk.
        """
        img_sizes = self.data_source.img_sizes
        img_sets = [[], []]
        for img_id in indices:
            aspect_ratio = (img_sizes[img_id][0] / img_sizes[img_id][1])
            if (aspect_ratio < 1):
                img_sets[0].append({'id': img_id, 'ar': aspect_ratio})
            else:
                img_sets[1].append({'id': img_id, 'ar': aspect_ratio})
        return img_sets

    def _generate_batches(self):
        # Deterministic shuffle keyed on the current epoch.
        g = torch.Generator()
        g.manual_seed(self._epoch)
        self.img_sets = [self.img_sets[i] for i in torch.randperm(len(self.img_sets), generator=g)]
        batches = []
        leftover = []
        batch = []
        for img in self.img_sets:
            batch.append(img)
            if (len(batch) == self.batch_size):
                batches.append(batch)
                batch = []
        # Whatever did not fill a whole batch.
        leftover += batch
        if (not self.drop_last):
            # Re-batch the leftovers; since len(leftover) < batch_size this
            # yields at most one final short batch.
            batch = []
            for img in leftover:
                batch.append(img)
                if (len(batch) == self.batch_size):
                    batches.append(batch)
                    batch = []
            if (len(batch) != 0):
                batches.append(batch)
        return batches

    def set_epoch(self, epoch):
        """Set the epoch used to seed the shuffle (call once per epoch)."""
        self._epoch = epoch

    def __len__(self):
        # Number of batches: floor when dropping the last partial batch,
        # ceiling otherwise.
        if self.drop_last:
            return (len(self.img_sets) // self.batch_size)
        else:
            return (((len(self.img_sets) + self.batch_size) - 1) // self.batch_size)

    def __iter__(self):
        batches = self._generate_batches()
        for batch in batches:
            # Within a batch, order by ascending aspect ratio, then yield ids only.
            batch = sorted(batch, key=(lambda i: i['ar']))
            batch = [i['id'] for i in batch]
            (yield batch)
def OzaBaggingAdwin(base_estimator=None, n_estimators=10, random_state=None):
    """Deprecated alias for OzaBaggingADWINClassifier.

    Fix: the original default `base_estimator=KNNADWINClassifier()` was a
    mutable default created once at import time, so every call (and every
    classifier built from it) shared the same estimator instance. Defaulting
    to None and instantiating per call preserves the documented default while
    giving each caller a fresh estimator.
    """
    warnings.warn("'OzaBaggingAdwin' has been renamed to 'OzaBaggingADWINClassifier' in v0.5.0.\nThe old name will be removed in v0.7.0", category=FutureWarning)
    if base_estimator is None:
        base_estimator = KNNADWINClassifier()
    return OzaBaggingADWINClassifier(base_estimator=base_estimator, n_estimators=n_estimators, random_state=random_state)
class PipelineTestCaseMeta(type):
    """Metaclass that auto-generates one pipeline smoke test per
    (framework, architecture, tokenizer, feature extractor) combination found
    in the class's `model_mapping` / `tf_model_mapping` attributes.

    Each generated test builds a tiny model + tokenizer/feature extractor,
    asks the class under construction for a pipeline via get_test_pipeline,
    runs run_pipeline_test, and then re-runs the pipeline in batched mode.
    """

    def __new__(mcs, name, bases, dct):
        def gen_test(ModelClass, checkpoint, tiny_config, tokenizer_class, feature_extractor_class):
            # NOTE(review): the two tuples below are no-ops — they look like
            # stripped `assert`/skip statements ("TinyConfig does not exist",
            # "checkpoint does not exist"); confirm against the original file.
            ((tiny_config is None), 'TinyConfig does not exist')
            ((checkpoint is None), 'checkpoint does not exist')

            def test(self):
                if ModelClass.__name__.endswith('ForCausalLM'):
                    tiny_config.is_encoder_decoder = False
                    if hasattr(tiny_config, 'encoder_no_repeat_ngram_size'):
                        # Decoder-only generation breaks with this set; zero it.
                        tiny_config.encoder_no_repeat_ngram_size = 0
                if ModelClass.__name__.endswith('WithLMHead'):
                    tiny_config.is_decoder = True
                try:
                    model = ModelClass(tiny_config)
                except ImportError as e:
                    self.skipTest(f"Cannot run with {tiny_config} as the model requires a library that isn't installed: {e}")
                if hasattr(model, 'eval'):
                    model = model.eval()
                if (tokenizer_class is not None):
                    try:
                        tokenizer = get_tiny_tokenizer_from_checkpoint(checkpoint)
                        # Some configs (Roberta-style) reserve 2 position ids.
                        if isinstance(model.config, (RobertaConfig, IBertConfig)):
                            tokenizer.model_max_length = (model.config.max_position_embeddings - 2)
                        elif (hasattr(model.config, 'max_position_embeddings') and (model.config.max_position_embeddings > 0)):
                            tokenizer.model_max_length = model.config.max_position_embeddings
                    # NOTE(review): bare except — swallows everything
                    # (including KeyboardInterrupt); narrow if possible.
                    except:
                        self.skipTest(f'Ignoring {ModelClass}, cannot create a simple tokenizer')
                else:
                    tokenizer = None
                feature_extractor = get_tiny_feature_extractor_from_checkpoint(checkpoint, tiny_config, feature_extractor_class)
                if ((tokenizer is None) and (feature_extractor is None)):
                    self.skipTest(f'Ignoring {ModelClass}, cannot create a tokenizer or feature_extractor (PerceiverConfig with no FastTokenizer ?)')
                (pipeline, examples) = self.get_test_pipeline(model, tokenizer, feature_extractor)
                if (pipeline is None):
                    # The test can disable itself, but it should be very marginal.
                    return
                self.run_pipeline_test(pipeline, examples)

                def run_batch_test(pipeline, examples):
                    # Batching is not supported without a pad token.
                    if ((pipeline.tokenizer is not None) and (pipeline.tokenizer.pad_token_id is None)):
                        return

                    def data(n):
                        for _ in range(n):
                            # Deep-copy so batching cannot mutate shared examples.
                            (yield copy.deepcopy(random.choice(examples)))

                    out = []
                    for item in pipeline(data(10), batch_size=4):
                        out.append(item)
                    self.assertEqual(len(out), 10)

                run_batch_test(pipeline, examples)

            return test

        # Generate one test per framework prefix / architecture / tokenizer /
        # feature-extractor combination declared on the class.
        for (prefix, key) in [('pt', 'model_mapping'), ('tf', 'tf_model_mapping')]:
            mapping = dct.get(key, {})
            if mapping:
                for (configuration, model_architectures) in mapping.items():
                    if (not isinstance(model_architectures, tuple)):
                        model_architectures = (model_architectures,)
                    for model_architecture in model_architectures:
                        checkpoint = get_checkpoint_from_architecture(model_architecture)
                        tiny_config = get_tiny_config_from_class(configuration)
                        tokenizer_classes = TOKENIZER_MAPPING.get(configuration, [])
                        feature_extractor_class = FEATURE_EXTRACTOR_MAPPING.get(configuration, None)
                        feature_extractor_name = (feature_extractor_class.__name__ if feature_extractor_class else 'nofeature_extractor')
                        if (not tokenizer_classes):
                            # Still generate a single test with tokenizer=None.
                            tokenizer_classes = [None]
                        else:
                            tokenizer_classes = [tokenizer_class for tokenizer_class in tokenizer_classes if (tokenizer_class is not None)]
                        for tokenizer_class in tokenizer_classes:
                            if (tokenizer_class is not None):
                                tokenizer_name = tokenizer_class.__name__
                            else:
                                tokenizer_name = 'notokenizer'
                            test_name = f'test_{prefix}_{configuration.__name__}_{model_architecture.__name__}_{tokenizer_name}_{feature_extractor_name}'
                            if ((tokenizer_class is not None) or (feature_extractor_class is not None)):
                                dct[test_name] = gen_test(model_architecture, checkpoint, tiny_config, tokenizer_class, feature_extractor_class)

        # Default placeholders so subclasses are reminded to implement the
        # small-model tests explicitly.
        def inner(self):
            raise NotImplementedError('Not implemented test')

        dct['test_small_model_pt'] = dct.get('test_small_model_pt', inner)
        dct['test_small_model_tf'] = dct.get('test_small_model_tf', inner)
        return type.__new__(mcs, name, bases, dct)
def pop_layer(model):
    """Remove the last layer of a Sequential-style `model` in place.

    Rewires `model.outputs` to the new last layer (or clears the model's
    node/output bookkeeping entirely when no layers remain) and marks the
    model as not built.

    Raises:
        Exception: if the model has no outputs (nothing to pop).
    """
    if not model.outputs:
        raise Exception('Sequential model cannot be popped: model is empty.')
    model.layers.pop()
    if model.layers:
        last = model.layers[-1]
        last.outbound_nodes = []
        model.outputs = [last.output]
    else:
        model.outputs = []
        model.inbound_nodes = []
        model.outbound_nodes = []
    model.built = False
def test_deepcopy():
    """Regression test: unrolled calls to an SDFG-convertible closure object
    must not deepcopy it (Nocopy raises ValueError from __deepcopy__).

    NOTE(review): `someprogram` (and the inner `bla`) appears to have lost a
    @dace.program decorator in this dump — confirm against the original file;
    as plain functions, `bla.to_sdfg()` and calling someprogram with dace
    semantics would not work.
    """
    class Nocopy(SDFGConvertible):
        # Convertible object whose __deepcopy__ always raises, so any
        # accidental deepcopy during parsing fails the test loudly.
        def __sdfg__(self, *args, **kwargs):
            def bla(a: dace.float64[20]):
                return a
            return bla.to_sdfg()

        def __sdfg_closure__(self, reevaluate=None):
            return {}

        def __sdfg_signature__(self):
            return [['a'], []]

        def __deepcopy__(self, memo):
            raise ValueError('DO NOT COPY ME PLEASE')

    nocopy = Nocopy()

    def someprogram(a):
        # nocopy(a) is the identity SDFG, so each iteration adds i * a.
        for i in dace.unroll(range(3)):
            a += (i * nocopy(a))

    b = np.random.rand(20)
    # a -> a*(1+0) -> a*(1+1) -> a*... : net effect is a factor of 6.
    expected = (6 * b)
    someprogram(b)
    assert np.allclose(b, expected)
def _legal_action_mask(board_2d):
    """Return legality of the four slide directions for `board_2d`.

    Each direction is reduced to "can slide left" by rotating the board
    0/90/180/270 degrees and vmapping `_can_slide_left` over the stack.
    """
    rotations = jnp.array([jnp.rot90(board_2d, k) for k in range(4)])
    return jax.vmap(_can_slide_left)(rotations)
def test_get_data_for_tensorkey_locally(collaborator_mock, tensor_key):
    """get_data_for_tensorkey falls back to a second cache lookup and returns
    the locally cached tensor."""
    key = tensor_key._replace(round_number=1)
    expected = numpy.array([0, 1, 2, 3, 4])
    # First cache probe misses (None); the retry returns the array.
    collaborator_mock.tensor_db.get_tensor_from_cache = mock.Mock(
        side_effect=[None, expected]
    )
    result = collaborator_mock.get_data_for_tensorkey(key)
    assert numpy.array_equal(result, expected)
_sentencepiece
_torch
_pytesseract
class LayoutXLMProcessorIntegrationTests(unittest.TestCase):
    """End-to-end checks of LayoutXLMProcessor (LayoutLMv2FeatureExtractor +
    LayoutXLM tokenizer) across the five supported input combinations:
    OCR/no-OCR images, caller-supplied words+boxes, word labels, and visual
    question answering.

    Fixes: the `@property` decorators below were mangled to bare `_property`
    statements in this dump (which raise NameError when the class body runs),
    and the case-2 expected_decoding string literal had lost its closing
    quote. Restored; all runtime strings are otherwise unchanged.
    NOTE(review): upstream may use @cached_property here — confirm.
    """

    @property
    def get_images(self):
        # Deferred import: `datasets` is only needed when the fixture is used.
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')
        image_1 = Image.open(ds[0]['file']).convert('RGB')
        image_2 = Image.open(ds[1]['file']).convert('RGB')
        return (image_1, image_2)

    @property
    def get_tokenizers(self):
        # Both the slow and fast tokenizer must behave identically in each case.
        slow_tokenizer = LayoutXLMTokenizer.from_pretrained('microsoft/layoutxlm-base')
        fast_tokenizer = LayoutXLMTokenizerFast.from_pretrained('microsoft/layoutxlm-base')
        return [slow_tokenizer, fast_tokenizer]

    def test_processor_case_1(self):
        # Case 1: images only, apply_ocr=True — the processor runs OCR itself.
        feature_extractor = LayoutLMv2FeatureExtractor()
        tokenizers = self.get_tokenizers
        images = self.get_images
        for tokenizer in tokenizers:
            processor = LayoutXLMProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            input_feat_extract = feature_extractor(images[0], return_tensors='pt')
            input_processor = processor(images[0], return_tensors='pt')
            # verify keys
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids']
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)
            # verify image pixels match the bare feature extractor's output
            self.assertAlmostEqual(input_feat_extract['pixel_values'].sum(), input_processor['image'].sum(), delta=0.01)
            # verify input_ids via decoding
            expected_decoding = '<s> 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President Introductory Remarks Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer</s>'
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            # batched
            input_feat_extract = feature_extractor(images, return_tensors='pt')
            input_processor = processor(images, padding=True, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids']
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)
            self.assertAlmostEqual(input_feat_extract['pixel_values'].sum(), input_processor['image'].sum(), delta=0.01)
            expected_decoding = "<s> 7 ITC Limited REPORT AND ACCOUNTS 2013 ITCs Brands: An Asset for the Nation The consumer needs and aspirations they fulfil, the benefit they generate for millions across ITCs value chains, the future-ready capabilities that support them, and the value that they create for the country, have made ITCs brands national assets, adding to Indias competitiveness. It is ITCs aspiration to be the No 1 FMCG player in the country, driven by its new FMCG businesses. A recent Nielsen report has highlighted that ITC's new FMCG businesses are the fastest growing among the top consumer goods companies operating in India. ITC takes justifiable pride that, along with generating economic value, these celebrated Indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. DI WILLS * ; LOVE DELIGHTFULLY SOFT SKIN? aia Ans Source:"
            decoding = processor.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)

    def test_processor_case_2(self):
        # Case 2: apply_ocr=False — the caller supplies words and boxes.
        feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
        tokenizers = self.get_tokenizers
        images = self.get_images
        for tokenizer in tokenizers:
            processor = LayoutXLMProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            words = ['hello', 'world']
            boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
            input_processor = processor(images[0], words, boxes=boxes, return_tensors='pt')
            expected_keys = ['input_ids', 'bbox', 'attention_mask', 'image']
            actual_keys = list(input_processor.keys())
            for key in expected_keys:
                self.assertIn(key, actual_keys)
            expected_decoding = '<s> hello world</s>'
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            # batched
            words = [['hello', 'world'], ['my', 'name', 'is', 'niels']]
            boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
            input_processor = processor(images, words, boxes=boxes, padding=True, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids']
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)
            expected_decoding = '<s> hello world</s><pad><pad>'
            decoding = processor.decode(input_processor.input_ids[0].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            # [0,0,0,0] is the <s> box, [1000,1000,1000,1000] the </s> box.
            expected_bbox = [[0, 0, 0, 0], [3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]]
            self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)

    def test_processor_case_3(self):
        # Case 3: token classification — words, boxes and word_labels supplied.
        feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
        tokenizers = self.get_tokenizers
        images = self.get_images
        for tokenizer in tokenizers:
            processor = LayoutXLMProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            words = ['weirdly', 'world']
            boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
            word_labels = [1, 2]
            input_processor = processor(images[0], words, boxes=boxes, word_labels=word_labels, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids', 'labels']
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)
            expected_decoding = '<s> weirdly world</s>'
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            # -100 marks special tokens / continuation subwords (ignored by loss).
            expected_labels = [(- 100), 1, (- 100), 2, (- 100)]
            self.assertListEqual(input_processor.labels.squeeze().tolist(), expected_labels)
            # batched
            words = [['hello', 'world'], ['my', 'name', 'is', 'niels']]
            boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
            word_labels = [[1, 2], [6, 3, 10, 2]]
            input_processor = processor(images, words, boxes=boxes, word_labels=word_labels, padding=True, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids', 'labels']
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)
            expected_decoding = '<s> my name is niels</s>'
            decoding = processor.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            expected_bbox = [[0, 0, 0, 0], [3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]]
            self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
            expected_labels = [(- 100), 6, 3, 10, 2, (- 100), (- 100)]
            self.assertListEqual(input_processor.labels[1].tolist(), expected_labels)

    def test_processor_case_4(self):
        # Case 4: visual question answering with OCR — image + question only.
        feature_extractor = LayoutLMv2FeatureExtractor()
        tokenizers = self.get_tokenizers
        images = self.get_images
        for tokenizer in tokenizers:
            processor = LayoutXLMProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            question = "What's his name?"
            input_processor = processor(images[0], question, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids']
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)
            expected_decoding = "<s> What's his name?</s></s> 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President Introductory Remarks Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer</s>"
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            # batched
            questions = ['How old is he?', "what's the time"]
            input_processor = processor(images, questions, padding='max_length', max_length=20, truncation=True, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids']
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)
            expected_decoding = "<s> what's the time</s></s> 7 ITC Limited REPORT AND ACCOUNTS 2013</s>"
            decoding = processor.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [289, 59, 365, 66], [289, 59, 365, 66], [289, 59, 365, 66], [372, 59, 407, 66], [1000, 1000, 1000, 1000]]
            self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)

    def test_processor_case_5(self):
        # Case 5: visual question answering without OCR — caller supplies
        # question plus words and boxes.
        feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
        tokenizers = self.get_tokenizers
        images = self.get_images
        for tokenizer in tokenizers:
            processor = LayoutXLMProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            question = "What's his name?"
            words = ['hello', 'world']
            boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
            input_processor = processor(images[0], question, words, boxes, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids']
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)
            expected_decoding = "<s> What's his name?</s></s> hello world</s>"
            decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            # batched
            questions = ['How old is he?', "what's the time"]
            words = [['hello', 'world'], ['my', 'name', 'is', 'niels']]
            boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
            input_processor = processor(images, questions, words, boxes, padding=True, return_tensors='pt')
            expected_keys = ['attention_mask', 'bbox', 'image', 'input_ids']
            actual_keys = sorted(input_processor.keys())
            self.assertListEqual(actual_keys, expected_keys)
            expected_decoding = '<s> How old is he?</s></s> hello world</s><pad><pad>'
            decoding = processor.decode(input_processor.input_ids[0].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            expected_decoding = "<s> what's the time</s></s> my name is niels</s>"
            decoding = processor.decode(input_processor.input_ids[1].tolist())
            self.assertSequenceEqual(decoding, expected_decoding)
            # Only the tail of the bbox sequence is pinned here.
            expected_bbox = [[6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]]
            self.assertListEqual(input_processor.bbox[1].tolist()[(- 5):], expected_bbox)
class _ComputeSim(torch.nn.Module):
def __init__(self):
super(_ComputeSim, self).__init__()
def forward(self, x1, x2):
assert (x1.ndim == 2), 'x1.ndim must be 2, but found {}.'.format(x1.ndim)
assert (x1.size()[0] == 1), 'x1.size[0] must be 1, but found {}.'.format(x1.size()[0])
assert (x2.ndim == 2), 'x2.ndim must be 2, but found {}.'.format(x2.ndim)
assert (x1.size()[1] == x2.size()[1]), 'x1 dim={} and x2 dim={}. they are supposed to be similar.'.format(x1.size()[1], x2.size()[1])
n = x2.size()[0]
x = x1.repeat(n, 1)
diff = (x - x2)
return (diff * diff).sum(dim=1).squeeze() |
def main():
    """Entry point: build a StarGAN_v2 model in a TF session and run the
    phase selected by the command-line arguments (train / refer_test / test)."""
    args = parse_args()
    if args is None:
        exit()
    session_config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=session_config) as sess:
        gan = StarGAN_v2(sess, args)
        gan.build_model()
        show_all_variables()
        phase = args.phase
        if phase == 'train':
            gan.train()
            print(' [*] Training finished!')
        elif phase == 'refer_test':
            gan.refer_test()
            print(' [*] Refer test finished!')
        else:
            gan.test()
            print(' [*] Test finished!')
class mnist_model(nn.Module):
    """LeNet-style CNN for 28x28 single-channel MNIST digits.

    conv(1->20,5) -> pool -> conv(20->50,5) -> pool -> fc(800->500) -> fc(500->10),
    with ReLU after every layer.
    """

    def __init__(self):
        super(mnist_model, self).__init__()
        self.layer1 = nn.Conv2d(1, 20, kernel_size=5, stride=1, padding=0)
        self.layer2 = nn.Conv2d(20, 50, kernel_size=5, stride=1, padding=0)
        self.layer3 = nn.Linear(800, 500, bias=True)
        self.layer4 = nn.Linear(500, 10, bias=True)
        self.act = nn.ReLU()
        self.pool = nn.MaxPool2d((2, 2))

    def forward(self, x):
        """Return the (batch, 10) activations for input x of shape (batch, 1, 28, 28)."""
        h = self.pool(self.act(self.layer1(x)))
        h = self.pool(self.act(self.layer2(h)))
        h = h.view(-1, 800)  # flatten 50*4*4 feature maps
        h = self.act(self.layer3(h))
        return self.act(self.layer4(h))

    def output(self, x):
        """Return the intermediate activations of all four layers as a tuple."""
        feat1 = self.pool(self.act(self.layer1(x)))
        feat2 = self.pool(self.act(self.layer2(feat1)))
        feat2 = feat2.view(-1, 800)
        feat3 = self.act(self.layer3(feat2))
        feat4 = self.act(self.layer4(feat3))
        return (feat1, feat2, feat3, feat4)
def get_check_binary_allowed(format_control):
    """Return a predicate telling whether a requirement may use a binary wheel.

    PEP 517 requirements are always allowed; otherwise the decision comes from
    `format_control` for the requirement's canonicalized name.
    """
    def check_binary_allowed(req):
        if req.use_pep517:
            # PEP 517 builds always go through the wheel path.
            return True
        name = canonicalize_name(req.name)
        return 'binary' in format_control.get_allowed_formats(name)
    return check_binary_allowed
def trainer_main(args):
    """Train or validate an FSNER model according to `args`.

    Depends on module-level names: FSNERTokenizerUtils, load_dataset,
    FSNERDataModule, FSNERModel, Trainer, ModelCheckpoint, seed_everything,
    warnings, os, Path.
    """
    if args.ignore_warnings:
        warnings.filterwarnings('ignore')
    Path(args.checkpoints_dir).mkdir(parents=True, exist_ok=True)
    seed_everything(args.seed)
    tokenizer = FSNERTokenizerUtils(args.pretrained_model)
    # In non-train modes the "train" dict is loaded from the validation data —
    # presumably so the datamodule can still be set up for 'fit'; verify.
    train_data_dict = load_dataset((args.train_data if (args.mode == 'train') else args.val_data), tokenizer)
    val_data_dict = load_dataset(args.val_data, tokenizer)
    datamodule = FSNERDataModule(train_data_dict=train_data_dict, val_data_dict=val_data_dict, tokenizer=tokenizer, train_batch_size=args.train_batch_size, val_batch_size=args.val_batch_size, n_examples_per_entity=args.n_examples_per_entity, negative_examples_ratio=args.neg_example_batch_ratio)
    datamodule.setup('fit')
    model = FSNERModel(model_name_or_path=args.pretrained_model, epoch_steps=datamodule.epoch_steps, token_embeddings_size=len(tokenizer.tokenizer))

    class FSNERTrainer(Trainer):
        # Override checkpointing to save HF-style pretrained artifacts (model +
        # tokenizer) in the checkpoint directory instead of a Lightning .ckpt.
        # NOTE(review): `weights_only` is accepted but ignored.
        def save_checkpoint(self, filepath, weights_only=False):
            dirpath = os.path.split(filepath)[0]
            self.lightning_module.model.save_pretrained(dirpath)
            self.datamodule.tokenizer.tokenizer.save_pretrained(dirpath)

    # GPU-specific trainer kwargs are only passed when running on 'gpu'.
    gpu_related_params = {'gpus': args.gpus, 'strategy': args.strategy}
    trainer = FSNERTrainer(accelerator=args.device, **(gpu_related_params if (args.device == 'gpu') else {}), callbacks=[ModelCheckpoint(monitor='val_loss', dirpath=Path(args.checkpoints_dir).joinpath('model'), save_top_k=1, mode='min')], enable_checkpointing=True, default_root_dir=args.checkpoints_dir, max_epochs=args.max_epochs, logger=False, num_sanity_val_steps=0, reload_dataloaders_every_n_epochs=1)
    if (args.mode == 'train'):
        trainer.fit(model, datamodule)
    else:
        trainer.validate(model, datamodule)
class GaussianMLPRegressor(LayersPowered, Serializable):
def __init__(self, name, input_shape, output_dim, mean_network=None, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, optimizer=None, use_trust_region=True, step_size=0.01, learn_std=True, init_std=1.0, adaptive_std=False, std_share_network=False, std_hidden_sizes=(32, 32), std_nonlinearity=None, normalize_inputs=True, normalize_outputs=True, subsample_factor=1.0, batchsize=1024):
Serializable.quick_init(self, locals())
with tf.variable_scope(name):
if (optimizer is None):
if use_trust_region:
optimizer = PenaltyLbfgsOptimizer('optimizer')
else:
optimizer = LbfgsOptimizer('optimizer')
self._optimizer = optimizer
self._subsample_factor = subsample_factor
if (mean_network is None):
mean_network = MLP(name='mean_network', input_shape=input_shape, output_dim=output_dim, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=None)
l_mean = mean_network.output_layer
if adaptive_std:
l_log_std = MLP(name='log_std_network', input_shape=input_shape, input_var=mean_network.input_layer.input_var, output_dim=output_dim, hidden_sizes=std_hidden_sizes, hidden_nonlinearity=std_nonlinearity, output_nonlinearity=None).output_layer
else:
l_log_std = L.ParamLayer(mean_network.input_layer, num_units=output_dim, param=tf.constant_initializer(np.log(init_std)), name='output_log_std', trainable=learn_std)
LayersPowered.__init__(self, [l_mean, l_log_std])
xs_var = mean_network.input_layer.input_var
ys_var = tf.placeholder(dtype=tf.float32, name='ys', shape=(None, output_dim))
old_means_var = tf.placeholder(dtype=tf.float32, name='old_means', shape=(None, output_dim))
old_log_stds_var = tf.placeholder(dtype=tf.float32, name='old_log_stds', shape=(None, output_dim))
x_mean_var = tf.Variable(np.zeros(((1,) + input_shape), dtype=np.float32), name='x_mean')
x_std_var = tf.Variable(np.ones(((1,) + input_shape), dtype=np.float32), name='x_std')
y_mean_var = tf.Variable(np.zeros((1, output_dim), dtype=np.float32), name='y_mean')
y_std_var = tf.Variable(np.ones((1, output_dim), dtype=np.float32), name='y_std')
normalized_xs_var = ((xs_var - x_mean_var) / x_std_var)
normalized_ys_var = ((ys_var - y_mean_var) / y_std_var)
normalized_means_var = L.get_output(l_mean, {mean_network.input_layer: normalized_xs_var})
normalized_log_stds_var = L.get_output(l_log_std, {mean_network.input_layer: normalized_xs_var})
means_var = ((normalized_means_var * y_std_var) + y_mean_var)
log_stds_var = (normalized_log_stds_var + tf.log(y_std_var))
normalized_old_means_var = ((old_means_var - y_mean_var) / y_std_var)
normalized_old_log_stds_var = (old_log_stds_var - tf.log(y_std_var))
dist = self._dist = DiagonalGaussian(output_dim)
normalized_dist_info_vars = dict(mean=normalized_means_var, log_std=normalized_log_stds_var)
mean_kl = tf.reduce_mean(dist.kl_sym(dict(mean=normalized_old_means_var, log_std=normalized_old_log_stds_var), normalized_dist_info_vars))
loss = (- tf.reduce_mean(dist.log_likelihood_sym(normalized_ys_var, normalized_dist_info_vars)))
self._f_predict = tensor_utils.compile_function([xs_var], means_var)
self._f_pdists = tensor_utils.compile_function([xs_var], [means_var, log_stds_var])
self._l_mean = l_mean
self._l_log_std = l_log_std
optimizer_args = dict(loss=loss, target=self, network_outputs=[normalized_means_var, normalized_log_stds_var])
if use_trust_region:
optimizer_args['leq_constraint'] = (mean_kl, step_size)
optimizer_args['inputs'] = [xs_var, ys_var, old_means_var, old_log_stds_var]
else:
optimizer_args['inputs'] = [xs_var, ys_var]
self._optimizer.update_opt(**optimizer_args)
self._use_trust_region = use_trust_region
self._name = name
self._normalize_inputs = normalize_inputs
self._normalize_outputs = normalize_outputs
self._mean_network = mean_network
self._x_mean_var = x_mean_var
self._x_std_var = x_std_var
self._y_mean_var = y_mean_var
self._y_std_var = y_std_var
self._batchsize = batchsize
def fit(self, xs, ys):
if (self._subsample_factor < 1):
num_samples_tot = xs.shape[0]
idx = np.random.randint(0, num_samples_tot, int((num_samples_tot * self._subsample_factor)))
(xs, ys) = (xs[idx], ys[idx])
sess = tf.get_default_session()
if self._normalize_inputs:
sess.run([tf.assign(self._x_mean_var, np.mean(xs, axis=0, keepdims=True)), tf.assign(self._x_std_var, (np.std(xs, axis=0, keepdims=True) + 1e-08))])
if self._normalize_outputs:
sess.run([tf.assign(self._y_mean_var, np.mean(ys, axis=0, keepdims=True)), tf.assign(self._y_std_var, (np.std(ys, axis=0, keepdims=True) + 1e-08))])
if self._name:
prefix = (self._name + '_')
else:
prefix = ''
(loss_before, loss_after, mean_kl, batch_count) = (0.0, 0.0, 0.0, 0)
for batch in iterate_minibatches_generic(input_lst=[xs, ys], batchsize=self._batchsize, shuffle=True):
batch_count += 1
(xs, ys) = batch
if self._use_trust_region:
(old_means, old_log_stds) = self._f_pdists(xs)
inputs = [xs, ys, old_means, old_log_stds]
else:
inputs = [xs, ys]
loss_before += self._optimizer.loss(inputs)
self._optimizer.optimize(inputs)
loss_after += self._optimizer.loss(inputs)
if self._use_trust_region:
mean_kl += self._optimizer.constraint_val(inputs)
'\n if self._use_trust_region:\n old_means, old_log_stds = self._f_pdists(xs)\n inputs = [xs, ys, old_means, old_log_stds]\n else:\n inputs = [xs, ys]\n loss_before = self._optimizer.loss(inputs)\n \n self._optimizer.optimize(inputs)\n loss_after = self._optimizer.loss(inputs)\n '
logger.record_tabular((prefix + 'LossBefore'), loss_before)
logger.record_tabular((prefix + 'LossAfter'), loss_after)
logger.record_tabular((prefix + 'dLoss'), (loss_before - loss_after))
if self._use_trust_region:
logger.record_tabular((prefix + 'MeanKL'), (mean_kl / batch_count))
def predict(self, xs):
return self._f_predict(xs)
def sample_predict(self, xs):
(means, log_stds) = self._f_pdists(xs)
return self._dist.sample(dict(mean=means, log_std=log_stds))
def predict_log_likelihood(self, xs, ys):
(means, log_stds) = self._f_pdists(xs)
return self._dist.log_likelihood(ys, dict(mean=means, log_std=log_stds))
def log_likelihood_sym(self, x_var, y_var):
normalized_xs_var = ((x_var - self._x_mean_var) / self._x_std_var)
(normalized_means_var, normalized_log_stds_var) = L.get_output([self._l_mean, self._l_log_std], {self._mean_network.input_layer: normalized_xs_var})
means_var = ((normalized_means_var * self._y_std_var) + self._y_mean_var)
log_stds_var = (normalized_log_stds_var + TT.log(self._y_std_var))
return self._dist.log_likelihood_sym(y_var, dict(mean=means_var, log_std=log_stds_var))
def get_param_values(self, **tags):
return LayersPowered.get_param_values(self, **tags)
def set_param_values(self, flattened_params, **tags):
LayersPowered.set_param_values(self, flattened_params, **tags) |
def node_to_text(test, f):
    """Recursively render a parsed test-result XML node as indented text.

    Writes one summary line per suite, a "Details" block per recorded
    failure, then recurses into nested <Test> children.
    ``read_test`` (defined elsewhere) extracts (result, name, time_real).
    """
    (result, name, time_real) = read_test(test)
    output = ('%s: Test Suite "%s" (%s)\n' % (result, name, time_real))
    f.write(output)
    # One indented block per failure attached to this node.
    for details in test.findall('FailureDetails'):
        f.write('      Details:\n')
        f.write(('        Message: %s\n' % details.find('Message').text))
        f.write(('        Condition: %s\n' % details.find('Condition').text))
        f.write(('        Actual: %s\n' % details.find('Actual').text))
        f.write(('        Limit: %s\n' % details.find('Limit').text))
        f.write(('        File: %s\n' % details.find('File').text))
        f.write(('        Line: %s\n' % details.find('Line').text))
    # Depth-first recursion into child suites/tests.
    for child in test.findall('Test'):
        node_to_text(child, f)
def DeepR50V3PlusD(args, num_classes, criterion, criterion_aux):
    """Factory: DeepLabV3+ head on a ResNet-50 trunk (D16 variant, m1 skip)."""
    print('Model : DeepLabv3+, Backbone : ResNet-50')
    return DeepV3Plus(
        num_classes,
        trunk='resnet-50',
        criterion=criterion,
        criterion_aux=criterion_aux,
        variant='D16',
        skip='m1',
        args=args,
    )
def test_step_reward():
    """Walk the agent through the maze and check observations and rewards.

    The expected values below encode MetaMazeEnv internals; from the asserts
    it appears the observation vector ends with (..., step-related counters,
    previous reward), and the agent starts at (row, col) == (3, 3) —
    TODO confirm against MetaMazeEnv's implementation.
    """
    env = MetaMazeEnv()
    obs = env.reset()
    assert (obs == [1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0.0]).all()
    # Place the reward at cell (1, 1); the agent starts at (3, 3).
    env.reward_row_pos = env.reward_col_pos = 1
    assert (env.row_pos == env.col_pos == 3)
    # Action 2 appears to move left (column decreases) — inferred from the
    # position asserts below.
    (obs, reward, done, _) = env.step(2)
    assert (obs == [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0.0]).all()
    assert (not done)
    assert (reward == 0)
    assert (env.row_pos == 3)
    assert (env.col_pos == 2)
    # Action 1 here leaves the position unchanged and costs -0.1 (presumably
    # a blocked move / wall penalty — confirm against the maze layout).
    (obs, reward, done, _) = env.step(1)
    assert (obs == [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 2, (- 0.1)]).all()
    assert (not done)
    assert (reward == (- 0.1))
    assert (env.row_pos == 3)
    assert (env.col_pos == 2)
    (obs, reward, done, _) = env.step(2)
    assert (env.row_pos == 3)
    assert (env.col_pos == 1)
    # Action 0 appears to move up (row decreases).
    (obs, reward, done, _) = env.step(0)
    assert (env.row_pos == 2)
    assert (env.col_pos == 1)
    # Reaching the reward cell (1, 1) yields reward 10.
    (obs, reward, done, _) = env.step(0)
    assert (reward == 10)
    assert (obs[(- 1)] == 10)
    assert (obs[(- 2)] == 5)
    assert (obs[(- 3)] == 1)
class YelpFullLoader(CLSBaseLoader):
    """Loader for the yelp-review-full text-classification dataset."""

    def download(self, dev_ratio: float=0.0, re_download: bool=False):
        """Fetch the dataset and carve out a dev split.

        :param dev_ratio: fraction of the training data reserved for dev.
        :param re_download: force a fresh download even if cached.
        :return: directory containing the prepared CSV splits.
        """
        dataset_name = 'yelp-review-full'
        root = self._get_dataset_path(dataset_name=dataset_name)
        # _split_dev may relocate/augment the directory; return its result.
        return _split_dev(dataset_name=dataset_name, data_dir=root, dev_ratio=dev_ratio, re_download=re_download, suffix='csv')
class UNet3DConditionModel(ModelMixin, ConfigMixin):
    """Conditional 3D UNet for video/volume diffusion.

    Takes a noisy sample, a timestep and text-encoder hidden states and
    predicts the denoised output. Structure: conv_in -> down blocks ->
    mid block -> up blocks -> conv_out, with sinusoidal timestep embeddings
    and an optional class-label embedding added to the timestep embedding.
    """

    _supports_gradient_checkpointing = True
    # BUG FIX: the original class body contained a stray bare name
    # "_to_config" at this point, which raises NameError as soon as the class
    # body executes.  It looks like the mangled remnant of a
    # "@register_to_config" decorator on __init__ (ConfigMixin relies on that
    # decorator to populate self.config, which forward() reads) — restore the
    # decorator once confirmed against the upstream diffusers source.

    def __init__(self, sample_size: Optional[int]=None, in_channels: int=4, out_channels: int=4, center_input_sample: bool=False, flip_sin_to_cos: bool=True, freq_shift: int=0, down_block_types: Tuple[str]=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'), mid_block_type: str='UNetMidBlock3DCrossAttn', up_block_types: Tuple[str]=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'), only_cross_attention: Union[(bool, Tuple[bool])]=False, block_out_channels: Tuple[int]=(320, 640, 1280, 1280), layers_per_block: int=2, downsample_padding: int=1, mid_block_scale_factor: float=1, act_fn: str='silu', norm_num_groups: int=32, norm_eps: float=1e-05, cross_attention_dim: int=1280, attention_head_dim: Union[(int, Tuple[int])]=8, dual_cross_attention: bool=False, use_linear_projection: bool=False, class_embed_type: Optional[str]=None, num_class_embeds: Optional[int]=None, upcast_attention: bool=False, resnet_time_scale_shift: str='default'):
        """Build the UNet; see the diffusers 2D UNet for parameter semantics."""
        super().__init__()
        self.sample_size = sample_size
        time_embed_dim = (block_out_channels[0] * 4)
        # Input projection and timestep embedding pipeline.
        self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
        self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
        timestep_input_dim = block_out_channels[0]
        self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
        # Optional class conditioning, later added to the timestep embedding.
        if ((class_embed_type is None) and (num_class_embeds is not None)):
            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
        elif (class_embed_type == 'timestep'):
            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
        elif (class_embed_type == 'identity'):
            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
        else:
            self.class_embedding = None
        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        # Broadcast per-block settings given as scalars.
        if isinstance(only_cross_attention, bool):
            only_cross_attention = ([only_cross_attention] * len(down_block_types))
        if isinstance(attention_head_dim, int):
            attention_head_dim = ((attention_head_dim,) * len(down_block_types))
        # --- encoder (down) path ---
        output_channel = block_out_channels[0]
        for (i, down_block_type) in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = (i == (len(block_out_channels) - 1))
            down_block = get_down_block(down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=(not is_final_block), resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attn_num_head_channels=attention_head_dim[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift)
            self.down_blocks.append(down_block)
        # --- bottleneck ---
        if (mid_block_type == 'UNetMidBlock3DCrossAttn'):
            self.mid_block = UNetMidBlock3DCrossAttn(in_channels=block_out_channels[(- 1)], temb_channels=time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim, attn_num_head_channels=attention_head_dim[(- 1)], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention)
        else:
            raise ValueError(f'unknown mid_block_type : {mid_block_type}')
        # --- decoder (up) path ---
        self.num_upsamplers = 0
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_attention_head_dim = list(reversed(attention_head_dim))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for (i, up_block_type) in enumerate(up_block_types):
            is_final_block = (i == (len(block_out_channels) - 1))
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min((i + 1), (len(block_out_channels) - 1))]
            # Every non-final up block upsamples; count them so forward() can
            # detect when resolutions are not divisible by 2**num_upsamplers.
            if (not is_final_block):
                add_upsample = True
                self.num_upsamplers += 1
            else:
                add_upsample = False
            up_block = get_up_block(up_block_type, num_layers=(layers_per_block + 1), in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attn_num_head_channels=reversed_attention_head_dim[i], dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # --- output head ---
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
        self.conv_act = nn.SiLU()
        self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)

    def set_attention_slice(self, slice_size):
        """Configure sliced attention on all attention submodules.

        ``slice_size`` may be 'auto' (half of each head dim), 'max' (slice of
        1), an int applied to every layer, or a per-layer list.
        """
        sliceable_head_dims = []

        def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
            # Collect head dims of every module that supports slicing.
            if hasattr(module, 'set_attention_slice'):
                sliceable_head_dims.append(module.sliceable_head_dim)
            for child in module.children():
                fn_recursive_retrieve_slicable_dims(child)
        for module in self.children():
            fn_recursive_retrieve_slicable_dims(module)
        num_slicable_layers = len(sliceable_head_dims)
        if (slice_size == 'auto'):
            slice_size = [(dim // 2) for dim in sliceable_head_dims]
        elif (slice_size == 'max'):
            slice_size = (num_slicable_layers * [1])
        slice_size = ((num_slicable_layers * [slice_size]) if (not isinstance(slice_size, list)) else slice_size)
        if (len(slice_size) != len(sliceable_head_dims)):
            raise ValueError(f'You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.')
        for i in range(len(slice_size)):
            size = slice_size[i]
            dim = sliceable_head_dims[i]
            if ((size is not None) and (size > dim)):
                raise ValueError(f'size {size} has to be smaller or equal to {dim}.')

        def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
            if hasattr(module, 'set_attention_slice'):
                module.set_attention_slice(slice_size.pop())
            for child in module.children():
                fn_recursive_set_attention_slice(child, slice_size)
        # Reverse so pop() hands out sizes in collection order.
        reversed_slice_size = list(reversed(slice_size))
        for module in self.children():
            fn_recursive_set_attention_slice(module, reversed_slice_size)

    def _set_gradient_checkpointing(self, module, value=False):
        """Toggle gradient checkpointing on the blocks that support it."""
        if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):
            module.gradient_checkpointing = value

    def forward(self, sample: torch.FloatTensor, timestep: Union[(torch.Tensor, float, int)], encoder_hidden_states: torch.Tensor, class_labels: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, return_dict: bool=True) -> Union[(UNet3DConditionOutput, Tuple)]:
        """Run the UNet.

        Returns a ``UNet3DConditionOutput`` (or a 1-tuple when
        ``return_dict`` is False) containing the predicted sample.
        """
        # If any spatial dim is not divisible by the overall upsample factor,
        # feed explicit upsample sizes through the decoder path.
        default_overall_up_factor = (2 ** self.num_upsamplers)
        forward_upsample_size = False
        upsample_size = None
        if any((((s % default_overall_up_factor) != 0) for s in sample.shape[(- 2):])):
            logger.info('Forward upsample size to force interpolation output size.')
            forward_upsample_size = True
        # Convert a 0/1 mask into an additive bias of 0 / -10000.
        if (attention_mask is not None):
            attention_mask = ((1 - attention_mask.to(sample.dtype)) * (- 10000.0))
            attention_mask = attention_mask.unsqueeze(1)
        if self.config.center_input_sample:
            sample = ((2 * sample) - 1.0)
        # Normalize the timestep into a 1-D tensor of batch size.
        timesteps = timestep
        if (not torch.is_tensor(timesteps)):
            # MPS does not support float64/int64 here.
            is_mps = (sample.device.type == 'mps')
            if isinstance(timestep, float):
                dtype = (torch.float32 if is_mps else torch.float64)
            else:
                dtype = (torch.int32 if is_mps else torch.int64)
            timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
        elif (len(timesteps.shape) == 0):
            timesteps = timesteps[None].to(sample.device)
        timesteps = timesteps.expand(sample.shape[0])
        t_emb = self.time_proj(timesteps)
        # time_proj may run in fp32; cast to the model dtype before embedding.
        t_emb = t_emb.to(dtype=self.dtype)
        emb = self.time_embedding(t_emb)
        if (self.class_embedding is not None):
            if (class_labels is None):
                raise ValueError('class_labels should be provided when num_class_embeds > 0')
            if (self.config.class_embed_type == 'timestep'):
                class_labels = self.time_proj(class_labels)
            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
            emb = (emb + class_emb)
        # Encoder: collect residuals for the skip connections.
        sample = self.conv_in(sample)
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if (hasattr(downsample_block, 'has_cross_attention') and downsample_block.has_cross_attention):
                (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask)
            else:
                (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb)
            down_block_res_samples += res_samples
        sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask)
        # Decoder: consume residuals in reverse order.
        for (i, upsample_block) in enumerate(self.up_blocks):
            is_final_block = (i == (len(self.up_blocks) - 1))
            res_samples = down_block_res_samples[(- len(upsample_block.resnets)):]
            down_block_res_samples = down_block_res_samples[:(- len(upsample_block.resnets))]
            if ((not is_final_block) and forward_upsample_size):
                upsample_size = down_block_res_samples[(- 1)].shape[2:]
            if (hasattr(upsample_block, 'has_cross_attention') and upsample_block.has_cross_attention):
                sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, upsample_size=upsample_size, attention_mask=attention_mask)
            else:
                sample = upsample_block(hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size)
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        if (not return_dict):
            return (sample,)
        return UNet3DConditionOutput(sample=sample)

    @classmethod
    def from_pretrained_2d(cls, pretrained_model_path, subfolder=None):
        """Build a 3D UNet from a pretrained 2D UNet checkpoint.

        BUG FIX: this method takes ``cls`` and calls ``cls.from_config`` /
        ``cls.__name__`` but was missing its @classmethod decorator
        (apparently stripped), which made ``Model.from_pretrained_2d(path)``
        bind the path to ``cls``.

        Weights for new temporal parameters (keys containing '_temp.') have
        no 2D counterpart; the freshly initialized values are injected into
        the loaded state dict so load_state_dict succeeds.
        """
        if (subfolder is not None):
            pretrained_model_path = os.path.join(pretrained_model_path, subfolder)
        config_file = os.path.join(pretrained_model_path, 'config.json')
        if (not os.path.isfile(config_file)):
            raise RuntimeError(f'{config_file} does not exist')
        with open(config_file, 'r') as f:
            config = json.load(f)
        # Rewrite the 2D block layout into its 3D equivalents.
        config['_class_name'] = cls.__name__
        config['down_block_types'] = ['CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D']
        config['up_block_types'] = ['UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D']
        from diffusers.utils import WEIGHTS_NAME
        model = cls.from_config(config)
        model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)
        if (not os.path.isfile(model_file)):
            raise RuntimeError(f'{model_file} does not exist')
        state_dict = torch.load(model_file, map_location='cpu')
        for (k, v) in model.state_dict().items():
            if ('_temp.' in k):
                state_dict.update({k: v})
        model.load_state_dict(state_dict)
        return model
class CategoryRole(ColumnRole):
    """Column role marking a categorical feature and how to encode it."""

    _name = 'Category'

    def __init__(self, dtype: Dtype=object, encoding_type: str='auto', unknown: int=5, force_input: bool=False, label_encoded: bool=False, ordinal: bool=False):
        """Store the categorical-encoding configuration.

        :param dtype: storage dtype of the column.
        :param encoding_type: encoder selection strategy ('auto' by default).
        :param unknown: cutoff used for rare/unknown categories.
        :param force_input: always feed this column to the model.
        :param label_encoded: column is already label-encoded.
        :param ordinal: treat categories as ordered.
        """
        # Storage / encoding configuration.
        self.dtype = dtype
        self.encoding_type = encoding_type
        self.unknown = unknown
        # Pipeline behavior flags.
        self.force_input = force_input
        self.label_encoded = label_encoded
        self.ordinal = ordinal
def get_data_iter(type, image_dir, batch_size, num_threads, device_id, num_gpus, crop, val_size=256, world_size=1, local_rank=0):
    """Build a DALI classification iterator for the 'train' or 'val' split.

    NOTE: ``type`` shadows the builtin but is kept for caller compatibility.
    ``device_id`` and ``num_gpus`` are accepted but unused (the pipelines are
    pinned to ``local_rank``) — preserved as-is for existing callers.

    Raises:
        ValueError: for any other ``type`` (the original silently returned
        None here, which surfaced later as a confusing AttributeError).
    """
    if (type == 'train'):
        pipe = HybridTrainPipe(batch_size=batch_size, num_threads=num_threads, device_id=local_rank, data_dir=image_dir, crop=crop, world_size=world_size, local_rank=local_rank)
        pipe.build()
        # Each rank iterates over its 1/world_size shard of the reader.
        return DALIClassificationIterator(pipe, size=(pipe.epoch_size('Reader') // world_size))
    if (type == 'val'):
        pipe = HybridValPipe(batch_size=batch_size, num_threads=num_threads, device_id=local_rank, data_dir=image_dir, crop=crop, size=val_size, world_size=world_size, local_rank=local_rank)
        pipe.build()
        return DALIClassificationIterator(pipe, size=(pipe.epoch_size('Reader') // world_size))
    raise ValueError("type must be 'train' or 'val', got %r" % (type,))
def gelu(x: stk.Matrix):
    """Apply tanh-approximated GELU elementwise to a sparse stk.Matrix.

    Only the stored values are transformed; all sparsity metadata (indices,
    offsets and their transposed variants) is carried over unchanged.
    """
    assert isinstance(x, stk.Matrix)
    activated = F.gelu(x.data, approximate='tanh')
    return stk.Matrix(x.size(), activated, x.row_indices, x.column_indices, x.offsets, x.column_indices_t, x.offsets_t, x.block_offsets_t)
def usage(progname):
    """Print the command-line usage message to stderr and exit with status 1."""
    sys.stderr.write(f'usage: {progname} num_pairs N\n')
    sys.stderr.write('    num_pairs is the number of node pairs to generate\n')
    sys.stderr.write('    N is the number of nodes (so generates in [0..N-1])\n')
    sys.exit(1)
def build(session_file):
    """Count query frequencies over a tab-separated session file and pickle them.

    Each line of ``session_file`` is one session: queries separated by tabs.
    Writes the {query: count} dict to ``<session_file>_QF.mdl``.
    """
    query_freq = {}
    total_freq = 0
    with open(session_file, 'r') as f:
        for (num, session) in enumerate(f):
            session = session.strip().split('\t')
            for query in session:
                query_freq[query] = (query_freq.get(query, 0.0) + 1.0)
            # NOTE(review): incremented once per *session*, not per query —
            # confirm that is the intended meaning of total_freq (it is not
            # used further in this function).
            total_freq += 1
            if ((num % 100000) == 0):
                logger.info('{} sessions / {} nodes in the PST'.format(num, len(query_freq)))
    logger.info('-- Closing')
    # BUG FIX: the output file was opened inline and never closed, so the
    # pickle could be left unflushed; use a context manager.
    with open((session_file + '_QF.mdl'), 'w') as out:
        cPickle.dump(query_freq, out)
class _TotalOrderingMixin(object):
__slots__ = ()
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
equal = self.__eq__(other)
if (equal is NotImplemented):
return NotImplemented
return (not equal)
def __lt__(self, other):
raise NotImplementedError
def __le__(self, other):
less = self.__lt__(other)
if ((less is NotImplemented) or (not less)):
return self.__eq__(other)
return less
def __gt__(self, other):
less = self.__lt__(other)
if (less is NotImplemented):
return NotImplemented
equal = self.__eq__(other)
if (equal is NotImplemented):
return NotImplemented
return (not (less or equal))
def __ge__(self, other):
less = self.__lt__(other)
if (less is NotImplemented):
return NotImplemented
return (not less) |
def ResNet152(num_classes=10):
    """Build a ResNet-152 (Bottleneck blocks, [3, 8, 36, 3] layout).

    NOTE(review): ``num_classes`` is accepted but never forwarded to
    ``ResNet`` — confirm whether the constructor should receive it.
    """
    return ResNet(
        Bottleneck,
        layers=[3, 8, 36, 3],
        filters=[64, 128, 256, 512],
    )
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
    """Assert that ``callable(*args, **kwargs)`` raises a FloatingPointError
    whose message contains ``strmatch``; fail otherwise."""
    try:
        callable(*args, **kwargs)
    except FloatingPointError as exc:
        # Message must mention the expected error kind.
        assert_(strmatch in str(exc), ('Did not raise floating point %s error' % strmatch))
    else:
        assert_(False, ('Did not raise floating point %s error' % strmatch))
def dev():
    """Return the preferred torch device: CUDA when available, else CPU."""
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# BUG FIX: the line "(tryfirst=True)" preceding this function is a
# SyntaxError (a bare parenthesized keyword argument).  It is almost
# certainly the mangled remnant of "@pytest.hookimpl(tryfirst=True)" —
# restore that decorator once the pytest import context is confirmed.
def pytest_report_header(config):
    """Report the configured timeout settings in the pytest header.

    Returns a one-element list of header text when a timeout is configured,
    or None (no extra header) otherwise.
    """
    if config._env_timeout:
        return [('timeout: %ss\ntimeout func_only: %s' % (config._env_timeout, config._env_timeout_func_only))]
class SUNDataLoader():
    """Minibatch loader for the SUN zero-shot-learning dataset.

    Reads ResNet-101 feature maps, labels and attribute matrices from an
    HDF5 file and exposes seen/unseen splits plus (optionally) class-balanced
    batch sampling for the seen-train split.
    """

    def __init__(self, data_path, device, is_scale=False, is_unsupervised_attr=False, is_balance=True):
        """Load the dataset rooted at ``data_path`` onto ``device``.

        is_scale: min-max scale features; is_unsupervised_attr: use word2vec
        class embeddings instead of expert attributes; is_balance: sample
        batches uniformly over seen classes.
        """
        print(data_path)
        sys.path.append(data_path)
        self.data_path = data_path
        self.device = device
        self.dataset = 'SUN'
        print(('$' * 30))
        print(self.dataset)
        print(('$' * 30))
        self.datadir = (self.data_path + 'data/{}/'.format(self.dataset))
        # Epoch bookkeeping (index_in_epoch/epochs_completed are not updated
        # in the methods visible here).
        self.index_in_epoch = 0
        self.epochs_completed = 0
        self.is_scale = is_scale
        self.is_balance = is_balance
        if self.is_balance:
            print('Balance dataloader')
        self.is_unsupervised_attr = is_unsupervised_attr
        self.read_matdataset()
        self.get_idx_classes()
        # Identity matrix over all classes (one-hot lookup helper).
        self.I = torch.eye(self.allclasses.size(0)).to(device)

    def next_batch(self, batch_size):
        """Return one training batch as (labels, features, attributes).

        With is_balance, draw roughly batch_size // n_seen_classes samples
        per sampled class; otherwise take a uniform random permutation slice.
        """
        if self.is_balance:
            idx = []
            n_samples_class = max((batch_size // self.ntrain_class), 1)
            sampled_idx_c = np.random.choice(np.arange(self.ntrain_class), min(self.ntrain_class, batch_size), replace=False).tolist()
            for i_c in sampled_idx_c:
                idxs = self.idxs_list[i_c]
                # Sampling with replacement within a class.
                idx.append(np.random.choice(idxs, n_samples_class))
            idx = np.concatenate(idx)
            idx = torch.from_numpy(idx)
        else:
            idx = torch.randperm(self.ntrain)[0:batch_size]
        batch_feature = self.data['train_seen']['resnet_features'][idx].to(self.device)
        batch_label = self.data['train_seen']['labels'][idx].to(self.device)
        # Attribute vectors indexed by class label.
        batch_att = self.att[batch_label].to(self.device)
        return (batch_label, batch_feature, batch_att)

    def get_idx_classes(self):
        """Precompute, per seen class, the training indices belonging to it."""
        n_classes = self.seenclasses.size(0)
        self.idxs_list = []
        train_label = self.data['train_seen']['labels']
        for i in range(n_classes):
            idx_c = torch.nonzero((train_label == self.seenclasses[i].cpu())).cpu().numpy()
            idx_c = np.squeeze(idx_c)
            self.idxs_list.append(idx_c)
        return self.idxs_list

    def read_matdataset(self):
        """Load features/labels/attributes from HDF5 and build the splits."""
        path = (self.datadir + 'feature_map_ResNet_101_{}.hdf5'.format(self.dataset))
        print('_____')
        print(path)
        hf = h5py.File(path, 'r')
        features = np.array(hf.get('feature_map'))
        labels = np.array(hf.get('labels'))
        # Index arrays selecting each split out of the full feature matrix.
        trainval_loc = np.array(hf.get('trainval_loc'))
        test_seen_loc = np.array(hf.get('test_seen_loc'))
        test_unseen_loc = np.array(hf.get('test_unseen_loc'))
        if self.is_unsupervised_attr:
            print('Unsupervised Attr')
            class_path = './w2v/{}_class.pkl'.format(self.dataset)
            with open(class_path, 'rb') as f:
                w2v_class = pickle.load(f)
            # NOTE(review): (50, 300) matches an AWA-style class count, not
            # SUN's — confirm this pickle actually holds SUN class vectors.
            assert (w2v_class.shape == (50, 300))
            w2v_class = torch.tensor(w2v_class).float()
            # SVD-factorize the class embeddings: att = U diag(s), w2v_att = V^T.
            (U, s, V) = torch.svd(w2v_class)
            reconstruct = torch.mm(torch.mm(U, torch.diag(s)), torch.transpose(V, 1, 0))
            print('sanity check: {}'.format(torch.norm((reconstruct - w2v_class)).item()))
            print('shape U:{} V:{}'.format(U.size(), V.size()))
            print('s: {}'.format(s))
            self.w2v_att = torch.transpose(V, 1, 0).to(self.device)
            self.att = torch.mm(U, torch.diag(s)).to(self.device)
            self.normalize_att = torch.mm(U, torch.diag(s)).to(self.device)
        else:
            print('Expert Attr')
            att = np.array(hf.get('att'))
            self.att = torch.from_numpy(att).float().to(self.device)
            original_att = np.array(hf.get('original_att'))
            self.original_att = torch.from_numpy(original_att).float().to(self.device)
            w2v_att = np.array(hf.get('w2v_att'))
            self.w2v_att = torch.from_numpy(w2v_att).float().to(self.device)
            self.normalize_att = (self.original_att / 100)
        train_feature = features[trainval_loc]
        test_seen_feature = features[test_seen_loc]
        test_unseen_feature = features[test_unseen_loc]
        if self.is_scale:
            # NOTE(review): the scaler is re-fit on each split rather than fit
            # on train and applied to test — confirm that is intentional.
            scaler = preprocessing.MinMaxScaler()
            train_feature = scaler.fit_transform(train_feature)
            test_seen_feature = scaler.fit_transform(test_seen_feature)
            test_unseen_feature = scaler.fit_transform(test_unseen_feature)
        # Only train features are cast to float here; test splits keep the
        # numpy dtype from the HDF5 file.
        train_feature = torch.from_numpy(train_feature).float()
        test_seen_feature = torch.from_numpy(test_seen_feature)
        test_unseen_feature = torch.from_numpy(test_unseen_feature)
        train_label = torch.from_numpy(labels[trainval_loc]).long()
        test_unseen_label = torch.from_numpy(labels[test_unseen_loc])
        test_seen_label = torch.from_numpy(labels[test_seen_loc])
        self.seenclasses = torch.from_numpy(np.unique(train_label.cpu().numpy())).to(self.device)
        self.unseenclasses = torch.from_numpy(np.unique(test_unseen_label.cpu().numpy())).to(self.device)
        self.ntrain = train_feature.size()[0]
        self.ntrain_class = self.seenclasses.size(0)
        self.ntest_class = self.unseenclasses.size(0)
        self.train_class = self.seenclasses.clone()
        self.allclasses = torch.arange(0, (self.ntrain_class + self.ntest_class)).long()
        # Nested dict of split -> {'resnet_features', 'labels'}.
        self.data = {}
        self.data['train_seen'] = {}
        self.data['train_seen']['resnet_features'] = train_feature
        self.data['train_seen']['labels'] = train_label
        self.data['train_unseen'] = {}
        self.data['train_unseen']['resnet_features'] = None
        self.data['train_unseen']['labels'] = None
        self.data['test_seen'] = {}
        self.data['test_seen']['resnet_features'] = test_seen_feature
        self.data['test_seen']['labels'] = test_seen_label
        self.data['test_unseen'] = {}
        self.data['test_unseen']['resnet_features'] = test_unseen_feature
        self.data['test_unseen']['labels'] = test_unseen_label
def SBM_snapshot(G_prev, alpha, sizes, probs):
    """Evolve a dynamic stochastic-block-model graph by one snapshot.

    A fresh SBM graph is drawn with the given block ``sizes``/``probs``; each
    node pair of the previous graph is then resampled from the fresh draw
    independently with probability ``alpha`` (alpha=1 returns the fresh draw).
    """
    nodelist = list(range(0, sum(sizes)))
    G_new = nx.stochastic_block_model(sizes, probs, nodelist=nodelist)
    # Fast path hoisted: alpha == 1 means a completely fresh draw, so there is
    # no need to copy or scan G_prev (the original copied G_prev first).
    if (alpha == 1.0):
        return G_new
    G_t = G_prev.copy()
    n = len(G_t)
    for i in range(0, n):
        for j in range((i + 1), n):
            prob = random.uniform(0, 1)
            if (prob <= alpha):
                # Make this pair agree with the fresh draw (the two branches
                # are mutually exclusive, hence elif).
                if (G_new.has_edge(i, j) and (not G_t.has_edge(i, j))):
                    G_t.add_edge(i, j)
                elif ((not G_new.has_edge(i, j)) and G_t.has_edge(i, j)):
                    G_t.remove_edge(i, j)
    return G_t
class HighLevelContext():
    """Collects ``behavior``/``attrs`` mappings from high-level awkward objects.

    Used as a context manager: objects flow through ``update``/``unwrap``,
    their mappings are harvested, and on exit ``finalize`` merges them
    (most-recently-seen first) unless explicit mappings were supplied.
    """

    def __init__(self, behavior: (Mapping | None)=None, attrs: (Mapping[(str, Any)] | None)=None):
        # Explicit mappings, when given, take precedence over harvested ones.
        self._behavior = behavior
        self._attrs = attrs
        self._is_finalized = False
        # Mappings harvested from Array/Record/ArrayBuilder via update().
        self._attrs_from_objects = []
        self._behavior_from_objects = []

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Finalize even when the body raised; exceptions propagate normally.
        self.finalize()

    def _ensure_finalized(self):
        if (not self._is_finalized):
            raise RuntimeError('HighLevelContext has not yet been finalized')

    def _ensure_not_finalized(self):
        if self._is_finalized:
            raise RuntimeError('HighLevelContext has already been finalized')

    # NOTE(review): in the upstream library `attrs` and `behavior` are
    # @property accessors — the decorators appear to have been stripped by
    # extraction; as written here they must be *called* as methods.
    def attrs(self) -> (Mapping[(str, Any)] | None):
        self._ensure_finalized()
        return self._attrs

    def behavior(self) -> (Mapping | None):
        self._ensure_finalized()
        return self._behavior

    def finalize(self) -> Self:
        """Merge harvested mappings (unless explicit ones were given) and
        mark the context finalized. Returns self."""
        self._ensure_not_finalized()
        if (self._behavior is None):
            # Reverse order: the most recently seen object wins the merge.
            behavior = merge_mappings(self._behavior_from_objects[::(- 1)], default=None)
        else:
            behavior = self._behavior
        if (self._attrs is None):
            attrs = merge_mappings(self._attrs_from_objects[::(- 1)], default=None)
        else:
            attrs = self._attrs
        self._attrs = attrs
        self._behavior = behavior
        self._is_finalized = True
        return self

    def update(self, obj: T) -> T:
        """Harvest attrs/behavior from ``obj`` (if it is a high-level awkward
        object) and return ``obj`` unchanged."""
        from awkward.highlevel import Array, ArrayBuilder, Record
        self._ensure_not_finalized()
        if isinstance(obj, (Array, Record, ArrayBuilder)):
            if (obj._attrs is not None):
                self._attrs_from_objects.append(obj._attrs)
            if (obj._behavior is not None):
                self._behavior_from_objects.append(obj._behavior)
        return obj

    def unwrap(self, obj: Any, *, allow_record: bool=True, allow_unknown: bool=False, none_policy: Literal[('error', 'promote', 'pass-through')]='error', primitive_policy: Literal[('error', 'promote', 'pass-through')]='promote', string_policy: Literal[('error', 'promote', 'pass-through', 'as-characters')]='as-characters', use_from_iter: bool=True, regulararray: bool=True) -> Any:
        """Harvest from ``obj`` then convert it to a layout via ak.to_layout."""
        from awkward.operations.ak_to_layout import _impl as to_layout_impl
        self.update(obj)
        return to_layout_impl(obj, allow_record=allow_record, allow_unknown=allow_unknown, none_policy=none_policy, use_from_iter=use_from_iter, primitive_policy=primitive_policy, string_policy=string_policy, regulararray=regulararray)

    def wrap(self, obj: Any, *, highlevel: bool=True, allow_other: bool=False) -> Any:
        """Wrap a layout back into a high-level object using the finalized
        attrs/behavior. Requires finalize() to have run."""
        self._ensure_finalized()
        return wrap_layout(obj, highlevel=highlevel, attrs=self._attrs, behavior=self._behavior, allow_other=allow_other)
def test_check_input5():
    """Trainer must reject a validation-metrics list containing a non-metric.

    Relies on module-level fixtures (``validation_metrics``, ``model``,
    ``dataHandler`` etc.) defined elsewhere in this test module.
    """
    with pytest.raises(TypeError, match=('Please check you are using the right metric objects,' + ' or the right order of the attributes!')):
        # Corrupt a *copy* of the metrics list by placing the model where a
        # metric object is expected.
        validation_metrics_tmp = validation_metrics.copy()
        validation_metrics_tmp[0] = model
        trainer = Trainer(dataHandler, model, losses, validation_metrics_tmp, save_to_path, yaml_path)
        trainer.train()
class ConvBnReLU3d(ConvBn3d):
    """Fused Conv3d + BatchNorm3d + ReLU module for quantization-aware training."""

    # Float counterparts used when converting to/from QAT modules.
    _FLOAT_MODULE = nni.ConvBnReLU3d
    _FLOAT_CONV_MODULE = nn.Conv3d
    _FLOAT_BN_MODULE = nn.BatchNorm3d
    _FLOAT_RELU_MODULE = nn.ReLU

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=None, padding_mode='zeros', eps=1e-05, momentum=0.1, freeze_bn=False, qconfig=None):
        super(ConvBnReLU3d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, eps, momentum, freeze_bn, qconfig)

    def forward(self, input):
        # Fused conv+bn from the parent, followed by ReLU.
        return F.relu(ConvBn3d._forward(self, input))

    @classmethod
    def from_float(cls, mod):
        """Create a QAT module from a float module.

        BUG FIX: this method takes ``cls`` and calls ``super(...).from_float``
        but was missing its @classmethod decorator (apparently stripped) —
        without it, ``ConvBnReLU3d.from_float(mod)`` binds ``mod`` to ``cls``.
        """
        return super(ConvBnReLU3d, cls).from_float(mod)
class Conv1x1(nn.Module):
    """Pointwise (1x1) convolution followed by batch norm and in-place ReLU."""

    def __init__(self, in_channels, out_channels, stride=1, groups=1):
        super(Conv1x1, self).__init__()
        # Bias is omitted because the following BatchNorm re-centers anyway.
        self.conv = nn.Conv2d(in_channels, out_channels, 1, stride=stride, padding=0, bias=False, groups=groups)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Apply conv -> bn -> relu."""
        return self.relu(self.bn(self.conv(x)))
class PerceiverForOpticalFlow(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable."""
    # Backends whose absence this dummy stands in for.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises with a message naming the missing backend ('torch').
        requires_backends(self, ['torch'])
def test_clip():
    """Check attribute values and property types for a known development clip."""
    default_clipid = 'development/1'
    dataset = dcase23_task6b.Dataset(TEST_DATA_HOME)
    clip = dataset.clip(default_clipid)
    # Static attributes expected on the clip object.
    expected_attributes = {'audio_path': os.path.join(os.path.normpath('tests/resources/sound_datasets/dcase23_task6b/'), 'development/1.wav'), 'clip_id': 'development/1'}
    # Lazily computed properties and their expected Python types.
    expected_property_types = {'audio': tuple, 'file_name': str, 'keywords': str, 'sound_id': str, 'sound_link': str, 'start_end_samples': str, 'manufacturer': str, 'license': str}
    run_clip_tests(clip, expected_attributes, expected_property_types)
def dynamic_range_compression(x, C=1, clip_val=1e-05):
    """Log-compress ``x``: log(clamp(x, min=clip_val) * C).

    ``clip_val`` bounds the input away from zero so the log is finite;
    ``C`` scales the values before compression.
    """
    clamped = torch.clamp(x, min=clip_val)
    return torch.log(clamped * C)
def get_quantization_quantizers(node: BaseNode) -> Tuple[(Dict, List)]:
    """Build (weight_quantizers, activation_quantizers) for a graph node.

    Weight quantizers map each kernel attribute to one shared quantizer;
    activation quantizers are one per node output (empty when disabled).
    """
    weight_quantizers = {}
    activation_quantizers = []
    if node.is_weights_quantization_enabled():
        # A single quantizer instance is shared across all kernel attributes.
        quantizer = get_weights_quantizer_for_node(node)
        weight_quantizers = {attr: quantizer for attr in DEFAULT_KERAS_INFO.get_kernel_op_attributes(node.type)}
    if node.is_activation_quantization_enabled():
        out_count = (len(node.output_shape) if isinstance(node.output_shape, list) else 1)
        activation_quantizers = ([get_activations_quantizer_for_node(node)] * out_count)
    return (weight_quantizers, activation_quantizers)
def run():
    """Entry point: set up distributed training and launch the trainer.

    Relies on module-level ``config`` and ``args`` objects (and the sacred
    ``ex`` experiment when neptune logging is enabled) defined elsewhere.
    """
    logger = config.get_logger('train')
    # Silence tokenizer fork warnings and force offline transformer loading.
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'
    os.environ['TRANSFORMERS_OFFLINE'] = '1'
    if (config['visualizer']['type'] != ''):
        visualizer = config.initialize(name='visualizer', module=module_vis, exp_name=config['name'], web_dir=config._web_log_dir)
    else:
        visualizer = None
    # One process per GPU; NCCL rendezvous via TCP at the master address.
    torch.cuda.set_device(args.local_rank)
    torch.distributed.init_process_group(backend='nccl', init_method='tcp://{}:{}'.format(args.master_address, args.master_port), rank=args.rank, world_size=args.world_size)
    device = torch.device(f'cuda:{args.local_rank}')
    print('world_size', args.world_size, flush=True)
    print('local_rank: ', args.local_rank, flush=True)
    tokenizer = transformers.AutoTokenizer.from_pretrained(config['arch']['args']['text_params']['model'], TOKENIZERS_PARALLELISM=False)
    (data_loader, valid_data_loader) = init_dataloaders(config, module_data)
    print('Train dataset: ', [x.n_samples for x in data_loader], ' samples')
    print('Val dataset: ', [x.n_samples for x in valid_data_loader], ' samples')
    model = config.initialize('arch', module_arch)
    # Log the model architecture once (rank 0 only).
    if (args.local_rank == 0):
        logger.info(model)
    loss = config.initialize(name='loss', module=module_loss)
    metrics = [getattr(module_metric, met) for met in config['metrics']]
    # Only optimize parameters that require gradients; print their names.
    trainable_params = filter((lambda p: p.requires_grad), model.parameters())
    for (name, param) in model.named_parameters():
        if param.requires_grad:
            print(name)
    optimizer = config.initialize('optimizer', transformers, trainable_params)
    lr_scheduler = None
    if ('lr_scheduler' in config._config):
        if hasattr(transformers, config._config['lr_scheduler']['type']):
            lr_scheduler = config.initialize('lr_scheduler', transformers, optimizer)
        else:
            print('lr scheduler not found')
    # Neptune logging routes through the sacred experiment object.
    if config['trainer']['neptune']:
        writer = ex
    else:
        writer = None
    trainer = Multi_ObjectMCTrainer_dist(args, model, loss, metrics, optimizer, config=config, data_loader=data_loader, valid_data_loader=valid_data_loader, lr_scheduler=lr_scheduler, visualizer=visualizer, writer=writer, tokenizer=tokenizer, max_samples_per_epoch=config['trainer']['max_samples_per_epoch'])
    trainer.train()
def check_pipeline(dir_1: str, dir_2: str):
    """Assert that two pipeline output directories are equivalent.

    Compares the directory listings, the contents of the ``test_files``
    sub-directories, and every ``.hdf5`` file (via ``check_diff_hdf5``).

    Raises:
        AssertionError: if the two directories differ.
    """
    # BUG FIX: the original compared the return values of list.sort(), which
    # are always None, so the assertion was vacuously true.  Compare sorted
    # copies instead.  (The literal ['test_files', 'splits'] comparison was
    # dropped: these directories also contain .hdf5 files, handled below.)
    assert sorted(os.listdir(dir_1)) == sorted(os.listdir(dir_2))
    test_path_dir1 = os.path.join(dir_1, 'test_files')
    test_path_dir2 = os.path.join(dir_2, 'test_files')
    # BUG FIX: the original tested test_path_dir1 twice.
    if os.path.exists(test_path_dir1) or os.path.exists(test_path_dir2):
        # BUG FIX: cmpfiles returns a (match, mismatch, errors) tuple, which is
        # always truthy; assert on its components instead.
        (match, mismatch, errors) = cmpfiles(test_path_dir1, test_path_dir2, os.listdir(test_path_dir1), shallow=False)
        assert not mismatch and not errors
    hdf5_files_1 = sorted(f for f in os.listdir(dir_1) if '.hdf5' in f)
    hdf5_files_2 = sorted(f for f in os.listdir(dir_2) if '.hdf5' in f)
    assert hdf5_files_1 == hdf5_files_2
    # Pairwise compare the HDF5 files with matching names.
    for (name_1, name_2) in zip(hdf5_files_1, hdf5_files_2):
        check_diff_hdf5(os.path.join(dir_1, name_1), os.path.join(dir_2, name_2))
def check_n_clusters(n_clusters: int, n_row: int, n_min: int=0):
    """Validate that n_min <= n_clusters <= n_row, raising ValueError otherwise."""
    if n_clusters > n_row:
        raise ValueError('The number of clusters exceeds the number of rows.')
    if n_clusters < n_min:
        raise ValueError('The number of clusters must be at least {}.'.format(n_min))
class MLP(nn.Sequential):
    """One-hidden-layer perceptron: flatten -> linear -> softplus -> linear."""

    def __init__(self, inputs, outputs, hidden=100):
        layers = [
            Flatten(inputs),
            nn.Linear(inputs, hidden),
            nn.Softplus(),
            nn.Linear(hidden, outputs),
        ]
        super().__init__(*layers)
def _empty_body_uv_results():
return OrderedDict({'body_uv': OrderedDict([('AP', (- 1)), ('AP50', (- 1)), ('AP75', (- 1)), ('APm', (- 1)), ('APl', (- 1))])}) |
def workspace(name='workspace'):
    """Return (path, up_to_date) for the named libgap workspace file.

    up_to_date is False when the file is missing; otherwise it reports
    whether the file's mtime is at least the reference timestamp().
    """
    path = gap_workspace_file('libgap', name)
    try:
        mtime = os.path.getmtime(path)
    except OSError:
        # The workspace file does not exist (or is unreadable).
        return (path, False)
    return (path, mtime >= timestamp())
def package_files(directory, relative_parent=''):
    """Recursively list files under `directory` as paths relative to `relative_parent`.

    Hidden files (leading '.') are skipped; directories are always descended
    into, regardless of their name.
    """
    collected = []
    for entry in os.listdir(directory):
        full_path = os.path.join(directory, entry)
        rel_path = os.path.join(relative_parent, entry)
        if not os.path.isfile(full_path):
            collected.extend(package_files(full_path, rel_path))
        elif not str(entry).startswith('.'):
            collected.append(rel_path)
    return collected
def test_bitpacked_fields():
    """Round-trip values through Taichi quantized-int fields packed into one physical word."""

    def test_single_bitpacked_fields(physical_type, compute_type, quant_bits, test_case):
        # Fresh runtime per case; debug=True enables Taichi's runtime assertions.
        ti.init(arch=ti.cpu, debug=True)
        # Three quantized int types (signed / unsigned / signed) with the given bit widths.
        qit1 = ti.types.quant.int(quant_bits[0], True, compute_type)
        qit2 = ti.types.quant.int(quant_bits[1], False, compute_type)
        qit3 = ti.types.quant.int(quant_bits[2], True, compute_type)
        a = ti.field(dtype=qit1)
        b = ti.field(dtype=qit2)
        c = ti.field(dtype=qit3)
        # Pack all three fields into a single word of `physical_type` bits.
        bitpack = ti.BitpackedFields(max_num_bits=physical_type)
        bitpack.place(a, b, c)
        ti.root.place(bitpack)

        # NOTE(review): these two look like Taichi kernels; the @ti.kernel
        # decorators appear to be missing — confirm against the original test.
        def set_val(test_val: ti.types.ndarray()):
            a[None] = test_val[0]
            b[None] = test_val[1]
            c[None] = test_val[2]

        def verify_val(test_val: ti.types.ndarray()):
            assert (a[None] == test_val[0])
            assert (b[None] == test_val[1])
            assert (c[None] == test_val[2])
        set_val(test_case)
        verify_val(test_case)
        ti.reset()
    # Extremal values for each width: max positive for the first two, min negative for the third.
    test_single_bitpacked_fields(8, ti.i8, [3, 3, 2], np.array([((2 ** 2) - 1), ((2 ** 3) - 1), (- (2 ** 1))]))
    test_single_bitpacked_fields(16, ti.i16, [4, 7, 5], np.array([((2 ** 3) - 1), ((2 ** 7) - 1), (- (2 ** 4))]))
    test_single_bitpacked_fields(32, ti.i32, [17, 11, 4], np.array([((2 ** 16) - 1), ((2 ** 10) - 1), (- (2 ** 3))]))
    test_single_bitpacked_fields(64, ti.i64, [32, 23, 9], np.array([((2 ** 31) - 1), ((2 ** 23) - 1), (- (2 ** 8))]))
    # Cases where the compute type is narrower than the physical word.
    test_single_bitpacked_fields(32, ti.i16, [7, 12, 13], np.array([((2 ** 6) - 1), ((2 ** 12) - 1), (- (2 ** 12))]))
    test_single_bitpacked_fields(64, ti.i32, [18, 22, 24], np.array([((2 ** 17) - 1), ((2 ** 22) - 1), (- (2 ** 23))]))
    test_single_bitpacked_fields(16, ti.i16, [5, 5, 6], np.array([15, 5, 20]))
    test_single_bitpacked_fields(32, ti.i32, [10, 10, 12], np.array([11, 19, 2020]))
def draw_overlay(img, mask, color=(0, 0, 255), op=0.5):
    """Alpha-blend `color` onto `img` in place wherever `mask` is truthy.

    Args:
        img: HxWxC image array, modified in place.
        mask: boolean/truthy HxW mask selecting the pixels to tint.
        color: channel triple to blend in (channel order is the caller's convention).
        op: opacity of the overlay color in [0, 1].
    """
    # FIX: replaced the mutable list default with an equivalent tuple, and
    # compute np.where(mask) once instead of twice.
    selected = np.where(mask)
    img[selected] = img[selected] * (1 - op) + np.array(color) * op
def get_logger(logdir):
    """Create the 'emotion' logger writing INFO+ records to a timestamped file in `logdir`."""
    logger = logging.getLogger('emotion')
    # Timestamp without sub-second precision, with separators made filename-safe.
    now = str(datetime.datetime.now()).split('.')[0]
    stamp = now.replace(' ', '_').replace(':', '_').replace('-', '_')
    handler = logging.FileHandler(os.path.join(logdir, 'run_{}.log'.format(stamp)))
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger
def DeepR152V3PlusD_OS8(args, num_classes, criterion, criterion_aux):
    """Factory for DeepLabv3+ with a ResNet-152 trunk (variant 'D', skip 'm1')."""
    print('Model : DeepLabv3+, Backbone : ResNet-152')
    model = DeepV3Plus(num_classes, trunk='resnet-152', criterion=criterion, criterion_aux=criterion_aux, variant='D', skip='m1', args=args)
    return model
class WQSymBases(Category_realization_of_parent):
    """Category of bases of WQSym (word quasi-symmetric functions).

    The ``graded`` flag selects between graded and filtered realizations.
    """

    def __init__(self, base, graded):
        # graded: bool distinguishing graded vs. filtered bases.
        self._graded = graded
        Category_realization_of_parent.__init__(self, base)

    def _repr_(self):
        """Return a string describing this category."""
        if self._graded:
            type_str = 'graded'
        else:
            type_str = 'filtered'
        return 'Category of {} bases of {}'.format(type_str, self.base())

    def super_categories(self):
        """Return the super-categories: realizations of the base and of graded
        Hopf algebras, plus the connected (graded or filtered) Hopf algebra category."""
        R = self.base().base_ring()
        cat = HopfAlgebras(R).Graded().WithBasis()
        if self._graded:
            cat = cat.Graded()
        else:
            cat = cat.Filtered()
        return [self.base().Realizations(), HopfAlgebras(R).Graded().Realizations(), cat.Connected()]

    class ParentMethods():
        def _repr_(self):
            """Name the parent after the ambient algebra and this basis."""
            return '{} in the {} basis'.format(self.realization_of(), self._basis_name)

        def __getitem__(self, p):
            """Shorthand element access: accept a single integer, a word of
            integers or strings, or anything convertible to the index set."""
            # A single integer is treated as the one-letter word [p].
            if (p in ZZ):
                p = [ZZ(p)]
            if all(((s in ZZ) for s in p)):
                return self.monomial(self._indices.from_finite_word([ZZ(s) for s in p]))
            if all((isinstance(s, str) for s in p)):
                return self.monomial(self._indices.from_finite_word(p))
            try:
                return self.monomial(self._indices(p))
            except TypeError:
                raise ValueError(('cannot convert %s into an element of %s' % (p, self._indices)))

        def is_field(self, proof=True):
            """WQSym is never a field."""
            return False

        def is_commutative(self):
            # Non-commutative unless the base ring is the zero ring.
            return self.base_ring().is_zero()

        def one_basis(self):
            """Index of the multiplicative identity: the empty ordered set partition."""
            OSP = self.basis().keys()
            return OSP([])

        def degree_on_basis(self, t):
            """Degree of a basis element: total size of the ordered set partition `t`."""
            return sum((len(part) for part in t))

    class ElementMethods():
        def algebraic_complement(self):
            """Image under the algebraic complement involution (reverse each
            index), computed by passing through the M basis."""
            parent = self.parent()
            M = parent.realization_of().M()
            dct = {I.reversed(): coeff for (I, coeff) in M(self)}
            return parent(M._from_dict(dct, remove_zeros=False))

        def coalgebraic_complement(self):
            """Image under the coalgebraic complement involution (complement
            each index), computed by passing through the M basis."""
            parent = self.parent()
            M = parent.realization_of().M()
            dct = {I.complement(): coeff for (I, coeff) in M(self)}
            return parent(M._from_dict(dct, remove_zeros=False))

        def star_involution(self):
            """Image under the star involution (reverse then complement),
            computed by passing through the M basis."""
            parent = self.parent()
            M = parent.realization_of().M()
            dct = {I.reversed().complement(): coeff for (I, coeff) in M(self)}
            return parent(M._from_dict(dct, remove_zeros=False))

        def to_quasisymmetric_function(self):
            """Project onto QSym by sending each index to its underlying
            composition (M basis to the QSym Monomial basis)."""
            from sage.combinat.ncsf_qsym.qsym import QuasiSymmetricFunctions
            M = QuasiSymmetricFunctions(self.parent().base_ring()).Monomial()
            MW = self.parent().realization_of().M()
            return M.sum_of_terms(((i.to_composition(), coeff) for (i, coeff) in MW(self)))
class SIMPLE_LAYER(torch.nn.Module):
    """Single linear layer with a GNN-style interface (the graph structure is ignored)."""

    def __init__(self, feat_in, feat_out):
        super().__init__()
        self.temp_layer = Linear(feat_in, feat_out)

    def forward(self, x, edge_index):
        # edge_index is accepted for interface compatibility but unused.
        return self.temp_layer(x)
def dyn_batch_without_padding(new, i, sofar):
    """Accumulate dynamic batch size: add the new example's longest field length to `sofar`."""
    lengths = [len(new.src), len(new.trg)]
    if args.distillation:
        # Distillation examples also carry a decoded sequence.
        lengths.append(len(new.dec))
    return sofar + max(lengths)
def get_union_variant(x: Field):
    """Emit the C# declaration for one member of a union struct ([FieldOffset(0)])."""
    is_dyn_array = bool(x.count) and not isinstance(x.count, int)
    is_ptr = x.by_ref or x.by_mut or is_dyn_array
    name = _T(x.name)
    decl = '[FieldOffset(0)] '
    if is_ptr:
        # By-reference and dynamically sized members are marshalled as raw pointers.
        decl += f'IntPtr {name}'
    elif x.count:
        # Fixed-size inline array.
        decl += f'[MarshalAs(UnmanagedType.ByValArray, SizeConst={x.count})] '
        decl += f'public {get_type_name(x.type)}[] {name}'
    else:
        decl += f'public {get_type_name(x.type)} {name}'
    return decl
class PropertyDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
    """Documenter for property objects, including classmethod-wrapped ("class") properties."""
    objtype = 'property'
    member_order = 60
    # Must beat AttributeDocumenter so properties are not rendered as plain attributes.
    priority = (AttributeDocumenter.priority + 1)

    # NOTE(review): this takes `cls` but carries no @classmethod decorator here;
    # upstream Sphinx declares it as a classmethod — confirm the decorator was not lost.
    def can_document_member(cls, member: Any, membername: str, isattr: bool, parent: Any) -> bool:
        """Report whether `member` is a (possibly classmethod-wrapped) property of a class."""
        if isinstance(parent, ClassDocumenter):
            if inspect.isproperty(member):
                return True
            else:
                # The classmethod wrapper hides the property; look it up on the raw class __dict__.
                __dict__ = safe_getattr(parent.object, '__dict__', {})
                obj = __dict__.get(membername)
                return (isinstance(obj, classmethod) and inspect.isproperty(obj.__func__))
        else:
            return False

    def import_object(self, raiseerror: bool=False) -> bool:
        """Import the target; unwrap classmethod-wrapped properties and set self.isclassmethod."""
        ret = super().import_object(raiseerror)
        if (ret and (not inspect.isproperty(self.object))):
            __dict__ = safe_getattr(self.parent, '__dict__', {})
            obj = __dict__.get(self.objpath[(- 1)])
            if (isinstance(obj, classmethod) and inspect.isproperty(obj.__func__)):
                # Document the underlying property and remember it was a class property.
                self.object = obj.__func__
                self.isclassmethod = True
                return True
            else:
                return False
        self.isclassmethod = False
        return ret

    def document_members(self, all_members: bool=False) -> None:
        """Properties have no members to document."""
        pass

    def get_real_modname(self) -> str:
        """Module name of the owning class, falling back to this documenter's modname."""
        real_modname = self.get_attr((self.parent or self.object), '__module__', None)
        return (real_modname or self.modname)

    def add_directive_header(self, sig: str) -> None:
        """Emit the directive header plus :abstractmethod:/:classmethod:/:type: options."""
        super().add_directive_header(sig)
        sourcename = self.get_sourcename()
        if inspect.isabstractmethod(self.object):
            self.add_line(' :abstractmethod:', sourcename)
        if self.isclassmethod:
            self.add_line(' :classmethod:', sourcename)
        # The property's getter carries the return annotation used for :type:.
        if safe_getattr(self.object, 'fget', None):
            func = self.object.fget
        elif safe_getattr(self.object, 'func', None):
            func = self.object.func
        else:
            func = None
        if (func and (self.config.autodoc_typehints != 'none')):
            try:
                signature = inspect.signature(func, type_aliases=self.config.autodoc_type_aliases)
                if (signature.return_annotation is not Parameter.empty):
                    if (self.config.autodoc_typehints_format == 'short'):
                        objrepr = stringify_annotation(signature.return_annotation, 'smart')
                    else:
                        objrepr = stringify_annotation(signature.return_annotation)
                    self.add_line((' :type: ' + objrepr), sourcename)
            except TypeError as exc:
                # Some callables (e.g. builtins) cannot be introspected.
                logger.warning(__('Failed to get a function signature for %s: %s'), self.fullname, exc)
                return None
            except ValueError:
                return None
class NumericRange():
    """A union of numeric intervals with per-interval inclusivity flags and null handling."""

    def __init__(self, ranges, inclusive_intervals=None, null_value=None, is_not_null_condition=False):
        self.is_not_null_condition = is_not_null_condition
        self.ranges = ranges
        self.null_value = null_value
        if inclusive_intervals is None:
            # Default every interval to closed on both ends.
            self.inclusive_intervals = [[True, True] for _ in self.ranges]
        else:
            self.inclusive_intervals = inclusive_intervals

    def is_impossible(self):
        """True when no interval remains, i.e. the condition can never hold."""
        return len(self.ranges) == 0

    def get_ranges(self):
        """Return the list of intervals."""
        return self.ranges
def default_conv(in_channels, out_channels, kernel_size, bias=True, groups=1):
    """2-D convolution with 'same'-style padding for odd kernel sizes."""
    padding = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=bias, groups=groups)
def EmbeddingLookupFeatures(params, sparse_features, allow_weights):
    """Look up embeddings for sparse features and sum them per example.

    Args:
        params: embedding matrix (or list of shards) to look up into.
        sparse_features: serialized sparse-features tensor, unpacked by
            gen_parser_ops.unpack_sparse_features into (indices, ids, weights).
        allow_weights: if True, scale each embedding by its feature weight.

    Returns:
        Tensor of per-example summed embeddings.
    """
    if (not isinstance(params, list)):
        params = [params]
    sparse_features = tf.convert_to_tensor(sparse_features)
    (indices, ids, weights) = gen_parser_ops.unpack_sparse_features(sparse_features)
    embeddings = tf.nn.embedding_lookup(params, ids)
    if allow_weights:
        # Append a trailing 1 so weights broadcast over the embedding dimension.
        # NOTE: legacy TF (<1.0) argument order — tf.concat(axis, values).
        broadcast_weights_shape = tf.concat(0, [tf.shape(weights), [1]])
        embeddings *= tf.reshape(weights, broadcast_weights_shape)
    return tf.unsorted_segment_sum(embeddings, indices, tf.size(sparse_features))
def timeit(f):
    """Decorator that records each call's wall-clock duration in a sliding window.

    Durations are appended to the shared ``timeit._elapsed`` mapping, keyed by
    ``repr(f)``; call ``timeit.summarize()`` to print per-function mean latency
    over the most recent ``WINDOW_SIZE`` samples.
    """
    from functools import wraps
    WINDOW_SIZE = 128
    # BUG FIX: the original re-created _elapsed on every decoration, wiping the
    # timings of all previously decorated functions.  Initialize it only once.
    # (Also removed a stray no-op `(f)` expression from the original.)
    if not hasattr(timeit, '_elapsed'):
        timeit._elapsed = defaultdict(lambda: deque(maxlen=WINDOW_SIZE))

    def summarize():
        print('\x1b[33m----- Summarize -----\x1b[0m')
        for (k, q) in timeit._elapsed.items():
            print(f'{k:55s} took: {np.mean(q):.5f} sec [{len(q)} samples]')
    timeit.summarize = summarize

    @wraps(f)
    def wrap(*args, **kwargs):
        start = time.time()
        result = f(*args, **kwargs)
        timeit._elapsed[repr(f)].append(time.time() - start)
        return result
    return wrap
def get_bn_params(sess, name):
    """Fetch batch-norm statistics (moving mean/variance) and beta for a layer.

    Args:
        sess: live TensorFlow session whose graph holds the batch-norm variables.
        name: graph scope of the batch-norm layer.

    Returns:
        Tuple (moving_mean, moving_var, beta) of numpy arrays.
    """
    # BUG FIX: TensorFlow tensor names always use '/' separators; os.path.join
    # would produce backslashes on Windows and fail the graph lookup.
    moving_mean_tensor = sess.graph.get_tensor_by_name(name + '/moving_mean:0')
    moving_var_tensor = sess.graph.get_tensor_by_name(name + '/moving_variance:0')
    beta_tensor = sess.graph.get_tensor_by_name(name + '/beta:0')
    # Fetch all three in a single run instead of three separate graph executions.
    (moving_mean, moving_var, beta) = sess.run([moving_mean_tensor, moving_var_tensor, beta_tensor])
    return (moving_mean, moving_var, beta)
def test_listarrayA64():
    """Slice listarrayA64 with permutations of integer, slice, and fancy-index cuts
    and check agreement with modelA (plus typetracer form stability for depth < 3)."""

    def _assert_matches(cuts, depth):
        assert to_list(modelA[cuts]) == to_list(listarrayA64[cuts])
        if depth < 3:
            assert listarrayA64.to_typetracer()[cuts].form == listarrayA64[cuts].form

    integer_cuts = (0, 1, 4, (- 5))
    slice_cuts = (slice(None), slice(1, None), slice(None, (- 1)), slice(None, None, 2))
    mixed_cuts = (slice(1, None), slice(None, (- 1)), 2, (- 2))
    fancy_cuts = ([2, 0, 0, 1], [1, (- 2), 0, (- 1)], 2, (- 2))
    for options in (integer_cuts, slice_cuts, mixed_cuts, fancy_cuts):
        for depth in (0, 1, 2, 3):
            for cuts in itertools.permutations(options, depth):
                _assert_matches(cuts, depth)

    fancy_and_slices = ([2, 0, 0, 1], [1, (- 2), 0, (- 1)], slice(1, None), slice(None, (- 1)))
    for depth in (0, 1, 2, 3):
        for cuts in itertools.permutations(fancy_and_slices, depth):
            # Trim leading/trailing slices; skip combinations with interior slices.
            while len(cuts) > 0 and isinstance(cuts[0], slice):
                cuts = cuts[1:]
            while len(cuts) > 0 and isinstance(cuts[(- 1)], slice):
                cuts = cuts[:(- 1)]
            if any(isinstance(x, slice) for x in cuts):
                continue
            _assert_matches(cuts, depth)
class TensorOutputOp():
    """Code emitter for a CUTLASS epilogue visitor that writes an intermediate result to a tensor.

    Wraps another visitor; emit() renders a C++ ``VisitorOpTensorOutput``
    instantiation parameterized on the accumulator type, the operation's
    output tile iterator and the wrapped visitor.
    """
    # C++ template filled in by emit().
    Template = '\n${visitor}\n\nusing ${instance_name} = cutlass::epilogue::threadblock::VisitorOpTensorOutput<\n ${element_accumulator}, ${output_tile_iterator}, ${visitor_name}>;\n'
    # Class-wide counter giving each instance a unique C++ type name.
    counter = 0

    def __init__(self, element_accumulator, visitor) -> None:
        # element_accumulator: accumulator data type tag; visitor: wrapped visitor op.
        self.element_accumulator = element_accumulator
        self.visitor = visitor
        self.instance_name = ('TensorOutputOp%d' % TensorOutputOp.counter)
        TensorOutputOp.counter += 1

        # ctypes mirror of the C++ argument struct.  Defined inside __init__
        # because the nested visitor's argument type is only known at runtime.
        class _Arguments(ctypes.Structure):
            _fields_ = [('output_ptr', ctypes.c_void_p), ('ldt', ctypes.c_int), ('batch_stride', ctypes.c_longlong), ('visitor_arg', self.visitor.argument_type)]

            def __init__(self, output_ptr, ldt, visitor_arg, batch_stride=0) -> None:
                self.output_ptr = int(output_ptr)
                self.ldt = int(ldt)
                self.visitor_arg = visitor_arg
                self.batch_stride = batch_stride
        self.argument_type = _Arguments

    def emit(self, operation):
        """Render the C++ visitor instantiation for `operation`, including the wrapped visitor's code."""
        values = {'instance_name': self.instance_name, 'element_accumulator': DataTypeTag[self.element_accumulator], 'output_tile_iterator': (operation.procedural_name() + '_default::Epilogue::OutputTileIterator'), 'visitor_name': self.visitor.instance_name, 'visitor': self.visitor.emit(operation)}
        return SubstituteTemplate(self.Template, values)
def generate_python_code(matcher):
    """Run code generation for `matcher` and pass the generated 2-tuple through unchanged."""
    generator = CodeGenerator(matcher)
    (first, second) = generator.generate_code()
    return (first, second)
def close_file():
    """Close the module-level _FILE handle if it is open, then reset it to None."""
    global _FILE
    if _FILE is not None:
        _FILE.close()
        _FILE = None
def format_next(text, new_text, pos, can_newline, width, ispaces):
    """Append new_text to text, breaking onto a new indented line when it would pass width.

    Returns the updated (text, pos, can_newline) triple, where pos is the
    current column and can_newline=False prevents two consecutive breaks.
    """
    added = len(new_text)
    if can_newline and (pos + added) > width:
        # Wrap: start a fresh line at the continuation indent.
        return (text + '\n' + ispaces + new_text, added, False)
    if pos > 0:
        # Mid-line: join with a single space.
        return (text + ' ' + new_text, pos + added + 1, True)
    # Start of line: append directly.
    return (text + new_text, pos + added, True)
def check_spec_implementation():
    """Print the names of kernels whose specification entry lacks a 'def awkward' implementation."""
    spec_path = os.path.join(CURRENT_DIR, '..', 'kernel-specification.yml')
    with open(spec_path) as specfile:
        kernels = yaml.safe_load(specfile)['kernels']
    missing = [spec['name'] for spec in kernels if 'def awkward' not in spec['definition']]
    if missing:
        # Header printed once, before the first offending kernel name.
        print('\nKernels not implemented in specification file - ')
        for kernel_name in missing:
            print(kernel_name)
def evaluate(model, weights, dataset, datatype, split, count, shot, seed, gpu, hist_path, seg_path):
    """Evaluate a segmentation model on a dataset split; print loss and per-metric scores.

    Args:
        model: model name/key understood by prepare_model.
        weights: path to the checkpoint to load.
        dataset, datatype, split: dataset selection; `datatype` indexes the
            module-level `datatypes` registry of preparation functions.
        count, shot: few-shot evaluation parameters forwarded to the dataset.
        seed: RNG seed applied to torch, CUDA, and numpy.
        gpu: CUDA device index, applied via CUDA_VISIBLE_DEVICES.
        hist_path: if not None, destination passed to metrics.save().
        seg_path: if not None, directory where predictions are saved as palettized PNGs.
    """
    print('evaluating {} with weights {} on {} {}-{}'.format(model, weights, datatype, dataset, split))
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    device = torch.device('cuda:0')
    # Seed every RNG for reproducible evaluation.
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    prepare_data = datatypes[datatype]
    dataset = prepare_data(dataset, split, count=count, shot=shot)
    loader = prepare_loader(dataset, evaluation=True)
    model = prepare_model(model, dataset.num_classes, weights=weights).to(device)
    model.eval()
    loss_fn = nn.CrossEntropyLoss(reduction='mean', ignore_index=dataset.ignore_index)
    total_loss = 0.0
    metrics = SegScorer(len(dataset.classes))
    with torch.no_grad():
        for (i, data) in enumerate(loader):
            # Batch layout: (*inputs, target, aux); aux carries metadata such as 'slug'.
            (inputs, target, aux) = (data[:(- 2)], data[(- 2)], data[(- 1)])
            # Inputs may be nested lists of tensors (e.g. support sets); move everything to the device.
            inputs = [(inp.to(device) if (not isinstance(inp, list)) else [[i_.to(device) for i_ in in_] for in_ in inp]) for inp in inputs]
            target = target.to(device)
            scores = model(*inputs)
            loss = loss_fn(scores, target)
            total_loss += loss.item()
            # Per-pixel argmax over classes (first batch element only).
            (_, seg) = scores.data[0].max(0)
            metrics.update(seg.to('cpu').numpy(), target.to('cpu').numpy(), aux)
            if (seg_path is not None):
                # Save the prediction as a palettized PNG named from the sample metadata.
                seg = Image.fromarray(seg.to('cpu').numpy().astype(np.uint8), mode='P')
                save_id = f"{aux['slug']}_{aux.get('cls', 'all')}_{aux.get('inst', 'all')}"
                seg.save(f'{seg_path}/{save_id}.png')
    print('loss {}'.format((total_loss / len(dataset))))
    # Report the NaN-ignoring mean of each metric over classes.
    for (metric, score) in metrics.score().items():
        score = np.nanmean(score)
        print('{:10s} {:.3f}'.format(metric, score))
    if (hist_path is not None):
        metrics.save(hist_path)
class PLBartTokenizer(metaclass=DummyObject):
    """Placeholder for PLBartTokenizer when the sentencepiece backend is unavailable.

    Instantiation raises an informative error via requires_backends instead of
    failing with an obscure ImportError.
    """
    # Backends that must be installed for the real class to be importable.
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])
def read_script_from_list_string(list_string):
    """Parse script lines from an iterable of strings into a Script.

    Lines without a '[' character, blank lines, and '#' comments are skipped;
    surviving lines are numbered sequentially starting at 1.
    """
    parsed_lines = []
    line_number = 1
    for raw_line in list_string:
        # Only lines containing '[' can hold a script entry.
        if '[' not in raw_line:
            continue
        stripped = raw_line.strip()
        if stripped and not stripped.startswith('#'):
            parsed_lines.append(parse_script_line(stripped, line_number))
            line_number += 1
    return Script(parsed_lines)
class MultiscaleCombinedHeadLongTemporalWindow(nn.Module):
    """Multi-scale spatio-temporal head producing semantic-seg logits and instance embeddings.

    Takes four feature maps (strides 32x, 16x, 8x, 4x), upsamples each to a
    common resolution (with non-local attention on the two coarsest scales),
    concatenates them, and runs two branches:
      * semseg: per-class logits.
      * embedding: 3-channel tanh-bounded offsets added to a fixed
        spatio-temporal coordinate grid, optionally followed by variance
        channels and a seed map.
    """

    def __init__(self, in_channels, num_classes, variance_output, variance_per_axis, **kwargs):
        super().__init__()
        # Embedding dimensionality: 3 spatio-temporal coordinates.
        self.embedding_size = 3
        # Variance channels: one per embedding axis, a single shared one, or none.
        self.variance_channels = ((self.embedding_size if variance_per_axis else 1) if variance_output else 0)
        self.seed_map = kwargs.get('seed_map', False)
        nonlocal_inter_channels = kwargs.get('nonlocal_inter_channels', 128)
        conv_inter_channels = kwargs.get('conv_inter_channels', 128)
        self.add_spatial_coord = kwargs.get('add_spatial_coord', False)
        if (not self.add_spatial_coord):
            print('Spatial coordinates are not added to the feature maps in the embedding head')
        # Non-local blocks optionally receive 3 extra coordinate channels.
        nl_in_channels = ((in_channels + 3) if self.add_spatial_coord else in_channels)
        self.nonlocal_32x = NonLocalBlock3DWithDownsampling(nl_in_channels, nonlocal_inter_channels, 1, in_channels)
        self.conv_32x_1 = nn.Conv3d(in_channels, conv_inter_channels, kernel_size=(1, 3, 3), padding=(0, 1, 1))
        self.conv_32x_2 = nn.Conv3d(conv_inter_channels, conv_inter_channels, kernel_size=3, padding=1)
        self.nonlocal_16x = NonLocalBlock3DWithDownsampling(nl_in_channels, nonlocal_inter_channels, 1, in_channels)
        self.conv_16x_1 = nn.Conv3d(in_channels, conv_inter_channels, kernel_size=(1, 3, 3), padding=(0, 1, 1))
        # 8x branch enlarges the spatial receptive field via dilation.
        self.conv_8x_1 = nn.Conv3d(in_channels, conv_inter_channels, kernel_size=3, padding=(1, 2, 2), dilation=(1, 2, 2))
        self.conv_4x_1 = nn.Conv3d(in_channels, conv_inter_channels, kernel_size=3, padding=1)
        # Fused (4 scales concatenated) -> per-branch towers.
        self.conv_semseg = nn.Conv3d((conv_inter_channels * 4), conv_inter_channels, kernel_size=3, padding=1)
        self.conv_semseg_out = nn.Conv3d(conv_inter_channels, num_classes, kernel_size=1, padding=0, bias=False)
        self.conv_embedding = nn.Conv3d((conv_inter_channels * 4), conv_inter_channels, kernel_size=3, padding=1)
        self.conv_embedding_out = nn.Conv3d(conv_inter_channels, self.embedding_size, kernel_size=1, padding=0, bias=False)
        if (self.variance_channels > 0):
            self.conv_variance_out = nn.Conv3d(conv_inter_channels, self.variance_channels, kernel_size=1, padding=0, bias=True)
        if self.seed_map:
            self.conv_seed_out = nn.Conv3d(conv_inter_channels, 1, kernel_size=1, padding=0, bias=True)
        # Buffers so these constants follow .to()/.cuda() and are checkpointed.
        self.register_buffer('time_scale', torch.tensor(kwargs.get('time_scale', 0.2), dtype=torch.float32))
        self.register_buffer('tanh_premultiplier', torch.tensor(0.25, dtype=torch.float32))

    def forward_32_8(self, x):
        """Coarsest (32x-stride) features: residual non-local attention, then three 2x upsamples."""
        (N, C, T, H, W) = x.shape
        grid = self.nonlocal_32x.create_spatiotemporal_grid(H, W, T, self.time_scale, x.dtype, x.device).unsqueeze(0).expand(N, (- 1), (- 1), (- 1), (- 1))
        t = (torch.cat((x, grid.detach()), dim=1) if self.add_spatial_coord else x)
        x = (x + self.nonlocal_32x(t))
        x = F.interpolate(x, scale_factor=2.0, mode='trilinear', align_corners=False)
        x = F.relu(self.conv_32x_1(x))
        x = F.interpolate(x, scale_factor=2.0, mode='trilinear', align_corners=False)
        x = F.relu(self.conv_32x_2(x))
        return F.interpolate(x, scale_factor=2.0, mode='trilinear', align_corners=False)

    def forward_16_4(self, x):
        """16x-stride features: residual non-local attention, then two 2x upsamples."""
        (N, C, T, H, W) = x.shape
        grid = self.nonlocal_16x.create_spatiotemporal_grid(H, W, T, self.time_scale, x.dtype, x.device).unsqueeze(0).expand(N, (- 1), (- 1), (- 1), (- 1))
        t = (torch.cat((x, grid.detach()), dim=1) if self.add_spatial_coord else x)
        x = (x + self.nonlocal_16x(t))
        x = F.interpolate(x, scale_factor=2.0, mode='trilinear', align_corners=False)
        x = F.relu(self.conv_16x_1(x))
        return F.interpolate(x, scale_factor=2.0, mode='trilinear', align_corners=False)

    def forward_8_2(self, x):
        """8x-stride features: dilated conv, then one 2x upsample."""
        x = F.relu(self.conv_8x_1(x))
        return F.interpolate(x, scale_factor=2.0, mode='trilinear', align_corners=False)

    def forward_4_1(self, x):
        """Finest (4x-stride) features: plain conv, already at the target resolution."""
        return F.relu(self.conv_4x_1(x))

    def semseg_branch(self, x):
        """Semantic segmentation logits from the fused multi-scale features."""
        x = F.relu(self.conv_semseg(x))
        return self.conv_semseg_out(x)

    def embedding_branch(self, x):
        """Instance embeddings: bounded offsets added to the coordinate grid, plus optional variance/seed channels."""
        x = F.relu(self.conv_embedding(x))
        (N, C, T, H, W) = x.shape
        grid = self.nonlocal_32x.create_spatiotemporal_grid(H, W, T, self.time_scale, x.dtype, x.device).unsqueeze(0).expand(N, (- 1), (- 1), (- 1), (- 1))
        embeddings = self.conv_embedding_out(x)
        # tanh bounds the predicted offsets; the detached grid supplies absolute coordinates.
        embeddings = ((embeddings * self.tanh_premultiplier).tanh() + grid.detach())
        if (self.variance_channels > 0):
            variances = self.conv_variance_out(x)
            embeddings = torch.cat((embeddings, variances), dim=1)
        if self.seed_map:
            seed = self.conv_seed_out(x)
            embeddings = torch.cat((embeddings, seed), dim=1)
        return embeddings

    def forward(self, x):
        """x: list of 4 feature maps ordered coarsest (32x) to finest (4x); returns (semseg_logits, embeddings)."""
        assert (len(x) == 4)
        scale_forward_fns = [self.forward_32_8, self.forward_16_4, self.forward_8_2, self.forward_4_1]
        x = [fn(feats) for (fn, feats) in zip(scale_forward_fns, x)]
        x = torch.cat(x, dim=1)
        semseg_logits = self.semseg_branch(x)
        embeddings = self.embedding_branch(x)
        return (semseg_logits, embeddings)
def l2_dist(x, y, pw=False):
    """Negated L2 distance between x and y along the last dimension.

    When pw is False, x and y are broadcast against each other (x gains a
    dim-1 axis, y a dim-0 axis) so all pairwise distances are produced.
    """
    if pw is False:
        (x, y) = (x.unsqueeze(1), y.unsqueeze(0))
    return -th.norm(x - y, p=2, dim=-1)
def test_fpn_carafe():
    """FPN_CARAFE accepts consistent level ranges and rejects inconsistent ones."""
    # Valid: explicit end_level, and end_level=-1 meaning "last input level".
    FPN_CARAFE(in_channels=[8, 16, 32, 64], out_channels=8, start_level=0, end_level=3, num_outs=4)
    FPN_CARAFE(in_channels=[8, 16, 32, 64], out_channels=8, start_level=0, end_level=(- 1), num_outs=4)
    # end_level beyond the number of input levels must fail.
    with pytest.raises(AssertionError):
        FPN_CARAFE(in_channels=[8, 16, 32, 64], out_channels=8, start_level=1, end_level=4, num_outs=2)
    # num_outs inconsistent with the [start_level, end_level] span must fail.
    with pytest.raises(AssertionError):
        FPN_CARAFE(in_channels=[8, 16, 32, 64], out_channels=8, start_level=1, end_level=2, num_outs=3)
def interpolate_hermite(images, camera_id, file_format):
    """Fill in missing frames between keyframe poses via Hermite interpolation on dual quaternions.

    The first and last segments fall back to linear blending; interior
    segments use cubic Hermite with finite-difference tangents (Catmull-Rom
    style, adjusted for non-uniform keyframe spacing).

    Args:
        images: temporally ordered keyframe Images (at least four required).
        camera_id: camera id assigned to the interpolated Images.
        file_format: format string producing the name of each interpolated frame from its index.

    Returns:
        List of newly created interpolated Image instances.

    Raises:
        ValueError: if fewer than four keyframes are supplied.

    NOTE(review): uses `xrange`, i.e. this module targets Python 2.
    """
    if (len(images) < 4):
        raise ValueError('Need at least four images for Hermite spline interpolation!')
    new_images = []
    T0 = image_to_idx(images[0])
    dq0 = DualQuaternion.FromQT(images[0].q, images[0].t)
    T1 = image_to_idx(images[1])
    dq1 = DualQuaternion.FromQT(images[1].q, images[1].t)
    # Keep consecutive quaternions in the same hemisphere so blending takes the short path.
    if (dq0.q0.dot(dq1.q0) < 0):
        dq1 = (- dq1)
    # Leading segment: linear blend between the first two keyframes.
    dT = (1.0 / float((T1 - T0)))
    for j in xrange(1, (T1 - T0)):
        t = (j * dT)
        dq = (((1.0 - t) * dq0) + (t * dq1)).normalize()
        new_images.append(Image(file_format.format((T0 + j)), camera_id, *dq.ToQT()))
    T2 = image_to_idx(images[2])
    dq2 = DualQuaternion.FromQT(images[2].q, images[2].t)
    if (dq1.q0.dot(dq2.q0) < 0):
        dq2 = (- dq2)
    # Interior segments: cubic Hermite between dq1..dq2, using dq0 and dq3 for the tangents.
    for i in xrange(1, (len(images) - 2)):
        T3 = image_to_idx(images[(i + 2)])
        dq3 = DualQuaternion.FromQT(images[(i + 2)].q, images[(i + 2)].t)
        if (dq2.q0.dot(dq3.q0) < 0):
            dq3 = (- dq3)
        prev_duration = (T1 - T0)
        current_duration = (T2 - T1)
        next_duration = (T3 - T2)
        dt1 = (1.0 / float((T2 - T0)))
        dt2 = (1.0 / float((T3 - T1)))
        # Finite-difference tangents weighted by segment durations (non-uniform spacing).
        m1 = (((current_duration * dt1) * (dq2 - dq1)) + ((prev_duration * dt1) * (dq1 - dq0)))
        m2 = (((next_duration * dt2) * (dq3 - dq2)) + ((current_duration * dt2) * (dq2 - dq1)))
        dT = (1.0 / float(current_duration))
        for j in xrange(1, current_duration):
            t = (j * dT)
            t2 = (t * t)
            t3 = (t2 * t)
            # Cubic Hermite basis functions h00, h10, h01, h11 evaluated at t.
            a1 = (((2.0 * t3) - (3.0 * t2)) + 1.0)
            b1 = ((t3 - (2.0 * t2)) + t)
            a2 = (((- 2.0) * t3) + (3.0 * t2))
            b2 = (t3 - t2)
            dq = ((((a1 * dq1) + (b1 * m1)) + (a2 * dq2)) + (b2 * m2)).normalize()
            new_images.append(Image(file_format.format((T1 + j)), camera_id, *dq.ToQT()))
        # Slide the keyframe window one step forward.
        (T0, T1, T2) = (T1, T2, T3)
        (dq0, dq1, dq2) = (dq1, dq2, dq3)
    # Trailing segment: linear blend between the last two keyframes.
    dT = (1.0 / float((T2 - T1)))
    for j in xrange(1, (T2 - T1)):
        t = (j * dT)
        dq = (((1.0 - t) * dq1) + (t * dq2)).normalize()
        new_images.append(Image(file_format.format((T1 + j)), camera_id, *dq.ToQT()))
    return new_images
def _pipeline_parallel_post_init(cfg: DistributedTrainingConfig, num_pipeline_devices, num_pipelines_per_node):
    """Rescale the distributed config for pipeline-model parallelism.

    Each pipeline spans num_pipeline_devices GPUs, so world size and rank are
    divided accordingly and the stage device ids are offset from the first
    GPU owned by this pipeline.
    """
    if (not cfg.distributed_no_spawn):
        # One spawned process drives an entire pipeline of num_pipeline_devices GPUs.
        assert ((cfg.distributed_world_size % num_pipeline_devices) == 0)
        cfg.distributed_world_size = (cfg.distributed_world_size // num_pipeline_devices)
        # distributed_rank must be the starting GPU index of a pipeline.
        gpus_per_node = torch.cuda.device_count()
        assert ((cfg.distributed_rank % gpus_per_node) == 0)
        assert ((cfg.distributed_rank % num_pipeline_devices) == 0)
        with open_dict(cfg):
            cfg.distributed_rank = (cfg.distributed_rank // num_pipeline_devices)
            # Launch one process per pipeline on each node.
            cfg.distributed_num_procs = num_pipelines_per_node
    # device_id becomes the first GPU of this pipeline (e.g. 0 and 4 for 4-way MP on 8 GPUs).
    cfg.device_id *= num_pipeline_devices
    if (cfg.device_id > 0):
        # Multiple pipelines share the node: bind this process to its first GPU
        # and shift the stage devices to absolute GPU ids.
        logger.debug('setting CUDA device={} on rank {}'.format(cfg.device_id, cfg.distributed_rank))
        torch.cuda.set_device(cfg.device_id)
        with open_dict(cfg):
            cfg.pipeline_devices = [(cfg.device_id + d) for d in cfg.pipeline_devices]
        logger.info('setting pipeline_devices={} on rank {}'.format(cfg.pipeline_devices, cfg.distributed_rank))
class BaseDataLoader(object):
    """Minimal data-loader interface; subclasses override initialize/load_data."""

    def __init__(self):
        pass

    def initialize(self, opt):
        """Remember the options object for later use by subclasses."""
        self.opt = opt

    def load_data(self):
        """Return the loaded dataset; the base implementation provides none."""
        return None
def print_header(colwidth=16, sep=' '):
    """Return the BenchResult field names, width-fitted and joined with `sep`.

    `colwidth` is kept for API compatibility but unused (fit_str controls the width).
    """
    return sep.join(fit_str(field) for field in BenchResult._fields)
def is_mods(fn: str, mods: Collection[str]) -> bool:
    """Return True if `fn` matches any module in `mods` according to is_mod.

    FIX: removed a dead `import re` (never used) and replaced the materialized
    list inside any() with a lazy generator so matching short-circuits.
    """
    return any(is_mod(fn, mod) for mod in mods)
class AverageMeters():
    """Collection of named AverageMeter instances, created lazily on first update."""

    def __init__(self):
        super().__init__()
        self.average_meters = {}

    def add_loss_value(self, loss_name, loss_val, n=1):
        """Record loss_val (with weight n) under loss_name, creating its meter on first use."""
        meters = self.average_meters
        if loss_name not in meters:
            meters[loss_name] = AverageMeter()
        meters[loss_name].update(loss_val, n=n)
def copy_to_gpu(gpu: bool, tensor: T) -> T:
    """Return `tensor` moved to CUDA when `gpu` is truthy, otherwise unchanged."""
    return tensor.cuda() if gpu else tensor
class COCODatasetBase(ReidBaseDataModule):
    """Re-identification data module built from COCO-style JSON annotations.

    Query/gallery annotation paths are derived from the train JSON path by
    substring replacement; identities are the annotations' `pair_id`s.
    Images live under <root>/train, <root>/query and <root>/gallery.
    """

    def __init__(self, cfg, **kwargs):
        super().__init__(cfg, **kwargs)
        assert (cfg.DATASETS.JSON_TRAIN_PATH != ''), 'DATASETS.JSON_TRAIN_PATH is not specified in the config'
        self.dataset_dir = cfg.DATASETS.ROOT_DIR
        self.json_train_path = cfg.DATASETS.JSON_TRAIN_PATH
        # Query/gallery JSONs are assumed to follow the train JSON's naming scheme.
        self.json_query_path = self.json_train_path.replace('train', 'query')
        self.json_gallery_path = self.json_train_path.replace('train', 'gallery')
        self.train_dir = osp.join(self.dataset_dir, 'train')
        self.query_dir = osp.join(self.dataset_dir, 'query')
        self.gallery_dir = osp.join(self.dataset_dir, 'gallery')
        self.use_resampling = cfg.DATALOADER.USE_RESAMPLING

    def setup(self):
        """Load the three JSON files, build the train/val datasets, and record split statistics."""
        self._check_before_run()
        transforms_base = ReidTransforms(self.cfg)
        self.train_json = self._load_json(self.json_train_path)
        self.query_json = self._load_json(self.json_query_path)
        self.gallery_json = self._load_json(self.json_gallery_path)
        (train, train_dict) = self._process_dir(self.train_dir, self.train_json)
        self.train_dict = train_dict
        self.train_list = train
        (gallery, gallery_dict) = self._process_dir(self.gallery_dir, self.gallery_json)
        (query, query_dict) = self._process_dir(self.query_dir, self.query_json)
        self.query_list = query
        self.gallery_list = gallery
        self.train = BaseDatasetLabelledPerPid(train_dict, transforms_base.build_transforms(is_train=True), self.num_instances, self.use_resampling)
        # Validation is query followed by gallery; num_query marks the boundary.
        self.val = BaseDatasetLabelled((query + gallery), transforms_base.build_transforms(is_train=False))
        self._print_dataset_statistics(train, query, gallery)
        (num_train_pids, num_train_imgs, num_train_cams) = self._get_imagedata_info(train)
        self.num_query = len(query)
        self.num_classes = num_train_pids

    def _check_before_run(self):
        """Verify that every expected annotation file exists before loading anything."""
        super()._check_before_run()
        if (not osp.exists(self.json_train_path)):
            raise RuntimeError("'{}' is not available".format(self.json_train_path))
        if (not osp.exists(self.json_query_path)):
            raise RuntimeError("'{}' is not available".format(self.json_query_path))
        if (not osp.exists(self.json_gallery_path)):
            raise RuntimeError("'{}' is not available".format(self.json_gallery_path))

    def _process_dir(self, images_path, json_file, relabel=False):
        """Turn one split's JSON into (sample list, pid -> samples dict).

        Each sample is an (image_path, pair_id, camid, mode) tuple.  For the
        train split, pair_ids with at most one image are dropped and the
        surviving ids are relabelled to a dense 0..N-1 range.
        """
        # Gallery samples get camera id 1, query/train get 0 — presumably so
        # query and gallery count as distinct cameras for ReID evaluation
        # (confirm against the evaluation protocol).
        if ('gallery' in images_path.lower()):
            camid = 1
        else:
            camid = 0
        annotations_pair_ids = np.array([item['pair_id'] for item in json_file['annotations']])
        _unique_pair_ids_set = list(set(annotations_pair_ids))
        image_ids = np.array([item['image_id'] for item in json_file['annotations']])
        image_info_ids = np.array([item['id'] for item in json_file['images']])
        image_info = np.array(json_file['images'])
        image_filenames = np.array([item['file_name'] for item in json_file['images']])
        # Map image id -> positional index into the images array.
        image_ids_dict = {k: v for (v, k) in enumerate(image_info_ids)}
        len_data = len(image_info_ids)
        # The split is inferred from the directory path.
        if ('train' in images_path.lower()):
            relabel = True
            mode = 'train'
        elif ('query' in images_path.lower()):
            mode = 'query'
        else:
            mode = 'gallery'
        if (mode == 'train'):
            # First pass: discard pair_ids with at most one usable image — a
            # single image cannot form a positive pair during training.
            unique_pair_ids_set = set()
            print('Filtering train dataset to remove pair_ids with only 1 image...')
            n = 0
            for (idx, pair_id) in enumerate(tqdm(_unique_pair_ids_set)):
                assert (pair_id >= 0)
                inds = np.where((annotations_pair_ids == pair_id))[0]
                image_ids_selected = image_ids[inds]
                image_info_inds = [image_ids_dict[id_] for id_ in image_ids_selected if (id_ in image_ids_dict)]
                image_filenames_selected = image_filenames[image_info_inds]
                num_files = len(image_filenames_selected)
                if (num_files <= 1):
                    n += 1
                    continue
                unique_pair_ids_set.add(pair_id)
            print(f'Filtered out {n} pair ids with single image')
        else:
            unique_pair_ids_set = _unique_pair_ids_set
        unique_pair_ids_set = sorted(list(unique_pair_ids_set))
        if relabel:
            # Dense relabelling so pair_ids can serve directly as classifier targets.
            pid2label = {pid: label for (label, pid) in enumerate(unique_pair_ids_set)}
        dataset_dict = defaultdict(list)
        dataset = []
        len_ids = []
        # Second pass: materialize the samples of every surviving pair_id.
        for (idx, pair_id) in enumerate(tqdm(unique_pair_ids_set)):
            assert (pair_id >= 0)
            inds = np.where((annotations_pair_ids == pair_id))[0]
            image_ids_selected = image_ids[inds]
            image_info_inds = [image_ids_dict[id_] for id_ in image_ids_selected if (id_ in image_ids_dict)]
            image_filenames_selected = image_filenames[image_info_inds]
            num_files = len(image_filenames_selected)
            len_ids.append(num_files)
            if relabel:
                pair_id = pid2label[pair_id]
            for image_entry in image_filenames_selected:
                image_path = osp.join(images_path, image_entry)
                dataset.append((image_path, pair_id, camid, mode))
                dataset_dict[pair_id].append((image_path, pair_id, camid, mode))
        return (dataset, dataset_dict)

    def train_dataloader(self, cfg, trainer, sampler_name: str='random_identity', **kwargs):
        """Build the training DataLoader with a distributed-aware identity sampler."""
        # ddp_spawn exposes the process rank differently from regular ddp.
        if (trainer.distributed_backend == 'ddp_spawn'):
            rank = trainer.root_gpu
        else:
            rank = trainer.local_rank
        world_size = (trainer.num_nodes * trainer.num_processes)
        sampler = get_sampler(sampler_name, data_source=self.train_dict, batch_size=self.cfg.SOLVER.IMS_PER_BATCH, num_instances=self.num_instances, world_size=world_size, rank=rank)
        # shuffle=False because the sampler fully controls ordering.
        return DataLoader(self.train, self.cfg.SOLVER.IMS_PER_BATCH, num_workers=self.num_workers, shuffle=False, sampler=sampler, collate_fn=collate_fn_alternative, **kwargs)
def _do_bistochastic_test(scaled):
    """Check that a bistochastically scaled matrix has equal mean row and column sums."""
    # Reuse the generic scaling checks first.
    _do_scale_test(scaled)
    # Bistochastic property: mean row sum ~= mean column sum (loose, 1 decimal).
    assert_almost_equal(scaled.sum(axis=0).mean(), scaled.sum(axis=1).mean(), decimal=1)
class ConfusionMatrix():
    """Multi-class confusion matrix with per-class and overall statistics.

    A matrix can be constructed from actual/predict vectors, from a dict or
    array matrix, or loaded from a previously saved ``.obj`` file.  Statistic
    accessors generally return dicts keyed by class name.

    NOTE(review): bare ``_off_check`` statements appear before several
    methods in this class.  They look like decorator lines whose leading
    ``@`` was lost during extraction (presumably a guard that raises when
    ``metrics_off`` is enabled) — verify against the project source; as
    written they would evaluate the name ``_off_check`` at class-body time.
    """

    def __init__(self, actual_vector=None, predict_vector=None, matrix=None, digit=5, threshold=None, file=None, sample_weight=None, transpose=False, classes=None, is_imbalanced=None, metrics_off=False):
        """Build the matrix from vectors, a matrix, or a saved file.

        Exactly one input source is used, chosen in priority order:
        saved file, dict matrix, list/array matrix, actual/predict vectors.
        When ``metrics_off`` is True the statistic tables are not computed.
        """
        self.actual_vector = actual_vector
        self.predict_vector = predict_vector
        self.metrics_off = metrics_off
        self.prob_vector = None
        self.digit = digit
        self.weights = None
        self.classes = None
        self.imbalance = None
        # Non-bool transpose values are silently coerced to False.
        if isinstance(transpose, bool):
            self.transpose = transpose
        else:
            self.transpose = False
        # Dispatch on input source; each handler returns the matrix params.
        # NOTE(review): isfile(file) assumes ``file`` is path-like when given;
        # behavior for the default ``file=None`` depends on isfile — verify.
        if isfile(file):
            matrix_param = __obj_file_handler__(self, file)
        elif isinstance(matrix, dict):
            matrix_param = __obj_matrix_handler__(matrix, classes, self.transpose)
        elif isinstance(matrix, (list, numpy.ndarray)):
            matrix_param = __obj_array_handler__(matrix, classes, self.transpose)
        else:
            matrix_param = __obj_vector_handler__(self, actual_vector, predict_vector, threshold, sample_weight, classes)
        __obj_assign_handler__(self, matrix_param)
        if (not metrics_off):
            __class_stat_init__(self)
            __overall_stat_init__(self)
        __imbalancement_handler__(self, is_imbalanced)
        self.binary = binary_check(self.classes)
        self.recommended_list = statistic_recommend(self.classes, self.imbalance)
        # Lazily-computed caches for the sparse matrix renderings.
        self.sparse_matrix = None
        self.sparse_normalized_matrix = None
        # Lazily-computed TP/FP/TN/FN index positions (see position()).
        self.positions = None
        # Maps original class labels to current labels (updated by relabel()).
        self.label_map = {x: x for x in self.classes}

    def print_matrix(self, one_vs_all=False, class_name=None, sparse=False):
        """Print the confusion matrix (optionally one-vs-all or sparse)."""
        classes = self.classes
        table = self.table
        if one_vs_all:
            [classes, table] = one_vs_all_func(classes, table, self.TP, self.TN, self.FP, self.FN, class_name)
        if (sparse is True):
            if (self.sparse_matrix is None):
                self.sparse_matrix = sparse_matrix_calc(classes, table)
            print(sparse_table_print(self.sparse_matrix))
        else:
            print(table_print(classes, table))
        # Large matrices print poorly; warn the user.
        if (len(classes) >= CLASS_NUMBER_THRESHOLD):
            warn(CLASS_NUMBER_WARNING, RuntimeWarning)

    def print_normalized_matrix(self, one_vs_all=False, class_name=None, sparse=False):
        """Print the row-normalized confusion matrix."""
        classes = self.classes
        table = self.table
        normalized_table = self.normalized_table
        if one_vs_all:
            [classes, table] = one_vs_all_func(classes, table, self.TP, self.TN, self.FP, self.FN, class_name)
            # Re-normalize after collapsing to the one-vs-all table.
            normalized_table = normalized_table_calc(classes, table)
        if (sparse is True):
            if (self.sparse_normalized_matrix is None):
                self.sparse_normalized_matrix = sparse_matrix_calc(classes, normalized_table)
            print(sparse_table_print(self.sparse_normalized_matrix))
        else:
            print(table_print(classes, normalized_table))
        if (len(classes) >= CLASS_NUMBER_THRESHOLD):
            warn(CLASS_NUMBER_WARNING, RuntimeWarning)

    _off_check  # NOTE(review): likely a stripped '@...' decorator line — verify

    def stat(self, overall_param=None, class_param=None, class_name=None, summary=False):
        """Print overall and class statistics, optionally filtered/summarized."""
        classes = class_filter(self.classes, class_name)
        class_list = class_param
        overall_list = overall_param
        if summary:
            # Summary mode overrides any explicit parameter lists.
            class_list = SUMMARY_CLASS
            overall_list = SUMMARY_OVERALL
        print(stat_print(classes, self.class_stat, self.overall_stat, self.digit, overall_list, class_list))
        if (len(classes) >= CLASS_NUMBER_THRESHOLD):
            warn(CLASS_NUMBER_WARNING, RuntimeWarning)

    def __str__(self):
        """Return the matrix table followed by the full statistics report."""
        result = table_print(self.classes, self.table)
        result += ('\n' * 4)
        result += stat_print(self.classes, self.class_stat, self.overall_stat, self.digit)
        if (len(self.classes) >= CLASS_NUMBER_THRESHOLD):
            warn(CLASS_NUMBER_WARNING, RuntimeWarning)
        return result

    def __iter__(self):
        """Iterate over (class, row-dict) pairs of the matrix."""
        for key in self.matrix.keys():
            (yield (key, self.matrix[key]))

    def __contains__(self, class_name):
        """Return True if ``class_name`` is one of the matrix classes."""
        return (class_name in self.classes)

    def __getitem__(self, class_name):
        """Return the matrix row (a dict) for ``class_name``."""
        return self.matrix[class_name]

    def save_stat(self, name, address=True, overall_param=None, class_param=None, class_name=None, summary=False, sparse=False):
        """Write matrix, normalized matrix, stats and one-vs-all tables to
        ``<name>.pycm``; returns a {'Status', 'Message'} dict."""
        try:
            message = None
            class_list = class_param
            overall_list = overall_param
            warning_message = ''
            if summary:
                class_list = SUMMARY_CLASS
                overall_list = SUMMARY_OVERALL
            classes = self.classes
            table = self.table
            file = open((name + '.pycm'), 'w', encoding='utf-8')
            if (sparse is True):
                if (self.sparse_matrix is None):
                    self.sparse_matrix = sparse_matrix_calc(classes, table)
                matrix = (('Matrix : \n\n' + sparse_table_print(self.sparse_matrix)) + '\n\n')
                if (self.sparse_normalized_matrix is None):
                    self.sparse_normalized_matrix = sparse_matrix_calc(classes, self.normalized_table)
                normalized_matrix = (('Normalized Matrix : \n\n' + sparse_table_print(self.sparse_normalized_matrix)) + '\n\n')
            else:
                matrix = (('Matrix : \n\n' + table_print(self.classes, self.table)) + '\n\n')
                normalized_matrix = (('Normalized Matrix : \n\n' + table_print(self.classes, self.normalized_table)) + '\n\n')
            # Append a one-vs-all section per class.
            one_vs_all = '\nOne-Vs-All : \n\n'
            for c in self.classes:
                one_vs_all += (str(c) + '-Vs-All : \n\n')
                [classes, table] = one_vs_all_func(self.classes, self.table, self.TP, self.TN, self.FP, self.FN, c)
                one_vs_all += (table_print(classes, table) + '\n\n')
            classes = class_filter(self.classes, class_name)
            stat = stat_print(classes, self.class_stat, self.overall_stat, self.digit, overall_list, class_list)
            if (len(self.classes) >= CLASS_NUMBER_THRESHOLD):
                warning_message = ((('\n' + 'Warning : ') + CLASS_NUMBER_WARNING) + '\n')
            file.write(((((matrix + normalized_matrix) + stat) + one_vs_all) + warning_message))
            file.close()
            if address:
                message = os.path.join(os.getcwd(), (name + '.pycm'))
            return {'Status': True, 'Message': message}
        except Exception as e:
            # Any failure is reported through the returned status dict.
            return {'Status': False, 'Message': str(e)}

    def save_html(self, name, address=True, overall_param=None, class_param=None, class_name=None, color=(0, 0, 0), normalize=False, summary=False, alt_link=False, shortener=True):
        """Render the matrix and statistics as ``<name>.html``; returns a
        {'Status', 'Message'} dict."""
        try:
            class_list = class_param
            overall_list = overall_param
            if summary:
                class_list = SUMMARY_CLASS
                overall_list = SUMMARY_OVERALL
            message = None
            table = self.table
            if normalize:
                table = self.normalized_table
            html_file = open((name + '.html'), 'w', encoding='utf-8')
            html_file.write(html_init())
            html_file.write(html_dataset_type(self.binary, self.imbalance))
            html_file.write(html_table(self.classes, table, color, normalize, shortener))
            html_file.write(html_overall_stat(self.overall_stat, self.digit, overall_list, self.recommended_list, alt_link))
            class_stat_classes = class_filter(self.classes, class_name)
            html_file.write(html_class_stat(class_stat_classes, self.class_stat, self.digit, class_list, self.recommended_list, alt_link))
            html_file.write(html_end(PYCM_VERSION))
            html_file.close()
            if address:
                message = os.path.join(os.getcwd(), (name + '.html'))
            return {'Status': True, 'Message': message}
        except Exception as e:
            return {'Status': False, 'Message': str(e)}

    def save_csv(self, name, address=True, class_param=None, class_name=None, matrix_save=True, normalize=False, summary=False, header=False):
        """Write class statistics to ``<name>.csv`` (and optionally the
        matrix to ``<name>_matrix.csv``); returns a {'Status', 'Message'} dict."""
        try:
            class_list = class_param
            if summary:
                class_list = SUMMARY_CLASS
            message = None
            classes = class_filter(self.classes, class_name)
            csv_file = open((name + '.csv'), 'w', encoding='utf-8')
            csv_data = csv_print(classes, self.class_stat, self.digit, class_list)
            csv_file.write(csv_data)
            if matrix_save:
                matrix = self.table
                if normalize:
                    matrix = self.normalized_table
                csv_matrix_file = open(((name + '_matrix') + '.csv'), 'w', encoding='utf-8')
                csv_matrix_data = csv_matrix_print(self.classes, matrix, header=header)
                csv_matrix_file.write(csv_matrix_data)
            if address:
                message = os.path.join(os.getcwd(), (name + '.csv'))
            return {'Status': True, 'Message': message}
        except Exception as e:
            return {'Status': False, 'Message': str(e)}

    def save_obj(self, name, address=True, save_stat=False, save_vector=True):
        """Serialize the matrix state as JSON to ``<name>.obj``; returns a
        {'Status', 'Message'} dict."""
        try:
            message = None
            obj_file = open((name + '.obj'), 'w')
            actual_vector_temp = self.actual_vector
            predict_vector_temp = self.predict_vector
            prob_vector_temp = self.prob_vector
            weights_vector_temp = self.weights
            # Copy rows so serialization never mutates the live table.
            matrix_temp = {k: self.table[k].copy() for k in self.classes}
            matrix_items = []
            for i in self.classes:
                matrix_items.append((i, list(matrix_temp[i].items())))
            (actual_vector_temp, predict_vector_temp, prob_vector_temp, weights_vector_temp) = map(vector_serializer, [actual_vector_temp, predict_vector_temp, prob_vector_temp, weights_vector_temp])
            dump_dict = {'Actual-Vector': actual_vector_temp, 'Predict-Vector': predict_vector_temp, 'Prob-Vector': prob_vector_temp, 'Matrix': matrix_items, 'Digit': self.digit, 'Sample-Weight': weights_vector_temp, 'Transpose': self.transpose, 'Imbalanced': self.imbalance}
            if save_stat:
                dump_dict['Class-Stat'] = self.class_stat
                dump_dict['Overall-Stat'] = self.overall_stat
            if (not save_vector):
                # Drop the raw vectors when the caller opts out.
                dump_dict['Actual-Vector'] = None
                dump_dict['Predict-Vector'] = None
                dump_dict['Prob-Vector'] = None
                dump_dict['Sample-Weight'] = None
            json.dump(dump_dict, obj_file)
            if address:
                message = os.path.join(os.getcwd(), (name + '.obj'))
            return {'Status': True, 'Message': message}
        except Exception as e:
            return {'Status': False, 'Message': str(e)}

    def F_beta(self, beta):
        """Return the per-class F-beta score dict ({} on failure)."""
        try:
            F_dict = {}
            for i in self.TP.keys():
                F_dict[i] = F_calc(TP=self.TP[i], FP=self.FP[i], FN=self.FN[i], beta=beta)
            return F_dict
        except Exception:
            return {}

    _off_check  # NOTE(review): likely a stripped '@...' decorator line — verify

    def sensitivity_index(self):
        """Return the per-class sensitivity index (d-prime) dict."""
        sensitivity_index_dict = {}
        for i in self.classes:
            sensitivity_index_dict[i] = sensitivity_index_calc(self.TPR[i], self.FPR[i])
        return sensitivity_index_dict

    _off_check  # NOTE(review): likely a stripped '@...' decorator line — verify

    def IBA_alpha(self, alpha):
        """Return the per-class IBA (index of balanced accuracy) dict
        for the given alpha ({} on failure)."""
        try:
            IBA_dict = {}
            for i in self.classes:
                IBA_dict[i] = IBA_calc(self.TPR[i], self.TNR[i], alpha=alpha)
            return IBA_dict
        except Exception:
            return {}

    def TI(self, alpha, beta):
        """Return the per-class Tversky index dict ({} on failure)."""
        try:
            TI_dict = {}
            for i in self.classes:
                TI_dict[i] = TI_calc(self.TP[i], self.FP[i], self.FN[i], alpha, beta)
            return TI_dict
        except Exception:
            return {}

    _off_check  # NOTE(review): likely a stripped '@...' decorator line — verify

    def NB(self, w=1):
        """Return the per-class net benefit dict with weight ``w``
        ({} on failure)."""
        try:
            NB_dict = {}
            for i in self.classes:
                NB_dict[i] = NB_calc(self.TP[i], self.FP[i], self.POP[i], w)
            return NB_dict
        except Exception:
            return {}

    def distance(self, metric):
        """Return per-class distance/similarity values for a DistanceType
        ``metric``; raises pycmMatrixError for a wrong metric type."""
        distance_dict = {}
        if (not isinstance(metric, DistanceType)):
            raise pycmMatrixError(DISTANCE_METRIC_TYPE_ERROR)
        for i in self.classes:
            distance_dict[i] = DISTANCE_MAPPER[metric](TP=self.TP[i], FP=self.FP[i], FN=self.FN[i], TN=self.TN[i])
        return distance_dict

    _off_check  # NOTE(review): likely a stripped '@...' decorator line — verify

    def CI(self, param, alpha=0.05, one_sided=False, binom_method='normal-approx'):
        """Return the confidence interval for ``param``.

        The critical value is looked up from the one- or two-sided alpha
        table (falling back to 0.05 with a warning).  Raises pycmCIError
        for unsupported parameters or a non-string ``param``.
        """
        if isinstance(param, str):
            method = 'normal-approx'
            if isinstance(binom_method, str):
                method = binom_method.lower()
            if one_sided:
                if (alpha in ALPHA_ONE_SIDE_TABLE.keys()):
                    CV = ALPHA_ONE_SIDE_TABLE[alpha]
                else:
                    CV = ALPHA_ONE_SIDE_TABLE[0.05]
                    warn(CI_ALPHA_ONE_SIDE_WARNING, RuntimeWarning)
            elif (alpha in ALPHA_TWO_SIDE_TABLE.keys()):
                CV = ALPHA_TWO_SIDE_TABLE[alpha]
            else:
                CV = ALPHA_TWO_SIDE_TABLE[0.05]
                warn(CI_ALPHA_TWO_SIDE_WARNING, RuntimeWarning)
            # Class-level params are matched case-insensitively (upper);
            # overall params are matched as given.
            param_u = param.upper()
            if (param_u in CI_CLASS_LIST):
                return __CI_class_handler__(self, param_u, CV, method)
            if (param in CI_OVERALL_LIST):
                return __CI_overall_handler__(self, param, CV, method)
            raise pycmCIError(CI_SUPPORT_ERROR)
        raise pycmCIError(CI_FORMAT_ERROR)

    def __repr__(self):
        """Return a short representation listing the classes."""
        return (('pycm.ConfusionMatrix(classes: ' + str(self.classes)) + ')')

    def __len__(self):
        """Return the number of classes."""
        return len(self.classes)

    def __eq__(self, other):
        """Two matrices are equal iff their raw tables are equal."""
        if isinstance(other, ConfusionMatrix):
            return (self.table == other.table)
        return False

    def __ne__(self, other):
        return (not self.__eq__(other))

    def __copy__(self):
        """Return a shallow copy sharing the underlying tables."""
        _class = self.__class__
        result = _class.__new__(_class)
        result.__dict__.update(self.__dict__)
        return result

    def copy(self):
        """Return a shallow copy (alias for ``__copy__``)."""
        return self.__copy__()

    def relabel(self, mapping, sort=False):
        """Rename classes in place using a bijective ``mapping`` dict.

        Raises pycmMatrixError when ``mapping`` is not a dict, does not
        cover exactly the current classes, or is not one-to-one.
        """
        if (not isinstance(mapping, dict)):
            raise pycmMatrixError(MAPPING_FORMAT_ERROR)
        if (set(self.classes) != set(mapping.keys())):
            raise pycmMatrixError(MAPPING_CLASS_NAME_ERROR)
        if (len(self.classes) != len(set(mapping.values()))):
            raise pycmMatrixError(MAPPING_CLASS_NAME_ERROR)
        # Rebuild both tables with the new row/column labels.
        table_temp = {}
        normalized_table_temp = {}
        for row in self.classes:
            temp_dict = {}
            temp_dict_normalized = {}
            for col in self.classes:
                temp_dict[mapping[col]] = self.table[row][col]
                temp_dict_normalized[mapping[col]] = self.normalized_table[row][col]
            table_temp[mapping[row]] = temp_dict
            normalized_table_temp[mapping[row]] = temp_dict_normalized
        self.table = table_temp
        self.normalized_table = normalized_table_temp
        self.matrix = self.table
        self.normalized_matrix = self.normalized_table
        # Re-key every per-class statistic.
        for param in self.class_stat.keys():
            temp_dict = {}
            for classname in self.classes:
                temp_dict[mapping[classname]] = self.class_stat[param][classname]
            self.class_stat[param] = temp_dict
        # Compose the new mapping onto the existing original->current map.
        temp_label_map = {}
        for (prime_label, new_label) in self.label_map.items():
            temp_label_map[prime_label] = mapping[new_label]
        self.label_map = temp_label_map
        # Cached positions are keyed by old labels; invalidate them.
        self.positions = None
        self.classes = [mapping[x] for x in self.classes]
        if sort:
            self.classes = sorted(self.classes)
        self.TP = self.class_stat['TP']
        self.TN = self.class_stat['TN']
        self.FP = self.class_stat['FP']
        self.FN = self.class_stat['FN']
        __class_stat_init__(self)

    _off_check  # NOTE(review): likely a stripped '@...' decorator line — verify

    def average(self, param, none_omit=False):
        """Return the population-weighted average of class stat ``param``."""
        return self.weighted_average(param=param, weight=self.POP, none_omit=none_omit)

    _off_check  # NOTE(review): likely a stripped '@...' decorator line — verify

    def weighted_average(self, param, weight=None, none_omit=False):
        """Return the weighted average of class stat ``param``.

        ``weight`` must map every class to a number; defaults to condition
        positives (P).  'None' entries are skipped when ``none_omit``.
        Returns the string 'None' when averaging fails.
        """
        selected_weight = self.P.copy()
        if (weight is not None):
            if (not isinstance(weight, dict)):
                raise pycmAverageError(AVERAGE_WEIGHT_ERROR)
            if ((set(weight.keys()) == set(self.classes)) and all([isfloat(x) for x in weight.values()])):
                selected_weight = weight.copy()
            else:
                raise pycmAverageError(AVERAGE_WEIGHT_ERROR)
        if (param in self.class_stat):
            selected_param = self.class_stat[param]
        else:
            raise pycmAverageError(AVERAGE_INVALID_ERROR)
        try:
            weight_list = []
            param_list = []
            for class_name in selected_param.keys():
                if ((selected_param[class_name] == 'None') and none_omit):
                    continue
                weight_list.append(selected_weight[class_name])
                param_list.append(selected_param[class_name])
            return numpy.average(param_list, weights=weight_list)
        except Exception:
            return 'None'

    _off_check  # NOTE(review): likely a stripped '@...' decorator line — verify

    def weighted_kappa(self, weight=None):
        """Return weighted kappa; falls back to plain Kappa (with a
        warning) when the weight matrix is invalid."""
        if (matrix_check(weight) is False):
            warn(WEIGHTED_KAPPA_WARNING, RuntimeWarning)
            return self.Kappa
        if (set(weight.keys()) != set(self.classes)):
            warn(WEIGHTED_KAPPA_WARNING, RuntimeWarning)
            return self.Kappa
        return weighted_kappa_calc(self.classes, self.table, self.P, self.TOP, self.POP, weight)

    _off_check  # NOTE(review): likely a stripped '@...' decorator line — verify

    def weighted_alpha(self, weight=None):
        """Return weighted Krippendorff's alpha; falls back to plain Alpha
        (with a warning) when the weight matrix is invalid."""
        if (matrix_check(weight) is False):
            warn(WEIGHTED_ALPHA_WARNING, RuntimeWarning)
            return self.Alpha
        if (set(weight.keys()) != set(self.classes)):
            warn(WEIGHTED_ALPHA_WARNING, RuntimeWarning)
            return self.Alpha
        return weighted_alpha_calc(self.classes, self.table, self.P, self.TOP, self.POP, weight)

    _off_check  # NOTE(review): likely a stripped '@...' decorator line — verify

    def aickin_alpha(self, max_iter=200, epsilon=0.0001):
        """Return Aickin's alpha via iterative estimation."""
        return alpha2_calc(self.TOP, self.P, self.Overall_ACC, self.POP, self.classes, max_iter, epsilon)

    def brier_score(self, pos_class=None):
        """Return the Brier score (binary problems with a prob vector only).

        Raises pycmVectorError when no probability vector exists, the
        problem is not binary, or string classes lack an explicit pos_class.
        """
        if ((self.prob_vector is None) or (not self.binary)):
            raise pycmVectorError(BRIER_LOG_LOSS_PROB_ERROR)
        if ((pos_class is None) and isinstance(self.classes[0], str)):
            raise pycmVectorError(BRIER_LOG_LOSS_CLASS_ERROR)
        return brier_score_calc(self.classes, self.prob_vector, self.actual_vector, self.weights, pos_class)

    def log_loss(self, normalize=True, pos_class=None):
        """Return the log loss (binary problems with a prob vector only);
        same preconditions as ``brier_score``."""
        if ((self.prob_vector is None) or (not self.binary)):
            raise pycmVectorError(BRIER_LOG_LOSS_PROB_ERROR)
        if ((pos_class is None) and isinstance(self.classes[0], str)):
            raise pycmVectorError(BRIER_LOG_LOSS_CLASS_ERROR)
        return log_loss_calc(self.classes, self.prob_vector, self.actual_vector, normalize, self.weights, pos_class)

    def position(self):
        """Return {label: {'TP': [...], 'FP': [...], 'TN': [...], 'FN': [...]}}
        of sample indices; computed lazily and cached, requires the
        original actual/predict vectors."""
        if ((self.predict_vector is None) or (self.actual_vector is None)):
            raise pycmVectorError(VECTOR_ONLY_ERROR)
        if (self.positions is None):
            classes = list(self.label_map.keys())
            positions = {self.label_map[_class]: {'TP': [], 'FP': [], 'TN': [], 'FN': []} for _class in classes}
            [actual_vector, predict_vector] = vector_filter(self.actual_vector, self.predict_vector)
            for (index, observation) in enumerate(predict_vector):
                for _class in classes:
                    label = self.label_map[_class]
                    if (observation == actual_vector[index]):
                        # Correct prediction: TP for the predicted class,
                        # TN for every other class.
                        if (_class == observation):
                            positions[label]['TP'].append(index)
                        else:
                            positions[label]['TN'].append(index)
                    elif (_class == observation):
                        positions[label]['FP'].append(index)
                    elif (_class == actual_vector[index]):
                        positions[label]['FN'].append(index)
                    else:
                        positions[label]['TN'].append(index)
            self.positions = positions
        return self.positions

    def to_array(self, normalized=False, one_vs_all=False, class_name=None):
        """Return the (optionally normalized / one-vs-all) matrix as a
        numpy array, rows and columns in class order."""
        classes = self.classes
        table = self.table
        if normalized:
            table = self.normalized_table
        if one_vs_all:
            [classes, table] = one_vs_all_func(classes, table, self.TP, self.TN, self.FP, self.FN, class_name)
            if normalized:
                table = normalized_table_calc(classes, table)
        array = []
        for key in classes:
            row = [table[key][i] for i in classes]
            array.append(row)
        return numpy.array(array)

    def combine(self, other, metrics_off=False):
        """Return a new ConfusionMatrix that is the element-wise sum of
        this matrix and ``other``."""
        if (isinstance(other, ConfusionMatrix) is False):
            raise pycmMatrixError(COMBINE_TYPE_ERROR)
        return ConfusionMatrix(matrix=matrix_combine(self.matrix, other.matrix), metrics_off=metrics_off)

    def plot(self, normalized=False, one_vs_all=False, class_name=None, title='Confusion Matrix', number_label=False, cmap=None, plot_lib='matplotlib'):
        """Plot the matrix with matplotlib or seaborn and return the axes;
        raises pycmPlotError when the plotting library is missing."""
        matrix = self.to_array(normalized=normalized, one_vs_all=one_vs_all, class_name=class_name)
        classes = self.classes
        if normalized:
            title += ' (Normalized)'
        if (one_vs_all and (class_name in classes)):
            classes = [class_name, '~']
        try:
            from matplotlib import pyplot as plt
        except (ModuleNotFoundError, ImportError):
            raise pycmPlotError(MATPLOTLIB_PLOT_LIBRARY_ERROR)
        if (cmap is None):
            cmap = plt.cm.gray_r
        (fig, ax) = plt.subplots()
        fig.canvas.manager.set_window_title(title)
        if (plot_lib == 'seaborn'):
            try:
                import seaborn as sns
            except (ModuleNotFoundError, ImportError):
                raise pycmPlotError(SEABORN_PLOT_LIBRARY_ERROR)
            ax = sns.heatmap(matrix, cmap=cmap)
            return axes_gen(ax, classes, matrix, title, cmap, number_label, plot_lib)
        plt.imshow(matrix, cmap=cmap)
        plt.colorbar()
        return axes_gen(ax, classes, matrix, title, cmap, number_label, plot_lib)
def __getitem_(g, self, i):
    """ONNX symbolic for indexing: emit SequenceAt for tensor lists,
    otherwise delegate to the opset-9 implementation."""
    if not sym_help._is_tensor_list(self):
        # Non-sequence inputs keep the legacy opset-9 lowering.
        from torch.onnx.symbolic_opset9 import __getitem_ as getitem
        return getitem(g, self, i)
    return g.op('SequenceAt', self, i)
class Argument(object):
    """Record describing one declared argument of a callable."""

    def __init__(self, _type, name, is_optional):
        # Store the declaration components verbatim.
        self.type = _type
        self.name = name
        self.is_optional = is_optional

    def __repr__(self):
        # Render as "<type> <name>", e.g. "int count".
        return ' '.join((self.type, self.name))
class ModuleTestCluster(TestCluster):
    """Test cluster for one module: tracks accessible objects under test,
    generators per type, and modifiers per class.

    NOTE(review): several methods here are accessed without calling them
    (e.g. ``self.type_system.primitive_proper_types``,
    ``self.function_data_for_accessibles.values()``,
    ``self.cluster.modifiers.values()``), and two methods lack ``self``
    (``_add_or_make_union``, ``__compute_cyclomatic_complexities``) yet are
    invoked via ``self.``.  The bare ``_cache(maxsize=...)`` lines and the
    ``cache_clear()`` calls strongly suggest that ``@property``,
    ``@staticmethod`` and caching decorators lost their leading ``@``
    during extraction — verify against the project source.
    """

    def __init__(self, linenos: int) -> None:
        """Create an empty cluster; ``linenos`` is the module's line count."""
        self.__type_system = TypeSystem()
        self.__linenos = linenos
        # Type -> accessible objects able to generate a value of that type.
        self.__generators: dict[(ProperType, OrderedSet[GenericAccessibleObject])] = defaultdict(OrderedSet)
        # Class -> accessible objects able to modify instances of it.
        self.__modifiers: dict[(TypeInfo, OrderedSet[GenericAccessibleObject])] = defaultdict(OrderedSet)
        self.__accessible_objects_under_test: OrderedSet[GenericAccessibleObject] = OrderedSet()
        self.__function_data_for_accessibles: dict[(GenericAccessibleObject, _CallableData)] = {}
        # All callable accessibles seen (generators, modifiers, SUT).
        self.__callables: OrderedSet[GenericCallableAccessibleObject] = OrderedSet()

    def log_cluster_statistics(self) -> None:
        """Emit type-guessing statistics for all callables under test."""
        stats = TypeGuessingStats()
        for accessible in self.__accessible_objects_under_test:
            if isinstance(accessible, GenericCallableAccessibleObject):
                accessible.inferred_signature.log_stats_and_guess_signature(accessible.is_constructor(), str(accessible), stats)
        def _serialize_helper(obj):
            # JSON encoder hook for sets and SignatureInfo dataclasses.
            if isinstance(obj, set):
                return list(obj)
            if isinstance(obj, SignatureInfo):
                return dataclasses.asdict(obj)
            return obj
        stat.track_output_variable(RuntimeVariable.SignatureInfos, json.dumps(stats.signature_infos, default=_serialize_helper))
        stat.track_output_variable(RuntimeVariable.NumberOfConstructors, str(stats.number_of_constructors))

    def _drop_generator(self, accessible: GenericCallableAccessibleObject):
        """Remove ``accessible`` as a generator of its current return type,
        dropping the now-empty bucket if needed."""
        gens = self.__generators.get(accessible.generated_type())
        if (gens is None):
            return
        gens.discard(accessible)
        if (len(gens) == 0):
            self.__generators.pop(accessible.generated_type())

    # NOTE(review): no ``self`` but called via ``self._add_or_make_union`` —
    # presumably a stripped ``@staticmethod``; verify.
    def _add_or_make_union(old_type: ProperType, new_type: ProperType, max_size: int=5) -> UnionType:
        """Fold ``new_type`` into ``old_type`` as a (size-bounded) union."""
        if isinstance(old_type, UnionType):
            items = old_type.items
            # Cap union growth and avoid duplicates.
            if ((len(items) >= max_size) or (new_type in items)):
                return old_type
            new_type = UnionType(tuple(sorted((*items, new_type))))
        elif (old_type in (ANY, new_type)):
            new_type = UnionType((new_type,))
        else:
            new_type = UnionType(tuple(sorted((old_type, new_type))))
        return new_type

    def update_return_type(self, accessible: GenericCallableAccessibleObject, new_type: ProperType) -> None:
        """Widen ``accessible``'s return type with ``new_type`` and re-index
        it in the generator lookup, invalidating cached queries."""
        old_type = accessible.inferred_signature.return_type
        new_type = self._add_or_make_union(old_type, new_type)
        if (old_type == new_type):
            return
        self._drop_generator(accessible)
        # Cached lookups depend on the generator index; clear them.
        self.get_generators_for.cache_clear()
        self.get_all_generatable_types.cache_clear()
        accessible.inferred_signature.return_type = new_type
        self.__generators[new_type].add(accessible)

    def update_parameter_knowledge(self, accessible: GenericCallableAccessibleObject, param_name: str, knowledge: tt.UsageTraceNode) -> None:
        """Merge observed usage-trace knowledge into a parameter."""
        accessible.inferred_signature.usage_trace[param_name].merge(knowledge)

    # NOTE(review): used without call elsewhere — presumably a stripped
    # ``@property``; verify.
    def type_system(self) -> TypeSystem:
        """The cluster's type system."""
        return self.__type_system

    # NOTE(review): presumably a stripped ``@property``; verify.
    def linenos(self) -> int:
        """Number of lines of the module under test."""
        return self.__linenos

    def add_generator(self, generator: GenericAccessibleObject) -> None:
        """Register ``generator`` for its generated type (primitives and
        None are generated directly and not indexed)."""
        if isinstance(generator, GenericCallableAccessibleObject):
            self.__callables.add(generator)
        generated_type = generator.generated_type()
        if (isinstance(generated_type, NoneType) or generated_type.accept(is_primitive_type)):
            return
        self.__generators[generated_type].add(generator)

    def add_accessible_object_under_test(self, objc: GenericAccessibleObject, data: _CallableData) -> None:
        """Register an accessible object under test with its metadata."""
        self.__accessible_objects_under_test.add(objc)
        self.__function_data_for_accessibles[objc] = data

    def add_modifier(self, typ: TypeInfo, obj: GenericAccessibleObject) -> None:
        """Register ``obj`` as a modifier of instances of ``typ``."""
        if isinstance(obj, GenericCallableAccessibleObject):
            self.__callables.add(obj)
        self.__modifiers[typ].add(obj)

    # NOTE(review): presumably a stripped ``@property``; verify.
    def accessible_objects_under_test(self) -> OrderedSet[GenericAccessibleObject]:
        return self.__accessible_objects_under_test

    # NOTE(review): presumably a stripped ``@property``; verify.
    def function_data_for_accessibles(self) -> dict[(GenericAccessibleObject, _CallableData)]:
        return self.__function_data_for_accessibles

    def num_accessible_objects_under_test(self) -> int:
        """Number of accessible objects under test."""
        return len(self.__accessible_objects_under_test)

    # NOTE(review): bare call below looks like a stripped caching decorator
    # (``cache_clear`` is invoked on this method above) — verify.
    _cache(maxsize=1024)
    def get_generators_for(self, typ: ProperType) -> tuple[(OrderedSet[GenericAccessibleObject], bool)]:
        """Return (generators possibly producing ``typ``, only_any flag).

        ``only_any`` is True when every matching generator type was ANY.
        """
        if isinstance(typ, AnyType):
            return (OrderedSet(itertools.chain.from_iterable(self.__generators.values())), False)
        results: OrderedSet[GenericAccessibleObject] = OrderedSet()
        only_any = True
        for (gen_type, generators) in self.__generators.items():
            if self.__type_system.is_maybe_subtype(gen_type, typ):
                results.update(generators)
                only_any &= (gen_type == ANY)
        return (results, only_any)

    class _FindModifiers(TypeVisitor[OrderedSet[GenericAccessibleObject]]):
        """Type visitor collecting the modifiers applicable to a type."""

        def __init__(self, cluster: TestCluster):
            self.cluster = cluster

        def visit_any_type(self, left: AnyType) -> OrderedSet[GenericAccessibleObject]:
            # ANY matches every registered modifier.
            return OrderedSet(itertools.chain.from_iterable(self.cluster.modifiers.values()))

        def visit_none_type(self, left: NoneType) -> OrderedSet[GenericAccessibleObject]:
            return OrderedSet()

        def visit_instance(self, left: Instance) -> OrderedSet[GenericAccessibleObject]:
            # Modifiers of any superclass also apply to the instance.
            result: OrderedSet[GenericAccessibleObject] = OrderedSet()
            for type_info in self.cluster.type_system.get_superclasses(left.type):
                result.update(self.cluster.modifiers[type_info])
            return result

        def visit_tuple_type(self, left: TupleType) -> OrderedSet[GenericAccessibleObject]:
            return OrderedSet()

        def visit_union_type(self, left: UnionType) -> OrderedSet[GenericAccessibleObject]:
            # Union: any member's modifiers apply.
            result: OrderedSet[GenericAccessibleObject] = OrderedSet()
            for element in left.items:
                result.update(element.accept(self))
            return result

        def visit_unsupported_type(self, left: Unsupported) -> OrderedSet[GenericAccessibleObject]:
            raise NotImplementedError('This type shall not be used during runtime')

    def get_modifiers_for(self, typ: ProperType) -> OrderedSet[GenericAccessibleObject]:
        """Return all modifiers applicable to ``typ``."""
        return typ.accept(self._FindModifiers(self))

    # NOTE(review): presumably a stripped ``@property``; verify.
    def generators(self) -> dict[(ProperType, OrderedSet[GenericAccessibleObject])]:
        return self.__generators

    # NOTE(review): presumably a stripped ``@property``; verify.
    def modifiers(self) -> dict[(TypeInfo, OrderedSet[GenericAccessibleObject])]:
        return self.__modifiers

    def get_random_accessible(self) -> (GenericAccessibleObject | None):
        """Return a random accessible object under test (None if empty)."""
        if (self.num_accessible_objects_under_test() == 0):
            return None
        return randomness.choice(self.__accessible_objects_under_test)

    def get_random_call_for(self, typ: ProperType) -> GenericAccessibleObject:
        """Return a random modifier for ``typ``; raises when none exist."""
        accessible_objects = self.get_modifiers_for(typ)
        if (len(accessible_objects) == 0):
            raise ConstructionFailedException(f'No modifiers for {typ}')
        return randomness.choice(accessible_objects)

    # NOTE(review): bare call below looks like a stripped caching decorator
    # (``cache_clear`` is invoked on this method above) — verify.
    _cache(maxsize=128)
    def get_all_generatable_types(self) -> list[ProperType]:
        """Return all types a value can be generated for: indexed generator
        types plus primitive and collection types."""
        generatable = OrderedSet(self.__generators.keys())
        generatable.update(self.type_system.primitive_proper_types)
        generatable.update(self.type_system.collection_proper_types)
        return list(generatable)

    def select_concrete_type(self, typ: ProperType) -> ProperType:
        """Resolve ANY to a random generatable type and unions to a random
        (recursively concretized) member."""
        if isinstance(typ, AnyType):
            typ = randomness.choice(self.get_all_generatable_types())
        if isinstance(typ, UnionType):
            typ = self.select_concrete_type(randomness.choice(typ.items))
        return typ

    def track_statistics_values(self, tracking_fun: Callable[([RuntimeVariable, Any], None)]) -> None:
        """Report cluster metrics through ``tracking_fun``."""
        tracking_fun(RuntimeVariable.AccessibleObjectsUnderTest, self.num_accessible_objects_under_test())
        tracking_fun(RuntimeVariable.GeneratableTypes, len(self.get_all_generatable_types()))
        cyclomatic_complexities = self.__compute_cyclomatic_complexities(self.function_data_for_accessibles.values())
        if (cyclomatic_complexities is not None):
            tracking_fun(RuntimeVariable.McCabeAST, json.dumps(cyclomatic_complexities))
        tracking_fun(RuntimeVariable.LineNos, self.__linenos)

    # NOTE(review): no ``self`` but called via ``self.`` above — presumably a
    # stripped ``@staticmethod``; verify.
    def __compute_cyclomatic_complexities(callable_data: typing.Iterable[_CallableData]) -> list[int]:
        """Collect the known cyclomatic complexities of the callables."""
        return [item.cyclomatic_complexity for item in callable_data if (item.cyclomatic_complexity is not None)]
class PsiOptimized(nn.Module):
    """Convolutional decoder head with an optional residual adapter and an
    optional down/up-sampling bottleneck before decoding.

    Args:
        dim: channel width of the intermediate feature maps.
        K: output feature size of the final linear layer.
        numclasses: kept for interface compatibility (currently unused in
            this module's layers).
        use_adapter: prepend a ``ResBlockAudio`` adapter to the input.
        adapter_reduce_dim: insert a stride-2 down-conv / transposed-conv
            pair around the adapter output before decoding.
    """

    def __init__(self, dim=128, K=100, numclasses=50, use_adapter=False, adapter_reduce_dim=True):
        super().__init__()
        self.use_adapter = use_adapter
        self.adapter_reduce_dim = adapter_reduce_dim
        if use_adapter:
            self.adapter = ResBlockAudio(dim)
        # BUG FIX: ``down``/``up`` were previously created only inside the
        # ``use_adapter`` branch, but forward() uses them whenever
        # ``adapter_reduce_dim`` is True (the default), which crashed with
        # AttributeError for the default ``use_adapter=False``.  They now
        # mirror exactly the condition under which forward() uses them.
        if adapter_reduce_dim:
            self.down = nn.Conv2d(dim, dim, 4, (2, 2), 1)
            self.up = nn.ConvTranspose2d(dim, dim, 4, (2, 2), 1)
        self.decoder = nn.Sequential(nn.ConvTranspose2d(dim, dim, 3, (2, 2), 1), nn.ReLU(True), nn.BatchNorm2d(dim), nn.ConvTranspose2d(dim, dim, 4, (2, 2), 1), nn.ReLU(), nn.BatchNorm2d(dim), nn.ConvTranspose2d(dim, dim, 4, (2, 2), 1), nn.ReLU(), nn.BatchNorm2d(dim), nn.ConvTranspose2d(dim, dim, 4, (2, 2), 1), nn.ReLU(), nn.BatchNorm2d(dim), nn.ConvTranspose2d(dim, 1, 12, 1, 1), nn.ReLU(), nn.Linear(513, K), nn.ReLU())
        self.apply(weights_init)

    def forward(self, hs):
        """Decode features ``hs``.

        Returns:
            (out, hcat): decoder output and the (possibly adapted and
            down-sampled) intermediate representation.
        """
        if self.use_adapter:
            hcat = self.adapter(hs)
        else:
            hcat = hs
        if self.adapter_reduce_dim:
            hcat = self.down(hcat)
            z_q_x_st = self.up(hcat)
            out = self.decoder(z_q_x_st)
        else:
            out = self.decoder(hcat)
        return (out, hcat)
class UniFormer(nn.Module):
    """UniFormer backbone: four stages of patch embedding followed by
    convolutional blocks (stages 1-2) and self-attention blocks (stages 3-4),
    with a BatchNorm + linear classification head."""

    def __init__(self, model_name: str='S', pretrained: str=None, num_classes: int=1000, *args, **kwargs) -> None:
        super().__init__()
        assert (model_name in uniformer_settings.keys()), f'UniFormer model name should be in {list(uniformer_settings.keys())}'
        # Per-stage block counts for the chosen variant.
        depth = uniformer_settings[model_name]
        head_dim = 64
        drop_path_rate = 0.0
        embed_dims = [64, 128, 320, 512]
        # patch_embed1..4: stage 1 patchifies 4x from RGB; later stages 2x.
        for i in range(4):
            self.add_module(f'patch_embed{(i + 1)}', PatchEmbed((4 if (i == 0) else 2), (3 if (i == 0) else embed_dims[(i - 1)]), embed_dims[i]))
        self.pos_drop = nn.Dropout(0.0)
        # Stochastic depth schedule, linearly spaced across all blocks
        # (all zeros here since drop_path_rate == 0.0).
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depth))]
        num_heads = [(dim // head_dim) for dim in embed_dims]
        self.blocks1 = nn.ModuleList([CBlock(embed_dims[0], dpr[i]) for i in range(depth[0])])
        self.blocks2 = nn.ModuleList([CBlock(embed_dims[1], dpr[(i + depth[0])]) for i in range(depth[1])])
        self.blocks3 = nn.ModuleList([SABlock(embed_dims[2], num_heads[2], dpr[((i + depth[0]) + depth[1])]) for i in range(depth[2])])
        self.blocks4 = nn.ModuleList([SABlock(embed_dims[3], num_heads[3], dpr[(((i + depth[0]) + depth[1]) + depth[2])]) for i in range(depth[3])])
        self.norm = nn.BatchNorm2d(embed_dims[(- 1)])
        self.head = nn.Linear(embed_dims[(- 1)], num_classes)
        self._init_weights(pretrained)

    def _init_weights(self, pretrained: str=None) -> None:
        """Load pretrained weights when a path is given, otherwise apply
        the default initialization scheme."""
        if pretrained:
            try:
                self.load_state_dict(torch.load(pretrained, map_location='cpu')['model'])
            except RuntimeError:
                # Shape mismatch (e.g. different num_classes): drop the last
                # two entries (head weight/bias) and load non-strictly.
                pretrained_dict = torch.load(pretrained, map_location='cpu')['model']
                pretrained_dict.popitem()
                pretrained_dict.popitem()
                self.load_state_dict(pretrained_dict, strict=False)
            finally:
                # NOTE(review): printed even if loading raised an unexpected
                # error type — confirm this is intended.
                print(f'Loaded imagenet pretrained from {pretrained}')
        else:
            for (n, m) in self.named_modules():
                if isinstance(m, nn.Linear):
                    if n.startswith('head'):
                        # Classification head starts at zero.
                        nn.init.zeros_(m.weight)
                        nn.init.zeros_(m.bias)
                    else:
                        nn.init.xavier_uniform_(m.weight)
                        if (m.bias is not None):
                            nn.init.zeros_(m.bias)
                elif isinstance(m, nn.LayerNorm):
                    nn.init.ones_(m.weight)
                    nn.init.zeros_(m.bias)
                elif isinstance(m, nn.Conv2d):
                    nn.init.xavier_uniform_(m.weight)
                    if (m.bias is not None):
                        nn.init.zeros_(m.bias)

    def forward(self, x: torch.Tensor):
        """Run the four stages and classify via global average pooling."""
        x = self.patch_embed1(x)
        x = self.pos_drop(x)
        for blk in self.blocks1:
            x = blk(x)
        x = self.patch_embed2(x)
        for blk in self.blocks2:
            x = blk(x)
        x = self.patch_embed3(x)
        for blk in self.blocks3:
            x = blk(x)
        x = self.patch_embed4(x)
        for blk in self.blocks4:
            x = blk(x)
        x = self.norm(x)
        # Global average pool over spatial positions, then classify.
        x = self.head(x.flatten(2).mean(2))
        return x
class Function_limit(BuiltinFunction):
    """Symbolic ``limit`` function with custom LaTeX typesetting."""

    def __init__(self):
        BuiltinFunction.__init__(self, 'limit', nargs=0, conversions=dict(maxima='limit'))

    def _latex_(self):
        return '\\lim'

    def _print_latex_(self, ex, var, to, direction=''):
        """Typeset ``limit(ex, var, to, direction)`` as a LaTeX limit.

        The one-sided direction is recognized through the repr of the
        ``direction`` argument ('minus' or 'plus').
        """
        superscripts = {'minus': '^-', 'plus': '^+'}
        dir_str = superscripts.get(repr(direction), '')
        return '\\lim_{{{} \\to {}{}}}\\, {}'.format(latex(var), latex(to), dir_str, latex(ex))
def create_image_vectors(images):
    """Map each key of ``images`` to a flattened VGG16 feature vector.

    Relies on module-level ``image`` (keras preprocessing), ``preprocess_input``
    and ``model`` being available.
    """
    img_vectors = {}
    for name, img_obj in images.items():
        # Convert to a single-image batch in the network's input format.
        batch = image.img_to_array(img_obj)
        batch = np.expand_dims(batch, axis=0)
        batch = preprocess_input(batch)
        features = model.predict(batch)
        img_vectors[name] = np.array(features).flatten()
        print(name)
    return img_vectors
class STNClsNet(nn.Module):
    """Spatial-transformer classification network: a localization net
    predicts TPS control points, the input is warped accordingly, and the
    warped image is classified."""

    def __init__(self, args):
        super(STNClsNet, self).__init__()
        self.args = args
        # Span of the control-point grid, as a fraction of the image
        # (must be strictly inside the normalized [-1, 1] range).
        r1 = args.span_range_height
        r2 = args.span_range_width
        assert ((r1 < 1) and (r2 < 1))
        # Regular (grid_height x grid_width) lattice of target control
        # points over [-r1, r1] x [-r2, r2]; the 1e-05 keeps the upper
        # endpoint inside np.arange despite float rounding.
        target_control_points = torch.Tensor(list(itertools.product(np.arange((- r1), (r1 + 1e-05), ((2.0 * r1) / (args.grid_height - 1))), np.arange((- r2), (r2 + 1e-05), ((2.0 * r2) / (args.grid_width - 1))))))
        # itertools.product yields (row, col); swap to (x, y) order.
        (Y, X) = target_control_points.split(1, dim=1)
        target_control_points = torch.cat([X, Y], dim=1)
        # Choose bounded vs unbounded localization net from args.model.
        GridLocNet = {'unbounded_stn': UnBoundedGridLocNet, 'bounded_stn': BoundedGridLocNet}[args.model]
        self.loc_net = GridLocNet(args.grid_height, args.grid_width, target_control_points)
        self.tps = TPSGridGen(args.image_height, args.image_width, target_control_points)
        self.cls_net = ClsNet()

    def forward(self, x):
        """Warp ``x`` with the predicted TPS transform and classify it."""
        batch_size = x.size(0)
        source_control_points = self.loc_net(x)
        source_coordinate = self.tps(source_control_points)
        # Sampling grid of shape (N, H, W, 2) for grid_sample.
        grid = source_coordinate.view(batch_size, self.args.image_height, self.args.image_width, 2)
        transformed_x = grid_sample(x, grid)
        logit = self.cls_net(transformed_x)
        return logit
class CCRStructure(Structure):
    """ctypes view of a clang code-completion results array.

    Exposes the C array of ``CodeCompletionResult`` as a Python sequence.
    """

    _fields_ = [('results', POINTER(CodeCompletionResult)), ('numResults', c_int)]

    def __len__(self):
        return self.numResults

    def __getitem__(self, key):
        # BUG FIX: the original only rejected keys >= len(self); a negative
        # key silently indexed the ctypes POINTER out of bounds (reading
        # arbitrary memory).  Reject anything outside [0, len).
        if not 0 <= key < len(self):
            raise IndexError
        return self.results[key]
def coordinated_get(coordinator, queue):
    """Block on ``queue.get`` (1s timeout slices) until an item arrives.

    Re-checks ``coordinator.should_stop()`` between timeouts; raises
    ``Exception`` if the coordinator stops before an item is obtained.
    """
    while not coordinator.should_stop():
        try:
            item = queue.get(block=True, timeout=1.0)
        except Queue.Empty:
            # Timed out with nothing queued; poll the coordinator again.
            continue
        return item
    raise Exception('Coordinator stopped during get()')
class ZipReader(object):
    """Reader for files addressed with '<archive>.zip@<inner/path>' paths.

    A process-wide cache (``zip_bank``) keeps each archive open so repeated
    reads do not re-open the zip file.
    """

    # Cache of open zipfile.ZipFile handles, keyed by archive path.
    zip_bank = dict()

    def __init__(self):
        super(ZipReader, self).__init__()

    @staticmethod
    def get_zipfile(path):
        """Return a (cached) open ZipFile for the archive at ``path``."""
        zip_bank = ZipReader.zip_bank
        if path not in zip_bank:
            zip_bank[path] = zipfile.ZipFile(path, 'r')
        return zip_bank[path]

    @staticmethod
    def split_zip_style_path(path):
        """Split '<zip>@<inner>' into (zip_path, folder_path).

        The inner path is stripped of leading/trailing slashes.
        """
        # BUG FIX: the separator character had been lost — ``path.index('')``
        # always returns 0, so zip_path was always empty.  Restore the '@'
        # separator (str.index raises ValueError when it is absent, so the
        # assert is a belt-and-braces message for readers).
        pos_at = path.index('@')
        assert pos_at != -1, "character '@' is not found from the given path '%s'" % path
        zip_path = path[0:pos_at]
        folder_path = path[(pos_at + 1):]
        folder_path = str.strip(folder_path, '/')
        return (zip_path, folder_path)

    @staticmethod
    def list_folder(path):
        """List sub-folder entries (no extension) under the zip-style path,
        relative to the inner folder."""
        (zip_path, folder_path) = ZipReader.split_zip_style_path(path)
        zfile = ZipReader.get_zipfile(zip_path)
        folder_list = []
        for entry_name in zfile.namelist():
            entry_name = str.strip(entry_name, '/')
            # Keep entries under folder_path that have no extension and are
            # not the folder itself.
            if (entry_name.startswith(folder_path) and (len(os.path.splitext(entry_name)[(- 1)]) == 0) and (entry_name != folder_path)):
                if (len(folder_path) == 0):
                    folder_list.append(entry_name)
                else:
                    folder_list.append(entry_name[(len(folder_path) + 1):])
        return folder_list

    @staticmethod
    def list_files(path, extension=None):
        """List files under the zip-style path whose (lower-cased) extension
        is in ``extension``; defaults to the literal ['.*']."""
        if extension is None:
            extension = ['.*']
        (zip_path, folder_path) = ZipReader.split_zip_style_path(path)
        zfile = ZipReader.get_zipfile(zip_path)
        file_lists = []
        for entry_name in zfile.namelist():
            entry_name = str.strip(entry_name, '/')
            if (entry_name.startswith(folder_path) and (str.lower(os.path.splitext(entry_name)[(- 1)]) in extension)):
                if (len(folder_path) == 0):
                    file_lists.append(entry_name)
                else:
                    file_lists.append(entry_name[(len(folder_path) + 1):])
        return file_lists

    @staticmethod
    def read(path):
        """Return the raw bytes of the file at the zip-style ``path``."""
        (zip_path, path_img) = ZipReader.split_zip_style_path(path)
        zfile = ZipReader.get_zipfile(zip_path)
        return zfile.read(path_img)

    @staticmethod
    def imread(path):
        """Return a PIL image from the zip-style ``path``; on decode failure
        log the path and return a random 224x224 RGB placeholder."""
        (zip_path, path_img) = ZipReader.split_zip_style_path(path)
        zfile = ZipReader.get_zipfile(zip_path)
        data = zfile.read(path_img)
        try:
            im = Image.open(io.BytesIO(data))
        except Exception:
            print('ERROR IMG LOADED: ', path_img)
            random_img = (np.random.rand(224, 224, 3) * 255)
            im = Image.fromarray(np.uint8(random_img))
        return im
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.