| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 996k |
| avg_line_length | float64 | 1.33 to 58.2k |
| max_line_length | int64 | 2 to 323k |
| alphanum_fraction | float64 | 0 to 0.97 |
| content_no_comment | string | length 0 to 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
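Each record below is one row of this table. A minimal sketch of reading such rows, assuming they have been exported as JSON Lines keyed by the column names above (the file name rows.jsonl is hypothetical):

import json

# Hypothetical export: one JSON object per line, keyed by the schema columns above.
with open('rows.jsonl') as fh:
    for line in fh:
        row = json.loads(line)
        print(row['max_stars_repo_name'], row['ext'], row['size'])
        print(row['content'][:200])  # first 200 characters of the stored source file
        break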
hexsha: 7904fd2e64a726da8dffb8b826b5534ad68a94f5
size: 10,351
ext: py
lang: Python
max_stars_repo_path: crf-seq/sets/sets/4/seq_detect_1p.py
max_stars_repo_name: roma-patel/lstm-crf
max_stars_repo_head_hexsha: 25012b1218b60090f467fe5ed5a15d7a28b3134c
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-02-24T06:25:17.000Z
max_stars_repo_stars_event_max_datetime: 2020-02-24T06:25:17.000Z
max_issues_repo_path: crf-seq/sets/sets/4/seq_detect_1p.py
max_issues_repo_name: roma-patel/lstm-crf
max_issues_repo_head_hexsha: 25012b1218b60090f467fe5ed5a15d7a28b3134c
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: crf-seq/sets/sets/4/seq_detect_1p.py
max_forks_repo_name: roma-patel/lstm-crf
max_forks_repo_head_hexsha: 25012b1218b60090f467fe5ed5a15d7a28b3134c
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
import pycrfsuite
import sklearn
from itertools import chain
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import LabelBinarizer
import re
import json
annotypes = ['Participants', 'Intervention', 'Outcome']
annotype = annotypes[0]
path = '/nlp/data/romap/crf/'
#path = '/Users/romapatel/Desktop/crf/'
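# Overview: trains a linear-chain CRF with pycrfsuite to tag Participant spans ('1' = inside
# a span, '0' = outside) in PICO-annotated documents, using word-shape, POS, indicator-word
# and trigram-pattern features, then writes predicted and gold spans for the test set to JSON.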
def run():
train_sents, test_sents = get_train_test_sets()
print len(test_sents)
indwords_list = get_ind_words()
patterns_list = get_patterns()
X_train = [sent_features(train_sents[docid], indwords_list, patterns_list) for docid in train_sents.keys()]
y_train = [sent_labels(train_sents[docid]) for docid in train_sents.keys()]
X_test = [sent_features(test_sents[docid], indwords_list, patterns_list) for docid in test_sents.keys()]
y_test = [sent_labels(test_sents[docid]) for docid in test_sents.keys()]
trainer = pycrfsuite.Trainer(verbose=False)
for xseq, yseq in zip(X_train, y_train):
trainer.append(xseq, yseq)
trainer.set_params({'c1': 1.0,'c2': 1e-3, 'max_iterations': 50, 'feature.possible_transitions': True})
trainer.train('PICO.crfsuite')
tagger = pycrfsuite.Tagger()
tagger.open('PICO.crfsuite')
get_results(test_sents, tagger, indwords_list, patterns_list)
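# Tag each test document, convert the predicted and gold 0/1 label sequences into
# [start, end) token spans, and write them to JSON keyed by document id.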
def get_results(test_sents, tagger, indwords_list, patterns_list):
f1 = open(path + 'sets/4/' + annotype + '-test_pred.json', 'w+')
f2 = open(path + 'sets/4/' + annotype + '-test_correct.json', 'w+')
pred_dict, correct_dict = {}, {}
for docid in test_sents:
pred, correct = tagger.tag(sent_features(test_sents[docid], indwords_list, patterns_list)), sent_labels(test_sents[docid])
spans, span, outside = [], [], True
for i in range(len(pred)):
if pred[i] == '0' and outside is True: continue
elif pred[i] == '0' and outside is False:
span.append(i+1)
spans.append(span)
span, outside = [], True
elif pred[i] == '1' and outside is False: continue
elif pred[i] == '1' and outside is True:
outside = False
span.append(i)
pred_dict[docid] = spans
spans, span, outside = [], [], True
for i in range(len(correct)):
if correct[i] == '0' and outside is True: continue
elif correct[i] == '0' and outside is False:
span.append(i+1)
spans.append(span)
span, outside = [], True
elif correct[i] == '1' and outside is False: continue
elif correct[i] == '1' and outside is True:
outside = False
span.append(i)
correct_dict[docid] = spans
f1.write(json.dumps(pred_dict))
f2.write(json.dumps(correct_dict))
def get_ind_words():
fin_list = []
for annotype in annotypes:
list = []
#filename = annotype.lower() + '_words.txt'
filename = annotype.lower() + '_unigrams.tsv'
f = open(path + 'crf_files/' + filename, 'r')
for line in f:
#word = line[:-1]
items = line.split('\t')
word = items[1][:-1]
if word not in list:
list.append(word)
if annotype == 'Intervention':
f = open(path + 'crf_files/drug_names.txt', 'r')
for line in f:
word = line[:-1]
if word not in list:
list.append(word)
fin_list.append(list)
indwords = [fin_list[0], fin_list[1], fin_list[2]]
return indwords
#all lowercased
def get_patterns():
fin_list = []
for annotype in annotypes:
list = []
#filename = annotype.lower() + '_pattern_copy.txt'
filename = annotype.lower() + '_trigrams3.tsv'
f = open(path + 'crf_files/' + filename, 'r')
for line in f:
#word = line[:-1].lower()
word = line[:-1].split('\t')
word = word[1]
if word not in list:
list.append(word)
fin_list.append(list)
patterns = [fin_list[0], fin_list[1], fin_list[2]]
return patterns
def isindword(word, annotype, indwords_list):
if annotype == annotypes[0]: list = indwords_list[0]
elif annotype == annotypes[1]: list = indwords_list[1]
else: list = indwords_list[2]
f = open(path + 'crf_files/numbers.txt', 'r')
for line in f:
if line[:-1] in word.lower():
return True
if word.lower() in list or word.lower()[:-1] in list or word.lower()[-3:] in list: return True
else: return False
def ispattern(word, pos, annotype, pattern_list):
    if annotype == annotypes[0]: list = pattern_list[0]
    elif annotype == annotypes[1]: list = pattern_list[1]
    else: list = pattern_list[2]
    #check the annotype-specific pattern list; only report a miss after every pattern has been tried
    for pattern in list:
        if word.lower() in pattern or pos.lower() in pattern: return True
    return False
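# Feature template for token i: word shape (case, digits, prefix/suffix substrings), POS tag,
# indicator-word and pattern lookups, plus the same cues for up to two tokens on either side;
# 'BOS'/'EOS' mark the sentence boundaries.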
def word_features(sent, i, indwords_list, pattern_list):
word = sent[i][0]
postag = sent[i][2]
features = ['bias', 'word.lower=' + word.lower(),'word[-3:]=' + word[-3:],
'word[-4:]=' + word[-4:],'word.isupper=%s' % word.isupper(),
'word.istitle=%s' % word.istitle(), 'word.isdigit=%s' % word.isdigit(),
'postag=' + postag, 'isindword=%s' % isindword(word, annotype, indwords_list),
'word[0:4]=' + word[0:4], 'ispattern=%s' % ispattern(word, postag, annotype, pattern_list)]
    #previous-previous word (i-2); use a distinct '-2:' prefix so these features do not collide with the i-1 features below
    if i > 1:
        word1 = sent[i-2][0]
        postag1 = sent[i-2][2]
        features.extend(['-2:word.lower=' + word1.lower(), '-2:word.istitle=%s' % word1.istitle(),
                         '-2:word.isupper=%s' % word1.isupper(), '-2:postag=' + postag1,
                         'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
                         'word[-3:]=' + word[-3:]])
#previous word
if i > 0:
word1 = sent[i-1][0]
postag1 = sent[i-1][2]
features.extend(['-1:word.lower=' + word1.lower(), '-1:word.istitle=%s' % word1.istitle(),
'-1:word.isupper=%s' % word1.isupper(), '-1:postag=' + postag1,
'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
'word[-3:]=' + word[-3:], 'ispattern=%s' % ispattern(word, postag, annotype, pattern_list)])
else:
features.append('BOS')
    #next-next word (i+2); use a distinct '+2:' prefix so these features do not collide with the i+1 features below
    if i < len(sent)-2:
        word1 = sent[i+2][0]
        postag1 = sent[i+2][2]
        features.extend(['+2:word.lower=' + word1.lower(), '+2:word.istitle=%s' % word1.istitle(),
                         '+2:word.isupper=%s' % word1.isupper(), '+2:postag=' + postag1,
                         'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
                         'word[-3:]=' + word[-3:]])
#next word
if i < len(sent)-1:
word1 = sent[i+1][0]
postag1 = sent[i+1][2]
features.extend(['+1:word.lower=' + word1.lower(), '+1:word.istitle=%s' % word1.istitle(),
'+1:word.isupper=%s' % word1.isupper(), '+1:postag=' + postag1,
'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
'word[-3:]=' + word[-3:], 'ispattern=%s' % ispattern(word, postag, annotype, pattern_list)])
else:
features.append('EOS')
return features
def sent_features(sent, indwords_list, patterns_list):
return [word_features(sent, i, indwords_list, patterns_list) for i in range(len(sent))]
def sent_labels(sent):
return [str(p_label) for token, ner, postag, p_label, i_label, o_label in sent]
def sent_tokens(sent):
return [token for token, ner, postag, p_label, i_label, o_label in sent]
def print_results(example_sent, tagger, indwords_list, patterns_list, docid, dict):
    pred, correct = tagger.tag(sent_features(example_sent, indwords_list, patterns_list)), sent_labels(example_sent)
spans, span, outside = [], [], True
for i in range(len(pred)):
if pred[i] == '0' and outside is True: continue
elif pred[i] == '0' and outside is False:
span.append(i+1)
spans.append(span)
span, outside = [], True
elif pred[i] == '1' and outside is False: continue
elif pred[i] == '1' and outside is True:
outside = False
span.append(i)
f = open(path + annotype + '-test.json', 'w+')
print '\n\nPredicted: ' + str(spans)
for span in spans:
s = ' '
for i in range(span[0], span[1]):
s += example_sent[i][0] + ' '
print s
spans, span, outside = [], [], True
for i in range(len(correct)):
if correct[i] == '0' and outside is True: continue
elif correct[i] == '0' and outside is False:
span.append(i+1)
spans.append(span)
span, outside = [], True
elif correct[i] == '1' and outside is False: continue
elif correct[i] == '1' and outside is True:
outside = False
span.append(i)
print '\n\nCorrect: ' + str(spans)
for span in spans:
s = ' '
for i in range(span[0], span[1]):
s += example_sent[i][0] + ' '
print s
def get_training_data():
f = open(path + 'crf_files/difficulty_crf_mv.json', 'r')
for line in f:
dict = json.loads(line)
return dict
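# Build the train/test split: documents listed in gold_docids.txt become the test set, the
# rest go to training, and extra documents from difficulty_new.json (after the first 9,480)
# are appended to the training set.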
def get_train_test_sets():
test_docids = []
f = open(path + 'crf_files/gold_docids.txt', 'r')
for line in f:
test_docids.append(line[:-1])
doc_dict = get_training_data()
test_sents, train_sents = {}, {}
count = 0
for docid in doc_dict:
sents = doc_dict[docid]
if len(sents) == 0: continue
count += 1
#if count >= 100: break
if docid not in test_docids:
train_sents[docid] = sents
else:
test_sents[docid] = sents
f = open(path + 'difficulty_new.json', 'r')
for line in f:
doc_dict_new = json.loads(line)
count = 1
for docid in doc_dict_new:
if docid in train_sents.keys(): continue
if count < 9481:
count += 1
continue
train_sents[docid] = doc_dict_new[docid]
count += 1
return train_sents, test_sents
if __name__ == '__main__':
run()
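A minimal sketch of reusing the trained model outside this script, assuming PICO.crfsuite was produced by run() above; the module name seq_detect_1p comes from the repo path, and sent is assumed to follow the same 6-tuple token format used throughout:

import pycrfsuite
from seq_detect_1p import sent_features, get_ind_words, get_patterns  # module name taken from the repo path

tagger = pycrfsuite.Tagger()
tagger.open('PICO.crfsuite')        # model file written by trainer.train() in run()
indwords_list = get_ind_words()     # rebuild the same lexicon and pattern resources as training
patterns_list = get_patterns()
sent = [...]                        # one (token, ner, postag, p_label, i_label, o_label) tuple per token
labels = tagger.tag(sent_features(sent, indwords_list, patterns_list))  # one '0'/'1' label per token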
avg_line_length: 38.195572
max_line_length: 130
alphanum_fraction: 0.575983
is_comment_constant_removed: false
is_sharp_comment_removed: true
hexsha: 7904fd41e8a90447ad7f352d2062faa044f1b8b9
size: 266,956
ext: py
lang: Python
max_stars_repo_path: run_slurm.py
max_stars_repo_name: wang3702/barlowtwins
max_stars_repo_head_hexsha: 6d1dc9d31f8f3c87fa4148b7dada0fe9e34805d1
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: run_slurm.py
max_issues_repo_name: wang3702/barlowtwins
max_issues_repo_head_hexsha: 6d1dc9d31f8f3c87fa4148b7dada0fe9e34805d1
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: run_slurm.py
max_forks_repo_name: wang3702/barlowtwins
max_forks_repo_head_hexsha: 6d1dc9d31f8f3c87fa4148b7dada0fe9e34805d1
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
import os
import argparse
from ops.os_operation import mkdir
import time
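# run_slurm.py: helpers that render SBATCH batch scripts (job name, log paths, partition,
# node/GPU/CPU/memory requests, conda environment activation, srun launch line) and submit
# them with `sbatch`; the argparse block at the bottom maps --mode/--type choices to the
# main_adco.py experiment command lines used in this repo.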
def write_slurm_sh_multi_H2(id,command_line, queue_name="learnfair",nodes=1,
gpu_per_node=8,wall_time=3*24*60,username="wang3702",CPU_PER_GPU=8):
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dependency_handler_path = os.path.join(os.getcwd(), "ops")
dependency_handler_path = os.path.join(dependency_handler_path, "handler.txt")
run_path = os.path.join(os.getcwd(), "log")
mkdir(run_path)
run_path = os.path.abspath(run_path)
prefix = "node%d_gpu%d"%(nodes,gpu_per_node)
batch_file = os.path.join(run_path, prefix+"slurm_job_" + str(id) + ".sh")
output_path = os.path.join(run_path, prefix+"output_" + str(id) + "_" + str(formatted_today + now) + ".log")
error_path = os.path.join(run_path, prefix+"error_" + str(id) + "_" + str(formatted_today + now) + ".log")
with open(batch_file, "w") as file:
file.write("#! /bin/bash\n")#!/bin/bash
file.write("#SBATCH --job-name=%s\n" % id)
file.write("#SBATCH --output=%s\n" % output_path)
file.write("#SBATCH --error=%s\n" % error_path)
file.write("#SBATCH --partition=%s\n"%queue_name)
file.write("#SBATCH --signal=USR1@600\n")
file.write("#SBATCH --nodes=%d\n" % nodes)
file.write("#SBATCH --ntasks-per-node=%d\n" % 1)
file.write("#SBATCH --mem=%dG\n"%(350/8*gpu_per_node))
file.write("#SBATCH --gpus=%d\n" % (nodes * gpu_per_node))
file.write("#SBATCH --gpus-per-node=%d\n" % (gpu_per_node))
file.write("#SBATCH --cpus-per-task=%d\n"%(CPU_PER_GPU*gpu_per_node))
file.write("#SBATCH --time=%d\n"%wall_time)
file.write("#SBATCH --mail-user=%s@fb.com\n"%username)
file.write("#SBATCH --mail-type=FAIL\n")
file.write("#SBATCH --mail-type=end \n")
file.write('#SBATCH --constraint="volta"\n')
report_info = "%s job failed; \t" % id
report_info += "log path: %s; \t" % output_path
report_info += "error record path: %s\t" % error_path
report_info += "command line path: %s\t" % batch_file
file.write('#SBATCH --comment="%s"\n' % (report_info))
with open(dependency_handler_path, 'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
file.write("export GLOO_SOCKET_IFNAME=\nexport NCCL_SOCKET_IFNAME=\n")
file.write("module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n")
#file.write("bash /private/home/wang3702/.bashrc\n")
#file.write("module load anaconda3\n")
file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
file.write("conda activate pytorch2\n")
file.write("master_node=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:4}\n")
file.write('dist_url="tcp://"\n')
file.write("dist_url+=$master_node\n")
file.write("dist_url+=:40000\n")
file.write("export MASTER_ADDR=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:4}\n")
file.write("export MASTER_PORT=29500\n")
file.write("srun --label "+command_line + " --slurm=1 --dist_url=$dist_url &\n")
file.write("wait $!\n")
file.write("set +x \n")
file.write("echo ..::Job Finished, but No, AGI is to BE Solved::.. \n")
# signal that job is finished
os.system('sbatch ' + batch_file)
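# Recursively search current_dir and return the full path of the first entry named
# checkpoint_name, or None if nothing matches.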
def find_checkpoint(current_dir,checkpoint_name):
if not os.path.isdir(current_dir):
return None
listfiles = os.listdir(current_dir)
for item in listfiles:
sub_dir = os.path.join(current_dir,item)
if item==checkpoint_name:
return sub_dir
elif os.path.isdir(sub_dir):
search_result = find_checkpoint(sub_dir,checkpoint_name)
if search_result is not None:
return search_result
return None
def write_slurm_sh_multi(id,command_line, queue_name="learnfair",nodes=1,
gpu_per_node=8,wall_time=3*24*60,username="wang3702",
CPU_PER_GPU=8,gpu_memory=False,environment=0):
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dependency_handler_path = os.path.join(os.getcwd(), "ops")
dependency_handler_path = os.path.join(dependency_handler_path, "handler.txt")
run_path = os.path.join(os.getcwd(), "log")
mkdir(run_path)
run_path = os.path.abspath(run_path)
prefix = "node%d_gpu%d"%(nodes,gpu_per_node)
batch_file = os.path.join(run_path, prefix+"slurm_job_" + str(id) + ".sh")
output_path = os.path.join(run_path, prefix+"output_" + str(id) + "_" + str(formatted_today + now) + ".log")
error_path = os.path.join(run_path, prefix+"error_" + str(id) + "_" + str(formatted_today + now) + ".log")
with open(batch_file, "w") as file:
file.write("#! /bin/bash\n")#!/bin/bash
file.write("#SBATCH --job-name=%s\n" % id)
file.write("#SBATCH --output=%s\n" % output_path)
file.write("#SBATCH --error=%s\n" % error_path)
file.write("#SBATCH --partition=%s\n"%queue_name)
file.write("#SBATCH --signal=USR1@600\n")
file.write("#SBATCH --nodes=%d\n" % nodes)
file.write("#SBATCH --ntasks-per-node=%d\n" % 1)
file.write("#SBATCH --mem=%dG\n"%(350/8*gpu_per_node))#--mem : Specify the real memory required per node.
file.write("#SBATCH --gpus=%d\n" % (nodes * gpu_per_node))
file.write("#SBATCH --gpus-per-node=%d\n" % (gpu_per_node))
file.write("#SBATCH --cpus-per-task=%d\n"%(CPU_PER_GPU*gpu_per_node))
file.write("#SBATCH --time=%d\n"%wall_time)
file.write("#SBATCH --mail-user=%s@fb.com\n"%username)
file.write("#SBATCH --mail-type=FAIL\n")
file.write("#SBATCH --mail-type=end \n")
if gpu_memory is False:
file.write('#SBATCH --constraint="volta"\n')
else:
file.write('#SBATCH --constraint="volta32gb"\n')
#file.write('#SBATCH --constraint="volta"\n')
report_info = "%s job failed; \t" % id
report_info += "log path: %s; \t" % output_path
report_info += "error record path: %s\t" % error_path
report_info += "command line path: %s\t" % batch_file
file.write('#SBATCH --comment="%s"\n' % (report_info))
with open(dependency_handler_path, 'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
file.write("export GLOO_SOCKET_IFNAME=\nexport NCCL_SOCKET_IFNAME=\n")
file.write("module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n")
#file.write("bash /private/home/wang3702/.bashrc\n")
file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
#file.write("module load anaconda3\n")
file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
if environment==0:
file.write("conda activate pytorch2\n")
else:
file.write("conda activate pytorch\n")
file.write("master_node=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:4}\n")
file.write('dist_url="tcp://"\n')
file.write("dist_url+=$master_node\n")
file.write("dist_url+=:40000\n")
file.write("export MASTER_ADDR=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:4}\n")
file.write("export MASTER_PORT=29500\n")
file.write("srun --label "+command_line + " --slurm=1 --dist_url=$dist_url &\n")
file.write("wait $!\n")
file.write("set +x \n")
file.write("echo ..::Job Finished, but No, AGI is to BE Solved::.. \n")
# signal that job is finished
os.system('sbatch ' + batch_file)
def write_slurm_sh_multi2(id,command_line, queue_name="learnfair",nodes=1,
gpu_per_node=8,wall_time=3*24*60,username="wang3702",CPU_PER_GPU=8,
gpu_memory=False,environment=0):
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dependency_handler_path = os.path.join(os.getcwd(), "ops")
dependency_handler_path = os.path.join(dependency_handler_path, "handler.txt")
run_path = os.path.join(os.getcwd(), "log")
mkdir(run_path)
run_path = os.path.abspath(run_path)
prefix = "node%d_gpu%d"%(nodes,gpu_per_node)
batch_file = os.path.join(run_path, prefix+"slurm_job_" + str(id) + ".sh")
output_path = os.path.join(run_path, prefix+"output_" + str(id) + "_" + str(formatted_today + now) + ".log")
error_path = os.path.join(run_path, prefix+"error_" + str(id) + "_" + str(formatted_today + now) + ".log")
with open(batch_file, "w") as file:
file.write("#! /bin/bash\n")#!/bin/bash
file.write("#SBATCH --job-name=%s\n" % id)
file.write("#SBATCH --output=%s\n" % output_path)
file.write("#SBATCH --error=%s\n" % error_path)
file.write("#SBATCH --partition=%s\n"%queue_name)
file.write("#SBATCH --signal=USR1@600\n")
file.write("#SBATCH --nodes=%d\n" % nodes)
file.write("#SBATCH --ntasks-per-node=%d\n" % 1)
file.write("#SBATCH --mem=%dG\n"%(350/8*gpu_per_node))
file.write("#SBATCH --gpus=%d\n" % (nodes * gpu_per_node))
file.write("#SBATCH --gpus-per-node=%d\n" % (gpu_per_node))
file.write("#SBATCH --cpus-per-task=%d\n"%(CPU_PER_GPU*gpu_per_node))
file.write("#SBATCH --time=%d\n"%wall_time)
file.write("#SBATCH --mail-user=%s@fb.com\n"%username)
file.write("#SBATCH --mail-type=FAIL\n")
file.write("#SBATCH --mail-type=end \n")
if gpu_memory is False:
file.write('#SBATCH --constraint="volta"\n')
else:
file.write('#SBATCH --constraint="volta32gb"\n')
report_info = "%s job failed; \t" % id
report_info += "log path: %s; \t" % output_path
report_info += "error record path: %s\t" % error_path
report_info += "command line path: %s\t" % batch_file
file.write('#SBATCH --comment="%s"\n' % (report_info))
with open(dependency_handler_path, 'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
file.write("export GLOO_SOCKET_IFNAME=\nexport NCCL_SOCKET_IFNAME=\n")
file.write("module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n")
#file.write("bash /private/home/wang3702/.bashrc\n")
# file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
#file.write("module load anaconda3\n")
file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
if environment==0:
file.write("conda activate pytorch2\n")
else:
file.write("conda activate pytorch\n")
#file.write("source activate\n")
file.write("master_node=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:3}\n")
file.write('dist_url="tcp://"\n')
file.write("dist_url+=$master_node\n")
file.write("dist_url+=:40000\n")
file.write("export MASTER_ADDR=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:3}\n")
file.write("export MASTER_PORT=29500\n")
file.write("srun --label "+command_line + " &\n")
file.write("wait $!\n")
file.write("set +x \n")
file.write("echo ..::Job Finished, but No, AGI is to BE Solved::.. \n")
# signal that job is finished
os.system('sbatch ' + batch_file)
def write_slurm_sh_faster(id,command_line, queue_name="learnfair",nodes=1,
gpu_per_node=8,wall_time=3*24*60,username="wang3702",CPU_PER_GPU=8,
gpu_memory=False,environment=0):
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dependency_handler_path = os.path.join(os.getcwd(), "ops")
dependency_handler_path = os.path.join(dependency_handler_path, "handler.txt")
run_path = os.path.join(os.getcwd(), "log")
mkdir(run_path)
run_path = os.path.abspath(run_path)
batch_file = os.path.join(run_path, "slurm_job_" + str(id) + ".sh")
output_path = os.path.join(run_path, "output_" + str(id) + "_" + str(formatted_today + now) + ".log")
error_path = os.path.join(run_path, "error_" + str(id) + "_" + str(formatted_today + now) + ".log")
with open(batch_file, "w") as file:
file.write("#!/bin/bash\n")#!/bin/bash
file.write("#SBATCH --job-name=%s\n" % id)
file.write("#SBATCH --output=%s\n" % output_path)
file.write("#SBATCH --error=%s\n" % error_path)
file.write("#SBATCH --partition=%s\n"%queue_name)
file.write("#SBATCH --signal=USR1@600\n")
file.write("#SBATCH --nodes=%d\n" % nodes)
file.write("#SBATCH --ntasks-per-node=%d\n" % gpu_per_node)
file.write("#SBATCH --mem=%dG\n"%(int(350/8*gpu_per_node)))
file.write("#SBATCH --gpus=%d\n" % (nodes * gpu_per_node))
file.write("#SBATCH --cpus-per-task=%d\n"%(CPU_PER_GPU))
file.write("#SBATCH --time=%d\n"%wall_time)
file.write("#SBATCH --mail-user=%s@fb.com\n"%username)
file.write("#SBATCH --mail-type=FAIL\n")
file.write("#SBATCH --mail-type=end \n")
if gpu_memory:
file.write('#SBATCH --constraint="volta32gb"\n')
else:
file.write('#SBATCH --constraint="volta"\n')
report_info = "%s job failed; \t" % id
report_info += "log path: %s; \t" % output_path
report_info += "error record path: %s\t" % error_path
report_info += "command line path: %s\t" % batch_file
file.write('#SBATCH --comment="%s"\n' % (report_info))
with open(dependency_handler_path, 'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
file.write("module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n")
#file.write("bash /private/home/wang3702/.bashrc\n")
file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
#file.write("module load anaconda3\n")
file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
if environment==0:
file.write("conda activate pytorch2\n")
else:
file.write("conda activate pytorch\n")
#file.write("source activate\n")
file.write(command_line + " &\n")
file.write("wait $!\n")
file.write("set +x \n")
file.write("echo ..::Job Finished, but No, AGI is to BE Solved::.. \n")
# signal that job is finished
os.system('sbatch ' + batch_file)
def write_slurm_sh(id,command_line, queue_name="learnfair",nodes=1,
gpu_per_node=8,wall_time=3*24*60,username="wang3702",CPU_PER_GPU=10):
"""
Args:
id: running id
command_line: command line
outlog_path: saving path
Returns:
"""
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dependency_handler_path = os.path.join(os.getcwd(),"ops")
dependency_handler_path = os.path.join(dependency_handler_path,"handler.txt")
run_path = os.path.join(os.getcwd(),"log")
mkdir(run_path)
run_path = os.path.abspath(run_path)
batch_file = os.path.join(run_path,"slurm_job_"+str(id)+".sh")
output_path = os.path.join(run_path,"output_"+str(id)+"_"+str(formatted_today+now)+".log")
error_path = os.path.join(run_path,"error_"+str(id)+"_"+str(formatted_today+now)+".log")
with open(batch_file,"w") as file:
file.write("#!/bin/sh\n")
file.write("#SBATCH --job-name=%s\n"%id)
file.write("#SBATCH --output=%s\n"%output_path)
file.write("#SBATCH --error=%s\n"%error_path)
file.write("#SBATCH --partition=%s\n"%queue_name)
file.write("#SBATCH --signal=USR1@600\n")
file.write("#SBATCH --nodes=%d\n"%nodes )
file.write("#SBATCH --ntasks-per-node=1\n")
file.write("#SBATCH --mem=350G\n")
file.write("#SBATCH --gpus=%d\n"%(nodes*gpu_per_node))
file.write("#SBATCH --gpus-per-node=%d\n" % (gpu_per_node))
file.write("#SBATCH --cpus-per-task=%d\n"%(CPU_PER_GPU*gpu_per_node))
file.write("#SBATCH --time=%d\n"%wall_time)
file.write("#SBATCH --mail-user=%s@fb.com\n"%username)
file.write("#SBATCH --mail-type=FAIL\n")
file.write("#SBATCH --mail-type=end \n")
file.write('#SBATCH --constraint="volta"\n')
report_info ="%s job failed; \t"%id
report_info += "log path: %s; \t"%output_path
report_info += "error record path: %s\t"%error_path
report_info += "command line path: %s\t"%batch_file
file.write('#SBATCH --comment="%s"\n'%(report_info))
with open(dependency_handler_path,'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
#file.write("bash /private/home/wang3702/.bashrc\n")
# file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
#file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
#file.write("module load anaconda3\n")
#file.write("conda activate pytorch2\n")
file.write("module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n")
file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
file.write("conda activate pytorch2\n")
file.write(command_line+" &\n")
file.write("wait $!\n")
file.write("set +x \n")
file.write("echo ..::Job Finished, but No, AGI is to BE Solved::.. \n")
# signal that job is finished
os.system('sbatch ' + batch_file)
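# A minimal usage sketch (the job name and command line below are illustrative only):
#   write_slurm_sh("demo_moco_baseline",
#                  "python3 main_adco.py --mode=1 --lr=0.03 --data=/path/to/imagenet",
#                  queue_name="learnfair", nodes=1, gpu_per_node=8)
# would write log/slurm_job_demo_moco_baseline.sh with the #SBATCH header built above,
# append the command, and submit it via sbatch.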
parser = argparse.ArgumentParser(description='slurm job submission')
parser.add_argument('--data', default="imagenet", type=str, metavar='DIR',
help='path to dataset')
parser.add_argument("--mode",type=int,default=0,help="control mode for training")
parser.add_argument("--type",type=int,default=0,help="running type control")
parser.add_argument("--roi",type=int,default = 20, help="number of rois sampled here")
parser.add_argument("--queue",type=int,default=0, help="queue specified list")
parser.add_argument("-F",type=str, default=None, help="resume path for running again")
parser.add_argument("--comment", type=str,default=None,help="adding comment for script names")
parser.add_argument("--node",type=int,default=1,help="nodes needed for training")
parser.add_argument("--gpu",type=int,default=8,help="number of gpus per node")
args = parser.parse_args()
if args.queue ==0:
queue_name = "learnfair"
elif args.queue ==1:
queue_name = "dev"
elif args.queue ==2:
queue_name = "scavenge"
elif args.queue ==3:
queue_name = 'priority'
elif args.queue ==4:
queue_name = 'learnlab'
elif args.queue==5:
queue_name = 'devlab'
elif args.queue==6:
queue_name = 'prioritylab'
dump_path= os.path.join(os.getcwd(),"swav_dump_100")
from ops.os_operation import mkdir
mkdir(dump_path)
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dump_path = os.path.join(dump_path, formatted_today + now)
if args.mode==1:
if args.type==0:
# command_line = "python3 main_adco.py --mode=1 --lr=0.06 --data=%s " \
# "--dist_url=tcp://localhost:10031 --epochs=100 " \
# "--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0006 " \
# "--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
# "--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=57" % args.data
# write_slurm_sh("baseline_sym_moco_lr0.06_proj", command_line, queue_name)
command_line = "python3 main_adco.py --mode=1 --lr=0.06 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0006 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=16 --mask_size=32 " \
"--num_roi=1 " % args.data
write_slurm_sh("baseline_sym_moco_lr0.06", command_line, queue_name)
# command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
# "--dist_url=tcp://localhost:10031 --epochs=100 " \
# "--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
# "--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
# "--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=16 --mask_size=32 " \
# "--num_roi=1 --img_size=96 " % args.data
# write_slurm_sh("baseline_sym_moco_input96", command_line, queue_name)
#running all the baseline with 100 epochs
#base line moco
# command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
# "--dist_url=tcp://localhost:10031 --epochs=100 " \
# "--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
# "--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
# "--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=35 --mask_size=32 " \
# " --num_roi=1 " % args.data
# write_slurm_sh("baseline_sym_mocobn_100", command_line, queue_name)
# #moco multi baseline
# command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
# "--dist_url=tcp://localhost:10031 --epochs=100 " \
# "--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
# "--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
# "--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=18 --nmb_crops 2 6 " \
# "--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " % (args.data)
# write_slurm_sh("multi_moco_baseline_100_new", command_line, queue_name)
# # #moco multi sym baseline
# command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
# "--dist_url=tcp://localhost:10031 --epochs=100 " \
# "--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
# "--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
# "--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=20 --nmb_crops 4 " \
# "--size_crops 224 --min_scale_crops 0.14 --max_scale_crops 1.0 " % (args.data)
# write_slurm_sh("2key_multi_moco_baseline_4_224", command_line, queue_name)
# #swav multi baseline
# command_line = "python3 main_adco.py --mode=5 --type=0 --data=%s --epochs 100 --lr=0.6 " \
# "--lr_final 0.0006 --batch_size=256 --warmup_epochs 0 --freeze_prototypes_niters 5005 " \
# "--queue_length 3840 --epoch_queue_starts 15 --dist_url=tcp://localhost:10031 " \
# "--knn_batch_size=256 --cos=1 --momentum=0.9 --weight_decay=1e-6 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 --moco_k=3000 --moco_t=0.1 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --nmb_crops 2 " \
# "--size_crops 224 --min_scale_crops 0.14 --max_scale_crops 1.0 --dump_path %s " % (args.data,dump_path)
# write_slurm_sh("swav_baseline_100_only224", command_line, queue_name)
# command_line = "python3 main_adco.py --mode=5 --type=0 --data=%s --epochs 100 --lr=0.6 " \
# "--lr_final 0.0006 --batch_size=256 --warmup_epochs 0 --freeze_prototypes_niters 5005 " \
# "--queue_length 3840 --epoch_queue_starts 15 --dist_url=tcp://localhost:10031 " \
# "--knn_batch_size=256 --cos=1 --momentum=0.9 --weight_decay=1e-6 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 --moco_k=3000 --moco_t=0.1 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --nmb_crops 2 6 " \
# "--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 --dump_path %s " % (
# args.data, dump_path)
# write_slurm_sh("swav_baseline_100", command_line, queue_name)
elif args.type==10:
#half dropout results
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=10 " % args.data
if args.F is not None:
resume_name = os.path.split(os.path.abspath(args.F))[1]
command_line += "--resume=%s"%args.F
write_slurm_sh("halfdropoutnew_resume%s"%resume_name, command_line, queue_name)
else:
write_slurm_sh("halfdropoutnew", command_line, queue_name)
elif args.type==11:
# to make sure overlap region can really not work
for mask_size in [96, 160]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=11 --shift_ratio=0 " \
" --mask_size=%d " % (args.data,mask_size)
write_slurm_sh("type11_roimatch_%s"%mask_size, command_line, queue_name)
elif args.type==13:
for mask_size in [96,160]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=13 " \
"--mask_size=%d "%(args.data,mask_size)
write_slurm_sh("type13_singleroi_vs_global_%d"%mask_size,command_line,queue_name)
time.sleep(1)
elif args.type==14:
#roi vs global
for mask_size in [96,160]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=14 " \
"--mask_size=%d "%(args.data,mask_size)
write_slurm_sh("type14_singleroi_vs_global_%d"%mask_size,command_line,queue_name)
elif args.type==16:
for mask_size in [96,128,160]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=16 " \
"--mask_size=%d --num_roi=10 "%(args.data,mask_size)
write_slurm_sh("type16_roi+global_vs_global_%d"%mask_size,command_line,queue_name)
elif args.type==-16:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=16 --mask_size=32 --num_roi=1 " % args.data
if args.F is not None:
resume_name = os.path.split(os.path.abspath(args.F))[1]
command_line += " --resume=%s"%args.F
write_slurm_sh("baseline_sym_moco_resume%s"%resume_name, command_line, queue_name)
else:
write_slurm_sh("baseline_sym_moco", command_line,queue_name)
elif args.type==17:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=17 --mask_size=32" \
" --num_roi=%d" % (args.data,args.roi)
write_slurm_sh("type17_randroi_%d"%args.roi, command_line,queue_name)
elif args.type==-17:
#roi vs roi,with global as negative
for roi in [10,20,50,100]:
for mask_size in [32, 96, 160, 196]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=17 --mask_size=%d" \
" --num_roi=%d" % (args.data,mask_size, roi)
write_slurm_sh("type17_randroi_%d_masksize_%d" % (roi,mask_size), command_line,queue_name)
elif args.type==18:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=18 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 "% (args.data)
if args.F is not None:
resume_name = os.path.split(os.path.abspath(args.F))[1]
command_line += "--resume=%s"%args.F
write_slurm_sh("multi_moco_baseline_resume%s"%resume_name, command_line, queue_name)
else:
write_slurm_sh("multi_moco_baseline" , command_line, queue_name)
elif args.type==19:
for roi in [20]:
for mask_size in [32,160]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=19 --mask_size=%d" \
" --num_roi=%d" % (args.data,mask_size, roi)
write_slurm_sh("type19_randroi_%d_masksize_%d" % (roi,mask_size), command_line,queue_name)
elif args.type==20:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=20 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 "% (args.data)
if args.F is not None:
resume_name = os.path.split(os.path.abspath(args.F))[1]
command_line += " --resume=%s"%args.F
write_slurm_sh("2key_multi_moco_baseline_correct_resume%s"%resume_name, command_line, queue_name)
else:
write_slurm_sh("2key_multi_moco_baseline_correct", command_line, queue_name)
elif args.type==21:
for roi in [20]:
for mask_size in [96]:
command_line = "python3 main_adco.py --mode=1 --lr=0.09 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=768 --knn_batch_size=256 --cos=1 --lr_final=0.0009 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=21 --mask_size=%d" \
" --num_roi=%d" % (args.data,mask_size, roi)
write_slurm_sh("type21_randroi_%d_masksize_%d" % (roi,mask_size), command_line,queue_name)
elif args.type==22:
for roi in [50]:
for mask_size in [96]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=22 --mask_size=%d" \
" --num_roi=%d" % (args.data, mask_size, roi)
write_slurm_sh("type22_randroi_%d_masksize_%d" % (roi,mask_size), command_line,queue_name)
elif args.type==23:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=23 --nmb_crops 2 2 2 2 2 2 2 2" \
" --size_crops 96 112 128 144 160 176 192 208 " % args.data
write_slurm_sh("type23_specifyroi", command_line, queue_name)
elif args.type==-23:
# command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
# "--dist_url=tcp://localhost:10031 --epochs=200 " \
# "--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
# "--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
# "--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=23 --nmb_crops 6" \
# " --size_crops 96 " % args.data
# write_slurm_sh("type23_specifyroi_6_96", command_line, queue_name)
min_scale = 64
max_scale = 224
divide_list = [2,4,8,16,32]
pick_times = [1,2,3]
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale<max_scale:
check_list+=str(current_scale)+" "
num_list+=str(pick_time)+" "
current_scale+=divide
print(check_list)
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=23 --nmb_crops %s " \
" --size_crops %s " % (args.data,num_list,check_list)
write_slurm_sh("type23_specifyroi_%d_%d"%(pick_time,divide), command_line, queue_name)
elif args.type==24:
for alpha in [0.5, 1.0, 2.0]:
for local_t in [0.1,0.2,0.3]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=24 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=1.0 " % (args.data,local_t)
write_slurm_sh("type24_lg_t_%.3f_alpha_%.2f"%(local_t,alpha), command_line, queue_name)
elif args.type==25:
for alpha in [0.5]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=24 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % (args.data, local_t,alpha)
write_slurm_sh("type25_lgq_t_%.3f_alpha_%.2f" %(local_t,alpha), command_line, queue_name)
elif args.type==26:
for alpha in [0.5,1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=26 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % (args.data, local_t,alpha)
write_slurm_sh("type26_lgq_t_%.3f_alpha_%.2f" %(local_t,alpha), command_line, queue_name)
elif args.type == 27:
min_scale = 96
max_scale = 224
divide_list = [16]
pick_times = [1]
for learning_rate in [0.05]:#[0.02,0.03,0.04,0.05,0.06,0.1,0.15]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0.1,0.15,0.2,0.3]:#[0.3, 0.5, 1.0]:
for local_t in [0.12,0.15,0.18]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=27 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f " % \
(learning_rate,args.data, local_t,num_list, check_list, local_t, alpha)
write_slurm_sh("type27_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, pick_time, divide,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == -270:
for num_roi in [6,10,20,30]:
for crop_size in [64, 96, 128, 160, 192]:
for learning_rate in [0.05]:
for alpha in [0.1]: # [0.3, 0.5, 1.0]:
for local_t in [0.18]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=27 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh(
"type27crop_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==-271:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0.1]: # [0.3, 0.5, 1.0]:
for local_t in [0.18,0.2]:
for moco_dim in [256,512]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=%d " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=27 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data,moco_dim, local_t, num_roi, crop_size, local_t, alpha)
write_slurm_sh(
"type27dim_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f_dim%d" % (
local_t, alpha, num_roi, crop_size, learning_rate,moco_dim),
command_line, queue_name)
time.sleep(1)
    elif args.type == -27:
        # baseline with 6 local crops of size 96 for type 27, as a direct comparison with SwAV,
        # swept over the MoCo embedding dimension
        for learning_rate in [0.05]:
            for alpha in [0.1]:  # [0.3, 0.5, 1.0]:
                for local_t in [0.18]:
                    for moco_dim in [128, 256, 512]:
                        command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
                                       "--dist_url=tcp://localhost:10031 --epochs=100 " \
                                       "--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
                                       "--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
                                       "--rank=0 --multiprocessing_distributed=1 --moco_dim=%d " \
                                       "--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
                                       "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=27 --nmb_crops 1 6" \
                                       " --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
                                       (learning_rate, args.data, moco_dim, local_t, local_t, alpha)
                        write_slurm_sh("type27baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f_dim%d" % (local_t, alpha, learning_rate, moco_dim),
                                       command_line, queue_name)
                        time.sleep(1)
elif args.type == 28:
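        # type 28: a single job with SwAV-style multi-crop (2 global 224 crops + 6 local 96 crops)
        # and explicit per-group min/max scale ranges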
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=28 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " % (args.data)
write_slurm_sh("type28_small_inside", command_line, queue_name)
elif args.type==29:
for learning_rate in [0.03]:
for alpha in [0.5,1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.2f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.5f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=29 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " \
"" % (learning_rate,args.data, learning_rate/100,local_t, alpha)
write_slurm_sh("type29_lgq_t_%.3f_alpha_%.2f_lr_%.4f" % (local_t, alpha,learning_rate), command_line, queue_name)
elif args.type==30:
for learning_rate in [0.03]:
for alpha in [0.5,1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.2f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.5f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=30 --nmb_crops 6 " \
" --size_crops 96 --local_t=%.4f --alpha=%.2f " \
"" % (learning_rate,args.data, learning_rate/100,local_t, alpha)
write_slurm_sh("type30_lgq_t_%.3f_alpha_%.2f_lr_%.4f" % (local_t, alpha,learning_rate), command_line, queue_name)
elif args.type==31:
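        # types 31/32: ROI-based variants driven by --num_roi / --mask_size
        # (no --nmb_crops / --size_crops), sweeping the ROI count at a fixed mask size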
for learning_rate in [0.03]:
for alpha in [0.5]:
for local_t in [0.2]:
for num_roi in [5, 10, 20]:
for mask_size in [96]:
command_line = "python3 main_adco.py --mode=1 --lr=%.2f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.5f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=31 " \
"--local_t=%.4f --alpha=%.2f --num_roi=%d --mask_size=%d " \
"" % (learning_rate, args.data, learning_rate / 100,
local_t, alpha,num_roi,mask_size)
write_slurm_sh("type31_lgq_t_%.3f_alpha_%.2f_lr_%.4f_roi%d_mask%d" %
(local_t, alpha, learning_rate,num_roi,mask_size),
command_line, queue_name)
elif args.type==32:
for learning_rate in [0.03]:
for alpha in [0.5]:
for local_t in [0.2]:
for num_roi in [5, 10, 20]:
for mask_size in [96]:
command_line = "python3 main_adco.py --mode=1 --lr=%.2f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.5f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=32 " \
"--local_t=%.4f --alpha=%.2f --num_roi=%d --mask_size=%d " \
"" % (learning_rate, args.data, learning_rate / 100,
local_t, alpha,num_roi,mask_size)
write_slurm_sh("type32_lgq_t_%.3f_alpha_%.2f_lr_%.4f_roi%d_mask%d" %
(local_t, alpha, learning_rate,num_roi,mask_size),
command_line, queue_name)
elif args.type==33:
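        # type 33: multi-crop MoCo baseline ("multimoco"), grid over learning rate and alpha
        # with the 2x224 + 6x96 crop setting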
for learning_rate in [0.03,0.04,0.05,0.06,0.09,0.12]:
for alpha in [0.5,1.0,2.0,5.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=33 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh("multimoco_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name)
elif args.type==-28:
for learning_rate in [0.06]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=28 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh("multimocoinside_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name)
elif args.type==34:
min_scale = 96
max_scale = 224
divide_list = [16]
pick_times = [1]
for learning_rate in [0.04, 0.05]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0.1, 0.3, 0.5,1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=34 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, num_list, check_list, local_t, alpha)
write_slurm_sh("type34_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, pick_time, divide, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 36:
min_scale = 96
max_scale = 224
divide_list = [16]
pick_times = [1]
for learning_rate in [0.04,0.05]:#[0.02,0.03,0.04,0.05,0.06,0.1,0.15]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0.1]:#[0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=36 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f " % \
(learning_rate,args.data, local_t,num_list, check_list, local_t, alpha)
write_slurm_sh("type36_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, pick_time, divide,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==37:
for learning_rate in [0.03,0.04,0.05,0.06]:
for alpha in [0.1,0.3,0.5,1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=37 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type37baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==38:
min_scale = 96
max_scale = 224
divide_list = [16]
pick_times = [1]
for learning_rate in [0.05]: # [0.02,0.03,0.04,0.05,0.06,0.1,0.15]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0]: #[0.1, 0.3, 0.5, 1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=38 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,"", "", local_t, alpha)
write_slurm_sh("type38_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, pick_time, divide, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==-38:
for learning_rate in [0.05]:
for alpha in [0.1,0.3,0.5,1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=38 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type38baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==39:
for learning_rate in [0.05]:
for alpha in [0.1]: # [0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=39 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type39baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==40:
for learning_rate in [0.05]:
for alpha in [0.5]: # [0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=40 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type40baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==41:
for mask_size in [96]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=41 " \
"--mask_size=%d "%(args.data,mask_size)
write_slurm_sh("type41_singleroi_vs_global_%d"%mask_size,command_line,queue_name)
elif args.type==42:
for learning_rate in [0.05]:
for alpha in [0.1,0.5]: # [0.3, 0.5, 1.0]:
for local_t in [0.15,0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=42 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type42baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==43:
for learning_rate in [0.05]:
for alpha in [0.1,0.5]: # [0.3, 0.5, 1.0]:
for local_t in [0.15,0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=43 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type43baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 44:
# for num_roi in [6]:
# for crop_size in [96]:
# for learning_rate in [0.05]:
# for alpha in [0.1]: # [0.3, 0.5, 1.0]:
# for local_t in [0.15, 0.18, 0.2]:
# for sample_ratio in [2,4]:
# command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
# "--dist_url=tcp://localhost:10031 --epochs=100 " \
# "--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
# "--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
# "--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=44 --nmb_crops 1 %d" \
# " --size_crops 224 %d --local_t=%.4f --alpha=%.2f --sample_ratio=%d " % \
# (learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha,sample_ratio)
# write_slurm_sh(
# "type44crop_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f_ratio%d" % (local_t, alpha, num_roi,crop_size, learning_rate,sample_ratio),
# command_line, queue_name)
# time.sleep(1)
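        # active type 44 sweep: fixed 6 local crops, crop size 96 or 192, grid over learning rate and alpha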
for num_roi in [6]:
for crop_size in [96,192]:
for learning_rate in [0.03,0.05,0.06]:
for alpha in [0.1,0.3,0.5,1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=44 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh(
"type44_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==-44:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0.1,0.5]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=44 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh(
"type44align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==45 or args.type==46:
for crop_size in [96]:
for learning_rate in [0.03,0.04,0.05]:
for alpha in [0.1,0.3,0.5,1,2]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --mask_size %d" \
" --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t, args.type, crop_size,local_t, alpha)
write_slurm_sh(
"type%d_crop_lgq_t_%.3f_alpha_%.2f_%d_lr%.4f" % (args.type, local_t,alpha,
crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type ==47:
min_scale = 96
max_scale = 224
divide_list = [16]
pick_times = [1]
for learning_rate in [0.03,0.05]: # [0.02,0.03,0.04,0.05,0.06,0.1,0.15]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0.1,0.5,1.0]: # [0.1, 0.3, 0.5, 1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=47 " \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t, check_list, local_t, alpha)
write_slurm_sh("type47_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, pick_time, divide, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type ==49:
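        # type 49: same crop-ladder construction as type 27, but sweeping the ladder step `divide`
        # over [2,4,8,16,32] with alpha=0 and --align=1, submitted via write_slurm_sh_faster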
min_scale = 96
max_scale = 224
divide_list = [2,4,8,16,32]
pick_times = [1]
for learning_rate in [0.06]: # [0.02,0.03,0.04,0.05,0.06,0.1,0.15]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0]: # [0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=49 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data, local_t, num_list,check_list, local_t, alpha)
write_slurm_sh_faster(
"type49crop_lgq_t_%.3f_alpha_%.2f_divide%d_lr%.4f" % (
local_t, alpha, divide, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==-49:
        # only run in the pytorch environment, not the base environment
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [-0.1,-0.3,-0.5,-1]: # [0.3, 0.5, 1.0]:
for local_t in [0.18]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=49 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type49align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==50:
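        # types 50-54 below follow the same pattern: alpha sweeps with --align=1 using 1 global
        # 224 crop and 6 local 96 crops; only the --type id and the exact alpha/local_t/lr grids differ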
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0, 0.1,0.5,1.0,2.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=50 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type50align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==51:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0, 0.1,0.5,1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=51 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type51align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==52:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0, 0.1,0.2,0.5,1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=52 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type52_1v1_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==53:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0, 0.1,0.5,1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=53 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type53align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==54:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0, 0.1,0.5,1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.15,0.18,0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=54 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type54align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==55:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0.1]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=55 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type55align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==551:
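        # type 551: re-submits the same configuration as type 55 (the command still passes --type=55)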
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0.1]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=55 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type55align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==550:
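        # type 550: the type-55 configuration with an additional sweep over the predictor
        # dimension (--pred_dim in 256/1024/2048)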
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0.1]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
for pred_dim in [256,1024,2048]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=55 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 --pred_dim=%d " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha,pred_dim)
write_slurm_sh_faster(
"type55dim%d_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (pred_dim,local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==56:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05,0.06]:
for alpha in [0, 0.05,0.1,0.2]: # [0.3, 0.5, 1.0]:
for local_t in [0.18, 0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=56 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type56align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==58:
for learning_rate in [0.06]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=58 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh("multimoco_proj_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name)
elif args.type==59:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=59 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type59_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==60:
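        # type 60: ROI-based variant (--num_roi / --mask_size instead of --nmb_crops / --size_crops),
        # sweeping the ROI count from 3 to 30 at a fixed 96 mask size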
for num_roi in [3,6,10,15,20,25,30]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=60 --num_roi=%d " \
" --mask_size=%d --local_t=%.4f --align=1 " % \
(learning_rate, args.data, epoch, 256,
256,learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type60_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==61:
#for num_roi in ['','6']:
# for crop_size in ['','96']:
indicate_list=[['',''],['6','96']]
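        # two settings: global 224 crop only (empty entries) vs. global crop plus 6 local 96 crops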
for indication in indicate_list:
num_roi = indication[0]
crop_size= indication[1]
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=61 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --align=1 " % \
(learning_rate, args.data, epoch, 256, 256,
learning_rate / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type61_lgq_t_%.3f_%s_%s_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==62:
for learning_rate in [0.06]:
            for alpha in [0,1.0]:  # alpha=0 means only the shuffling influences training
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=62 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh("pixelembedshufflemoco_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name)
elif args.type==63:
for learning_rate in [0.06]:
            for alpha in [0,1.0]:  # alpha=0 means only the shuffling influences training
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=63 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh("pixelGLsync_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name)
elif args.type == 64:
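        # types 64-67: alpha sweeps using 1 global + 6 local 96 crops with --align=1;
        # type 67 additionally sweeps the learning rate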
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0,0.1,0.2,0.5, 1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=64 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data, local_t, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type64align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 65:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0,0.1,0.2,0.5, 1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=65 --nmb_crops 1 %d " \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data, local_t, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type65align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 66:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0, 0.1, 0.2, 0.5, 1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=66 --nmb_crops 1 %d " \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data, local_t, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type66align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 67:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06,0.08,0.09]:
for alpha in [0, 0.1, 0.2, 0.5]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=67 --nmb_crops 1 %d " \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data, local_t, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type67align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==68:
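        # types 68-75 below: single-configuration jobs (1 global + 6 local 96 crops, lr 0.06, t=0.2)
        # differing mainly in the --type id; type 71 also sweeps alpha, type 74 the mask size,
        # and type 75 the local-crop count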
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=68 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type68_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==69:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=69 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type69_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==70:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=70 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type70_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==71:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for alpha in [0,0.05,0.1,0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=71 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --alpha=%.4f " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t,alpha)
write_slurm_sh_faster(
"type71_lgq_t_%.3f_%d_%d_lr%.4f_alpha%.4f" % (local_t, num_roi, crop_size, learning_rate,alpha),
command_line, queue_name)
time.sleep(1)
elif args.type==72:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=72 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type72_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==73:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=73 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type73_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==74:
for crop_size in [64,96,128,160,192]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=74 --mask_size %d " \
" --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, crop_size, local_t)
write_slurm_sh_faster(
"type74_lgq_t_%.3f_mask%d_lr%.4f" % (local_t, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==75:
for num_roi in [3,6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=75 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type75_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==76 or args.type==98:
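        # types 76/98: sweep --shuffle_mode over all modes 0-8, one job per mode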
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(9):
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type,num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type,shuffle_mode,local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==-76:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [0,1,7]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=76 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d --mlp_bn_stat=0 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_faster(
"type76_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (shuffle_mode,local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==77:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [0,1,2,3,5,6]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=77 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_faster(
"type77_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (shuffle_mode,local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==78:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [0,1,3,4,5,7]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=78 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_faster(
"type78_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (shuffle_mode,local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==79:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(2,11):
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=79 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode)
write_slurm_sh_faster(
"type79_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==80:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1,5,7]:
for mlp_bn_stat in [0,1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=80 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" \
" --mlp_bn_stat=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode,mlp_bn_stat)
write_slurm_sh_faster(
"type80_%d_lgq_t_%.3f_%d_%d_lr%.4f_bnmode%d" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate,mlp_bn_stat),
command_line, queue_name)
time.sleep(1)
elif args.type==81:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1,5,7]:
for mlp_bn_stat in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=81 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" \
" --mlp_bn_stat=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode,mlp_bn_stat)
write_slurm_sh_faster(
"type81_%d_lgq_t_%.3f_%d_%d_lr%.4f_bnmode%d" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate,mlp_bn_stat),
command_line, queue_name)
time.sleep(1)
elif args.type==82:
for num_roi in [6,16,32,64]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1,5]:
for mlp_bn_stat in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=82 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" \
" --mlp_bn_stat=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode,mlp_bn_stat)
write_slurm_sh_faster(
"type82_%d_lgq_t_%.3f_%d_%d_lr%.4f_bnmode%d" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate,mlp_bn_stat),
command_line, queue_name)
time.sleep(1)
elif args.type == 83 or args.type==84:
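        # types 83/84: ROI-based variant (--num_roi / --mask_size), sweeping the ROI count and alpha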
for num_roi in [1,3,5,10]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for alpha in [0.1,0.2,0.5,1.0,2.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --num_roi %d" \
" --mask_size %d --local_t=%.4f --align=1 --alpha=%f " \
" " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t,args.type, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type%d_lgq_t_%.3f_%d_%d_lr%.4f_alpha%f" % (args.type,
local_t, num_roi, crop_size, learning_rate,alpha),
command_line, queue_name)
time.sleep(1)
elif args.type==85:
for num_roi in [6,16,32,64]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1,5]:
for mlp_bn_stat in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=85 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" \
" --mlp_bn_stat=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode,mlp_bn_stat)
write_slurm_sh_faster(
"type85_%d_lgq_t_%.3f_%d_%d_lr%.4f_bnmode%d" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate,mlp_bn_stat),
command_line, queue_name)
time.sleep(1)
elif args.type==86:
for num_roi in [6,16,32]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1,5,7]:
for mlp_bn_stat in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=86 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode)
write_slurm_sh_faster(
"type86_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
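    # types 87/88 and 93-96: one fixed configuration per type id; the actual model
    # variant is selected inside main_adco.py via --type.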
elif args.type==87 or args.type==88 or args.type==93 or args.type==94 or args.type==95 or args.type==96:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t,args.type, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type,
local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==89 or args.type==90:
for num_roi in [1,5,10]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for alpha in [0.1,0.2,0.5,1.0,2.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --num_roi %d" \
" --mask_size %d --local_t=%.4f --align=1 --alpha=%f " \
" " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t,args.type, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type%d_lgq_t_%.3f_%d_%d_lr%.4f_alpha%f" % (args.type,
local_t, num_roi, crop_size, learning_rate,alpha),
command_line, queue_name)
time.sleep(1)
elif args.type==91:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t)
write_slurm_sh_faster(
"type%d_lgq_t_%.3f_lr%.4f" % (args.type, local_t, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==92:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(4):
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,shuffle_mode)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_lr%.4f" % (args.type,shuffle_mode, local_t, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==97:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(4):
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=97 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode)
write_slurm_sh_faster(
"type97_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
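    # types 99 and 103-113: a single shuffle_mode=1 run per type; no crop flags are
    # passed, so these presumably fall back to the default two-crop pipeline.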
elif args.type==99 or args.type==103 or args.type==104 or args.type==105 \
or args.type==106 or args.type==107 or args.type==108 or args.type==109 \
or args.type==110 or args.type==111 or args.type==112 or args.type==113:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,shuffle_mode)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_lr%.4f" % (args.type,shuffle_mode, local_t, learning_rate),
command_line, queue_name)
time.sleep(1)
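    # types 126/127/129/131: full shuffle-mode ablation (modes 0-7) in mixed precision
    # (--use_fp16=1); environment=1 presumably selects an alternative software environment.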
elif args.type==126 or args.type==127 or args.type==129 or args.type==131:
for learning_rate in [0.03]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(8):
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,shuffle_mode)
write_slurm_sh_faster(
"type%dablation_%d_lgq_t_%.3f_lr%.4f" % (args.type,shuffle_mode, local_t, learning_rate),
command_line, queue_name,environment=1)
time.sleep(1)
elif args.type==133 or args.type==134:
for learning_rate in [0.03]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(3):
for momentum_weight_decay in [0.9,0.99,0.999]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d --use_fp16=1 --momentum_stat=%f" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t, shuffle_mode,momentum_weight_decay)
write_slurm_sh_faster(
"type%dablation_%d_%f_lgq_t_%.3f_lr%.4f" % (
args.type, shuffle_mode,momentum_weight_decay, local_t, learning_rate),
command_line, queue_name, environment=1)
time.sleep(1)
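    # types 128/130/132/135/136: group-norm ablation; group_norm_size sweeps the powers
    # of two from 1 up to 128 (the exact grouping semantics live in main_adco.py).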
elif args.type==128 or args.type==130 or args.type==132 or args.type==135 or args.type==136:
for learning_rate in [0.03]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,2,4,8,16,32,64,128]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,group_norm_size)
write_slurm_sh_faster(
"type%dgroupablation_%d_lgq_t_%.3f_lr%.4f" % (args.type,group_norm_size, local_t, learning_rate),
command_line, queue_name,environment=1)
time.sleep(1)
elif args.type==152:
for learning_rate in [0.03]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,2,4,8,16,32,64,128]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,group_norm_size)
write_slurm_sh_faster(
"type%dgroup_%d_lgq_t_%.3f_lr%.4f" % (args.type,group_norm_size, local_t, learning_rate),
command_line, queue_name,environment=0)
time.sleep(1)
elif args.type==137 or args.type==138:
for learning_rate in [0.03]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t)
write_slurm_sh_faster(
"type%d2bnablation_lgq_t_%.3f_lr%.4f" % (args.type,local_t, learning_rate),
command_line, queue_name,environment=1)
time.sleep(1)
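    # type 118: grid over what appears to be an extra convolutional adapter
    # (--loco_conv_size in {1..4} crossed with --loco_conv_stride in {1..3}).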
elif args.type==118:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1]:
for conv_size in [1,2,3,4]:
for stride_size in [1,2,3]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d --loco_conv_size=%d " \
"--loco_conv_stride=%d" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t, shuffle_mode,conv_size,stride_size)
write_slurm_sh_faster(
"type%d_%d_conv%d_%d_lr%.4f" % (args.type, shuffle_mode, conv_size,
stride_size,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==114:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,2,4,8]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,group_norm_size)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_lr%.4f" % (args.type,group_norm_size, local_t, learning_rate),
command_line, queue_name)
time.sleep(1)
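    # types 115-117 and 120-124: SwAV-style multi-crop (2x224 + 6x96) with a joint sweep
    # over group_norm_size {1, 8} and alpha {1.0, 3.0}; gpu_memory=True presumably
    # requests high-memory GPUs.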
elif args.type==115 or args.type==116 or args.type==117 or args.type==120 \
or args.type==121 or args.type==122 or args.type==123 or args.type==124:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,8]:
for alpha in [1.0,3.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.14 0.05" \
" --size_crops 224 96 --nmb_crops 2 6 --max_scale_crops 1.0 0.14 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --alpha=%f " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,group_norm_size,alpha)
write_slurm_sh_faster(
"type%d_%d_alpha%f_lgq_t_%.3f_lr%.4f" % (args.type,group_norm_size,alpha, local_t, learning_rate),
command_line, queue_name,gpu_memory=True)
time.sleep(1)
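    # type -120: negative ids dispatch to abs(args.type) on a 96px-crop-only pipeline;
    # alpha is tried both at 1.0 and at num_crops/2 - 1 (deduplicated when they coincide).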
elif args.type==-120:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1]:
for num_crops in [4,8,16,32]:
same_alpha = int(num_crops / 2) - 1
iter_alpha =[same_alpha,1.0] if same_alpha!=1 else [1.0]
for alpha in iter_alpha:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.14 " \
" --size_crops 96 --nmb_crops %d --max_scale_crops 1.0 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --alpha=%f --use_fp16=1" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_crops,abs(args.type), local_t, group_norm_size, alpha)
write_slurm_sh_faster(
"type%d_%d_%d_alpha%f_lgq_t_%.3f_lr%.4f" % (
args.type,num_crops, group_norm_size, alpha, local_t, learning_rate),
command_line, queue_name, gpu_memory=True,environment=1)
time.sleep(1)
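    # types 139-147: view-normalization variants (job names tag these "viewnorm"),
    # sweeping {4, 8, 16} 96px crops per image at group_norm_size=1 in fp16.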
elif args.type==139 or args.type==140 or args.type==141 or args.type==142 \
or args.type==143 or args.type==144 or args.type==145 or args.type==146 or args.type==147:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1]:
for num_crops in [4,8,16]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.14 " \
" --size_crops 96 --nmb_crops %d --max_scale_crops 1.0 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_crops,args.type, local_t, group_norm_size)
write_slurm_sh_faster(
"type%dviewnorm_%d_%d_lgq_t_%.3f_lr%.4f" % (
args.type, num_crops,group_norm_size, local_t, learning_rate),
command_line, queue_name, gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==148 or args.type==149 or args.type==150:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1]:
for num_crops in [4,8,16,32]:
for crop_size in [224,96]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.2 " \
" --size_crops %d --nmb_crops %d --max_scale_crops 1.0 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, crop_size,num_crops, args.type, local_t, group_norm_size)
write_slurm_sh_faster(
"type%dviewnorm_%d_%d_group%d_lgq_t_%.3f_lr%.4f" % (
args.type, num_crops,crop_size, group_norm_size, local_t, learning_rate),
command_line, queue_name, gpu_memory=True, environment=1)
time.sleep(1)
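    # type 151: multi-query variant (tagged "multiquery_viewkey"): 4x224 query crops
    # plus 6x96 key crops at a fixed alpha of 1.0.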
elif args.type==151:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
" --type=%d --min_scale_crops 0.14 0.05 " \
" --size_crops 224 96 --nmb_crops 4 6 --max_scale_crops 1.0 0.14" \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 --alpha 1.0" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t, group_norm_size)
write_slurm_sh_faster(
"type%dmultiquery_viewkey_group%d_lgq_t_%.3f_lr%.4f" % (
args.type, group_norm_size, local_t, learning_rate),
command_line, queue_name, gpu_memory=True, environment=1)
time.sleep(1)
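    # type 125: EMA ablation over --momentum_stat; note knn_batch_size is pinned to 256
    # here rather than scaling with args.node.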
elif args.type==125:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for momentum_stat in [0.9,0.99,0.999]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.14 0.05" \
" --size_crops 224 96 --nmb_crops 2 6 --max_scale_crops 1.0 0.14 --type=%d " \
" --local_t=%.4f --align=1 --momentum_stat=%f " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,256,
learning_rate * args.node / 100,
local_t, args.type, local_t, momentum_stat)
write_slurm_sh_faster(
"type%d_momentum%f_lgq_t_%.3f_lr%.4f" % (
args.type, momentum_stat, local_t, learning_rate),
command_line, queue_name, gpu_memory=True)
time.sleep(1)
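    # type -108: single-node large-batch run; lr follows the linear scaling rule
    # lr = base_lr * batch_size / 256, with the cosine schedule ending at lr / 100.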
elif args.type==-108:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for batch_size in [1024]:
for shuffle_mode in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * batch_size/256, args.data, epoch, batch_size,
256,
learning_rate * batch_size/256/ 100,
local_t, abs(args.type), local_t,shuffle_mode)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_lr%.4f" % (args.type,shuffle_mode, local_t, learning_rate*batch_size/256),
command_line, queue_name,gpu_memory=True)
time.sleep(1)
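    # type 100 (small-batch path): batch 128 with the base lr halved (cosine end at
    # lr/200); a multi-node type 100 variant exists below under args.mode==2.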
elif args.type==100:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,2,4,8]:
command_line = "python3 main_adco.py --mode=1 --lr=%f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --group_norm_size=%d " % \
(learning_rate/2, args.data, epoch, 128,
128,
learning_rate/ 200,
local_t,args.type, num_roi, crop_size, local_t,group_norm_size)
write_slurm_sh_faster(
"type%d_group%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type,group_norm_size,
local_t, num_roi, crop_size, learning_rate),
command_line, queue_name,gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==101:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_num in [1,2,4,8]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=101 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --group_norm_size=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, group_num)
write_slurm_sh_faster(
"type101_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
group_num, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==102:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [0,1,7]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type,num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type,shuffle_mode,local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
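    # mode 2: multi-node submissions; write_slurm_sh_multi/_multi2 spread each job over
    # args.node nodes with args.gpu GPUs per node, and lr/batch are typically scaled by
    # args.node.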
elif args.mode==2:
if args.type==58:
for learning_rate in [0.06]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=58 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh_multi("multimoco_proj_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name,
nodes=args.node,gpu_per_node=args.gpu)
elif args.type==59:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [800]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=59 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate*args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate*args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_multi(
"type59_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
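        # type 61: the empty-string entries drop the extra crop arguments entirely, so the
        # sweep covers single-crop as well as local-crop variants (including mixed combos).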
elif args.type==61:
for num_roi in ['','6']:
for crop_size in ['','96']:
for learning_rate in [0.04,0.06,0.08]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=61 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --align=1 --ngpu=%d " % \
(learning_rate, args.data, epoch, 256,256,
learning_rate / 100,
local_t, num_roi, crop_size, local_t,args.gpu)
write_slurm_sh_multi(
"type61_lgq_t_%.3f_%s_%s_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==77:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [5]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=77 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_multi(
"type77_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (shuffle_mode,local_t, num_roi, crop_size, learning_rate*args.node),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==87 or args.type==88 or args.type==94:
if args.type==87:
roi_num_list=[32]
elif args.type==88:
roi_num_list = [6,32]
else:
roi_num_list = [0]
for num_roi in roi_num_list:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [800]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 128,
learning_rate * args.node / 100,
local_t,args.type, num_roi, crop_size, local_t)
if args.queue<=1:
write_slurm_sh_multi2(
"type%d_lgq_t_%.3f_%d_%d_lr%.4f_epoch%d" % (args.type,
local_t, num_roi, crop_size,
learning_rate, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"type%d_lgq_t_%.3f_%d_%d_lr%.4f_epoch%d" % (args.type,
local_t, num_roi, crop_size, learning_rate,epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type == 100:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,2,4,8,16]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --group_norm_size=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t,args.type, num_roi, crop_size, local_t,group_norm_size)
if args.node>=4:
command_line += " --warmup_epochs=10 "
if args.queue <= 1:
write_slurm_sh_multi2(
"type%d_group%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type,group_norm_size,
local_t, num_roi, crop_size, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"type%d_group%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type, group_norm_size,
local_t, num_roi, crop_size,
learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==101:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_num in [1,2,4,8,16]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=101 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --group_norm_size=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, group_num)
if args.node >= 4:
command_line += " --warmup_epochs=10 "
if args.queue <= 1:
write_slurm_sh_multi2(
"type101_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
group_num, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"type101_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
group_num, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==119:
for batch_size in [4096]:
#for crop_size in [96]:
if True:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [800]:
for group_num in [1,8,16,32]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * batch_size / 256, args.data, epoch, batch_size,
256,
learning_rate * batch_size / 256 / 100,
local_t, abs(args.type), local_t,group_num)
command_line += " --warmup_epochs=10 "
write_slurm_sh_multi(
"mocov2bigbatch_type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_num, learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu, gpu_memory=True,
environment=1)
elif args.type==115 or args.type==120:
for batch_size in [2048]:
for learning_rate in [0.045]:
for local_t in [0.2]:
for epoch in [800]:
for group_norm_size in [64]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=10 --tensorboard=1 --min_scale_crops 0.14 0.05" \
" --size_crops 224 96 --nmb_crops 2 6 --max_scale_crops 1.0 0.14 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --alpha=%f --use_fp16=1 " % \
(learning_rate * batch_size/256, args.data, epoch, batch_size,
256,
learning_rate * batch_size/256/ 100,
local_t, args.type, local_t,group_norm_size,alpha)
write_slurm_sh_multi(
"multimoco_type%d_%d_alpha%f_lgq_t_%.3f_lr%.4f" % (args.type,group_norm_size,alpha, local_t, learning_rate),
command_line, queue_name,nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==149:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [1000]:
for group_norm_size in [1]:
for num_crops in [4]:
for crop_size in [224]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.2 " \
" --size_crops %d --nmb_crops %d --max_scale_crops 1.0 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
512,
learning_rate * args.node / 100,
local_t, crop_size,num_crops, args.type, local_t, group_norm_size)
write_slurm_sh_multi2(
"mocov2_%dview_type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, num_crops,group_norm_size, learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu, gpu_memory=False,
environment=0)
time.sleep(1)
elif args.type==151:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [1000]:
for group_norm_size in [1]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
" --type=%d --min_scale_crops 0.14 0.05 " \
" --size_crops 224 96 --nmb_crops 4 6 --max_scale_crops 1.0 0.14" \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 --alpha=1.0" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
512,
learning_rate * args.node / 100,
local_t, args.type, local_t, group_norm_size)
write_slurm_sh_multi(
"type%dmultiquery_viewkey_group%d_lgq_t_%.3f_lr%.4f" % (
args.type, group_norm_size, local_t, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
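    # mode 6: MoCo v3-style runs (moco_dim=256, mlp_dim=4096, 10 warm-up epochs); the
    # type ids below restart from 0 and are unrelated to the mode-1 ids above.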
elif args.mode==6:
if args.type==0 or args.type==1 or args.type==2 or args.type==3:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [512]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=0.9 " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d " \
% (
                                            args.type, args.data, epoch, batch_size, learning_rate, local_t, num_roi, crop_size, args.node * 64)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,
local_t, num_roi,
crop_size,
epoch),
command_line, queue_name)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"mocov3type%d_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, local_t, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==4 or args.type==5 or args.type==6:
for num_roi in [1]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for group_norm_size in [1,2,4,8]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d " \
% (args.type, args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,group_norm_size)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_%d_%d_epoch%d" % (args.type,
group_norm_size,
learning_rate,
local_t,
num_roi,
crop_size,
epoch),
command_line, queue_name,gpu_memory=True)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,group_norm_size,learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,learning_rate,local_t, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==7 or args.type==8:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for group_norm_size in [1,2,4,8]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
% (args.type, args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,group_norm_size)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_%d_%d_epoch%d" % (args.type,
group_norm_size,
learning_rate,
local_t,
num_roi,
crop_size,
epoch),
command_line, queue_name,gpu_memory=True,environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,group_norm_size,learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,learning_rate,local_t, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==-7:
            combine_choice = [[1024, 16]]  # [[1024,16],[2048,32],[4096,64]]
for num_roi in [10]:
for crop_size in [96]:
for learning_rate in [0.3]:
for local_t in [1.0]:
for epoch in [1000]:
for batch_size,group_norm_size in combine_choice:
command_line = "python3 main_adco.py --mode=6 --type=7 --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1.5e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.996 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
% ( args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,group_norm_size)
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size, learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size, learning_rate, local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==-13:
            combine_choice = [[4096, 1], [4096, 64]]  # [[1024,16],[2048,32],[4096,64]]
for num_roi in [20]:
for crop_size in [96]:
for learning_rate in [0.3]:
for local_t in [1.0]:
for epoch in [1000]:
for batch_size,group_norm_size in combine_choice:
command_line = "python3 main_adco.py --mode=6 --type=13 --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1.5e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.996 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
% ( args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,group_norm_size)
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size, learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size, learning_rate, local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==9 or args.type==10:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for ema_param in [0.001,0.01,0.1]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --momentum_stat=%f --use_fp16=1 " \
% (args.type, args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,ema_param)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%f_%flgq_t_%.3f_%d_%d_epoch%d" % (args.type,
ema_param,
learning_rate,
local_t,
num_roi,
crop_size,
epoch),
command_line, queue_name,gpu_memory=True,environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,group_norm_size,learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,learning_rate,local_t, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==11:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for ema_param in [0.999]:
for group_norm_size in [1,4,8,16]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --momentum_stat=%f --use_fp16=1 --group_norm_size=%d " \
% (args.type, args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,ema_param,group_norm_size)
if args.node == 1:
write_slurm_sh_faster(
"mocov3type%d_%f_%d_%flgq_t_%.3f_%d_%d_epoch%d" % (args.type,
group_norm_size,
ema_param,
learning_rate,
local_t,
num_roi,
crop_size,
epoch),
command_line, queue_name, gpu_memory=True, environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,ema_param, learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,ema_param, learning_rate, local_t, num_roi,
crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==12:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for group_norm_size in [8]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
% (args.type, args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,group_norm_size)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_%d_%d_epoch%d" % (args.type,
group_norm_size,
learning_rate,
local_t,
num_roi,
crop_size,
epoch),
command_line, queue_name,gpu_memory=True,environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,group_norm_size,learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=False,environment=0)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,learning_rate,local_t, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==13 or args.type==14 or args.type==15:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for group_norm_size in [1,4,8,16]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 " \
" --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
% (args.type, args.data, epoch, batch_size, learning_rate,
local_t, args.node * 64, group_norm_size)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_epoch%d" % (args.type,
group_norm_size,
learning_rate,
local_t,
epoch),
command_line, queue_name, gpu_memory=True, environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, learning_rate,
local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=False, environment=0)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==19:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for group_norm_size in [1,4,8,16,32]:
for key_group_norm_size in [1,4,8,16,32]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 " \
" --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --key_group=%d " \
"--use_fp16=1 " \
% (args.type, args.data, epoch, batch_size, learning_rate,
local_t, args.node * 64, group_norm_size,key_group_norm_size)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%d_%flgq_t_%.3f_epoch%d" % (args.type,
group_norm_size,
key_group_norm_size,
learning_rate,
local_t,
epoch),
command_line, queue_name, gpu_memory=True, environment=1)
else:
if args.queue <= 3:
write_slurm_sh_multi2(
"mocov3type%d_%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, key_group_norm_size,learning_rate,
local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=False, environment=0)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, key_group_norm_size,learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
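        # type 16: despite the name, "crop_size" here is the number of crops passed to
        # --nmb_crops, not a pixel size.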
elif args.type==16:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for crop_size in [4,8,16]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 " \
" --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=1 --use_fp16=1 " \
"--nmb_crops %d" \
% (args.type, args.data, epoch, batch_size, learning_rate,
local_t, args.node * 64,crop_size )
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_epoch%d" % (args.type,
crop_size,
learning_rate,
local_t,
epoch),
command_line, queue_name, gpu_memory=True, environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, crop_size, learning_rate,
local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=False, environment=0)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, crop_size, learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
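        # types 17/18: ViT-S backbone (-a vit_small) with transformer-style lr/weight
        # decay (1.5e-4 / 0.1, presumably AdamW); type 18 additionally sweeps
        # group_norm_size over powers of two.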
elif args.type==17 or args.type==18:
warmup_epoch=10
for learning_rate in [1.5e-4]:
for local_t in [0.2]:
for epoch in [100]:
for batch_size in [1024]:
if args.type==18:
group_list = [1,2,4,8,16,32,64,128]
else:
group_list = [1]
for group_norm_size in group_list:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=0.1 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f " \
" --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
"--warmup_epochs %d -a vit_small --crop_min 0.08 " \
% (args.type, args.data, epoch, batch_size, learning_rate,
local_t, 256 , group_norm_size,warmup_epoch)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_epoch%d" % (args.type,
group_norm_size,
learning_rate,
local_t,
epoch),
command_line, queue_name, gpu_memory=True, environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, learning_rate,
local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=False, environment=0)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
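    # mode 7: SimSiam-style runs (moco_dim=2048, mlp_dim=512, lr=0.05, weight_decay=1e-4).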
elif args.mode==7:
if args.type==0 or args.type==1 or args.type==2 or args.type==3 or args.type==4:
for num_roi in [16]:
for crop_size in [96]:
for learning_rate in [0.05]:
                        for batch_size in [512]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=7 --type=%d " \
" --data=%s --epochs=%d --start_epoch=0 --batch_size=%d " \
"--lr=%f --weight_decay=1e-4 --dist_url=tcp://localhost:10031 " \
"--rank=0 --multiprocessing_distributed=1 --world_size=1 " \
"--moco_dim=2048 --mlp_dim=512 --nmb_crops 1 %d --size_crops 224 %d " \
"--align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 --knn_batch_size=%d "\
%(args.type,args.data,epoch,barch_size,learning_rate,num_roi,crop_size,max(64*args.node,256))
if args.node==1:
write_slurm_sh_faster("simsiamtype%d_%d_%d_epoch%d" % (args.type, num_roi, crop_size,
epoch),command_line, queue_name,)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"simsiamtype%d_%d_%d_epoch%d" % (args.type, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"simsiamtype%d_%d_%d_epoch%d" % (args.type, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==5 or args.type==6 or args.type==7 or args.type==8 or args.type==9:
for learning_rate in [0.05]:
for barch_size in [512]:
for epoch in [100]:
for group_norm_size in [1, 2, 4, 8,16,32,64]:
command_line = "python3 main_adco.py --mode=7 --type=%d " \
" --data=%s --epochs=%d --start_epoch=0 --batch_size=%d " \
"--lr=%f --weight_decay=1e-4 --dist_url=tcp://localhost:10031 " \
"--rank=0 --multiprocessing_distributed=1 --world_size=1 " \
"--moco_dim=2048 --mlp_dim=512 --group_norm_size=%d " \
"--align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 --knn_batch_size=%d " \
"--use_fp16=1 " \
% (args.type, args.data, epoch, barch_size, learning_rate,group_norm_size,
max(64 * args.node, 256))
if args.node == 1:
write_slurm_sh_faster("simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size,
epoch), command_line, queue_name,
gpu_memory=True,environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True,environment=1)
else:
write_slurm_sh_multi(
"simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==-6:
for learning_rate in [0.05]:
for barch_size in [256,512]:
for epoch in [800]:
for group_norm_size in [8]:
command_line = "python3 main_adco.py --mode=7 --type=%d " \
" --data=%s --epochs=%d --start_epoch=0 --batch_size=%d " \
"--lr=%f --weight_decay=1e-4 --dist_url=tcp://localhost:10031 " \
"--rank=0 --multiprocessing_distributed=1 --world_size=1 " \
"--moco_dim=2048 --mlp_dim=512 --group_norm_size=%d " \
"--align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 --knn_batch_size=%d " \
"--use_fp16=1 " \
% (abs(args.type), args.data, epoch, barch_size, learning_rate,group_norm_size,
max(64 * args.node, 256))
if args.node == 1:
write_slurm_sh_faster("simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size,
epoch), command_line, queue_name,
gpu_memory=True )
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==10:
for learning_rate in [0.05]:
for barch_size in [512]:
for epoch in [100]:
for crop_size in [4, 8,16]:
command_line = "python3 main_adco.py --mode=7 --type=%d " \
" --data=%s --epochs=%d --start_epoch=0 --batch_size=%d " \
"--lr=%f --weight_decay=1e-4 --dist_url=tcp://localhost:10031 " \
"--rank=0 --multiprocessing_distributed=1 --world_size=1 " \
"--moco_dim=2048 --mlp_dim=512 --nmb_crops %d " \
"--align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 --knn_batch_size=%d " \
"--use_fp16=1 " \
% (args.type, args.data, epoch, barch_size, learning_rate,crop_size,
max(64 * args.node, 256))
if args.node == 1:
write_slurm_sh_faster("simsiamtype%d_%d_epoch%d" % (args.type,crop_size,
epoch), command_line, queue_name,
gpu_memory=True,environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"simsiamtype%d_%d_epoch%d" % (args.type,crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True,environment=1)
else:
write_slurm_sh_multi(
"simsiamtype%d_%d_epoch%d" % (args.type,crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True,environment=1)
time.sleep(1)
elif args.mode==5:
#run swav baseline
if args.type==0:
if args.F is None:
command_line = "python3 main_adco.py --mode=5 --type=0 --data=%s --epochs 200 --lr=0.6 "\
"--lr_final 0.0006 --batch_size=256 --warmup_epochs 0 --freeze_prototypes_niters 5005 "\
"--queue_length 3840 --epoch_queue_starts 15 --dist_url=tcp://localhost:10031 "\
"--knn_batch_size=256 --cos=1 --momentum=0.9 --weight_decay=1e-6 --world_size=1 "\
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 --moco_k=3000 --moco_t=0.1 "\
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 --dump_path %s"%(args.data,dump_path)
write_slurm_sh("swav_baseline" , command_line, queue_name)
else:
args.F= os.path.abspath(args.F)
command_line = "python3 main_adco.py --mode=5 --type=0 --data=%s --epochs 200 --lr=0.6 " \
"--lr_final 0.0006 --batch_size=256 --warmup_epochs 0 --freeze_prototypes_niters 5005 " \
"--queue_length 3840 --epoch_queue_starts 15 --dist_url=tcp://localhost:10031 " \
"--knn_batch_size=256 --cos=1 --momentum=0.9 --weight_decay=1e-6 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 --moco_k=3000 --moco_t=0.1 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--resume=%s --dump_path %s " % (args.data,args.F,dump_path)
resume_name= os.path.split(os.path.abspath(args.F))[1]
write_slurm_sh("swav_baseline_resume%s"%resume_name, command_line, queue_name)
elif args.mode==8:
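    # mode 8: Barlow-Twins-style (BT) runs via main.py, sweeping batch size / learning rates / lambd; type 2 also sweeps --group_norm_size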
if args.type==0 or args.type==1:
for epoch in [100]:
for batch_size in [2048]:
for lr_w in [0.2]:
for lr_bias in [0.0048]:
for alpha in [0.51]:
command_line="python3 main.py %s --epochs=%d " \
"--batch-size=%d --learning-rate-weights=%f --learning-rate-biases=%f " \
"--weight-decay=1e-6 --lambd=%f --type=%d --knn_neighbor=20 " \
"--knn_freq=1 --knn_batch_size=%d --tensorboard=1 "%(args.data,epoch,
batch_size,lr_w,lr_bias,alpha,args.type,256 )
if args.node==1:
write_slurm_sh_faster("BTtype%d_%d_epoch%d" % (args.type,batch_size,epoch), command_line, queue_name,
gpu_memory=False, environment=0)
else:
write_slurm_sh_multi2(
"BTtype%d_%d_epoch%d" % (args.type, batch_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=False, environment=0)
elif args.type==2:
for epoch in [100]:
for batch_size in [1024]:
for lr_w in [0.2]:
for lr_bias in [0.0048]:
for alpha in [0.51]:
for group_size in [2,4,8,16,32]:
command_line = "python3 main.py %s --epochs=%d " \
"--batch-size=%d --learning-rate-weights=%f --learning-rate-biases=%f " \
"--weight-decay=1e-6 --lambd=%f --type=%d --knn_neighbor=20 " \
"--knn_freq=1 --knn_batch_size=%d --tensorboard=1 --group_norm_size=%d " % (args.data, epoch,
batch_size, lr_w,
lr_bias, alpha,
args.type, 256,group_size)
write_slurm_sh_faster("BTtype%d_%d_%d_epoch%d" % (args.type,group_size, batch_size,epoch), command_line, queue_name,
gpu_memory=False, environment=0)
elif args.mode==0:
    # mode 0: submit linear-evaluation (finetune) jobs over lr / weight-decay grids; --comment tags the job names
use_bn=args.type
for lr in [20]:
for weight_decay in [1e-6,1e-7,1e-8,1e-9]:
command_line = "python3 lincls.py --data=%s --dist-url=tcp://localhost:10031 " \
"--pretrained='%s' --lr=%.4f --final_lr=%.8f --dataset=ImageNet --use_bn=%d --wd %.8f" % (
args.data, args.F, lr, lr / 100, use_bn,weight_decay)
write_slurm_sh("linear_eval_%s_%.4f_bn%d_wd_%f" % (args.comment, lr, use_bn,weight_decay), command_line, queue_name)
time.sleep(1)
elif args.mode==-2:
use_bn = args.type
    # type 3: l2-normalized linear head
for lr in [1.0]:
for weight_decay in [1e-5,1e-6,1e-7,1e-8,1e-9]:
command_line = "python3 lincls.py --data=%s --dist-url=tcp://localhost:10031 --batch-size=4096 " \
"--pretrained='%s' --lr=%.4f --final_lr=%.8f --dataset=ImageNet --use_bn=%d --wd %.8f" % (
args.data, args.F, lr, lr / 100, use_bn, weight_decay)
write_slurm_sh("linearb4096_eval_%s_%.4f_bn%d_wd_%.8f" % (args.comment, lr, use_bn, weight_decay), command_line,
queue_name)
elif args.mode==-1:
command_line = "python3 encode.py --data=%s --dist-url=tcp://localhost:10031 " \
"--pretrained='%s' --dataset=ImageNet " % (args.data, args.F)
write_slurm_sh("encode_%s" % (args.comment), command_line, queue_name)
elif args.mode==-3:
command_line = "python3 main_adco.py --sym=0 --lr=0.03 --memory_lr=3 --moco_t=0.12 " \
"--mem_t=0.02 --data=%s --dist_url=tcp://localhost:10001 --mode=0 " \
"--epochs=200 --moco_dim=128 --moco_m=0.999 --moco_k=65536 --cluster=65536 " \
"--knn_neighbor=20 --knn_freq=1 --data=imagenet --batch_size=256 --ad_init=1 "%(args.data)
write_slurm_sh("type0",command_line,queue_name)
elif args.mode==-4:
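    # mode -4: linear evaluation with LARS (lincls_lars.py); vit_model switches to a ViT-small backbone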
use_bn = args.type
vit_model =True
for lr in [0.05,0.1]:
for weight_decay in [0]:
for model_type in [0]:
command_line ="python lincls_lars.py -a resnet50 --dist-url 'tcp://localhost:10001' " \
"--multiprocessing-distributed --world-size 1 --rank 0 --pretrained='%s' --lr %f --wd %f " \
"--lars --data %s --use_bn=%d --model_type=%d "%(args.F,lr,
weight_decay,args.data,use_bn,model_type)
if vit_model:
command_line +=" --arch vit_small"
write_slurm_sh("linear_larsb4096_eval_%s_bn%d_%.4f_wd_%.8f" % (args.comment, use_bn,lr,weight_decay),
command_line,
queue_name)
elif args.mode==-40:
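    # mode -40: scan a study directory for checkpoint_0099.pth.tar checkpoints and submit a LARS linear-eval job for each one found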
use_bn = args.type
study_dir = os.path.abspath(args.F)
checkpoint_name = "checkpoint_0099.pth.tar"
for item in os.listdir(study_dir):
if item== checkpoint_name:
current_model_path = os.path.join(study_dir,item)
current_dir = study_dir
current_comment = os.path.split(current_dir)[1]
else:
current_dir = os.path.join(study_dir,item)
current_comment = os.path.split(current_dir)[1]
current_model_path = find_checkpoint(current_dir,checkpoint_name)
if current_model_path is None:
print("%s dir did not find checkpoint"%current_dir)
continue
if not os.path.exists(current_model_path):
print("%s model path did not exist"%current_model_path)
continue
print("fintune %s model"%current_model_path)
for lr in [0.05, 0.1]:
for weight_decay in [0]:
for model_type in [0]:
command_line = "python lincls_lars.py -a resnet50 --dist-url 'tcp://localhost:10001' " \
"--multiprocessing-distributed --world-size 1 --rank 0 --pretrained='%s' --lr %f --wd %f " \
"--lars --data %s --use_bn=%d --model_type=%d " % (current_model_path, lr,
weight_decay, args.data, use_bn,
model_type)
write_slurm_sh(
"linear_larsb4096_eval_%s_bn%d_%.4f_wd_%.8f" % (str(args.comment)+current_comment, use_bn, lr, weight_decay),
command_line,
queue_name)
elif args.mode==-5:
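    # mode -5: VOC/COCO detection transfer jobs using the configs under detection/configs (VOC configs are run 5 times each)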
config_dict={}
config_path = os.path.join(os.getcwd(),"detection")
config_path = os.path.join(config_path,"configs")
config_dict['VOC']=os.path.join(config_path,"pascal_voc_R_50_C4_24k_loco.yaml")
config_dict['VOC_freeze'] = os.path.join(config_path, "pascal_voc_R_50_C4_24k_loco_freeze.yaml")
config_dict['COCO'] = os.path.join(config_path,"coco_R_50_C4_2x.yaml_loco.yaml")
config_dict['COCO_freeze'] =os.path.join(config_path,"coco_R_50_C4_2x.yaml_loco_freeze.yaml")
model_path = os.path.abspath(args.F)
model_name = os.path.split(model_path)[1].replace(".pkl","")
for kk in range(5):
for config_now in ['VOC','VOC_freeze']:
command_line = "python detection/train_net.py --config-file %s --num-gpus 8" \
" MODEL.WEIGHTS %s"%(config_dict[config_now],args.F)
write_slurm_sh_faster("detection_%s_run%d_%s" % (config_now, kk,model_name),
command_line, queue_name, gpu_memory=True)
for config_now in ['COCO',"COCO_freeze"]:
command_line = "python detection/train_net.py --config-file %s --num-gpus 8" \
" MODEL.WEIGHTS %s" % (config_dict[config_now], args.F)
write_slurm_sh_faster("detection_%s_%s" % (config_now, model_name),
command_line, queue_name, gpu_memory=True)
elif args.mode==-6:
#finetune with mocov3 protocol
for lr in [0.03,0.06,0.1,0.15,0.12]:
for weight_decay in [0]:
command_line ="python main_lincls.py -a resnet50 --dist-url 'tcp://localhost:10001' " \
"--multiprocessing-distributed --world-size 1 --rank 0 --pretrained='%s' --lr %f --wd %f " \
" %s "%(args.F,lr,weight_decay,args.data)
write_slurm_sh("linear_main_lincls_%s_%.4f_wd_%.8f" % (args.comment, lr,weight_decay),
command_line,
queue_name)
| 72.542391
| 156
| 0.428224
|
import os
import argparse
from ops.os_operation import mkdir
import time
def write_slurm_sh_multi_H2(id,command_line, queue_name="learnfair",nodes=1,
gpu_per_node=8,wall_time=3*24*60,username="wang3702",CPU_PER_GPU=8):
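    # Write a multi-node sbatch script (one task per node), build a tcp:// dist_url from
    # SLURM_NODELIST, launch the command through srun with --slurm/--dist_url appended,
    # and submit the script with sbatch.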
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dependency_handler_path = os.path.join(os.getcwd(), "ops")
dependency_handler_path = os.path.join(dependency_handler_path, "handler.txt")
run_path = os.path.join(os.getcwd(), "log")
mkdir(run_path)
run_path = os.path.abspath(run_path)
prefix = "node%d_gpu%d"%(nodes,gpu_per_node)
batch_file = os.path.join(run_path, prefix+"slurm_job_" + str(id) + ".sh")
output_path = os.path.join(run_path, prefix+"output_" + str(id) + "_" + str(formatted_today + now) + ".log")
error_path = os.path.join(run_path, prefix+"error_" + str(id) + "_" + str(formatted_today + now) + ".log")
with open(batch_file, "w") as file:
file.write("#! /bin/bash\n")
file.write("#SBATCH --job-name=%s\n" % id)
file.write("#SBATCH --output=%s\n" % output_path)
file.write("#SBATCH --error=%s\n" % error_path)
file.write("#SBATCH --partition=%s\n"%queue_name)
file.write("#SBATCH --signal=USR1@600\n")
file.write("#SBATCH --nodes=%d\n" % nodes)
file.write("#SBATCH --ntasks-per-node=%d\n" % 1)
file.write("#SBATCH --mem=%dG\n"%(350/8*gpu_per_node))
file.write("#SBATCH --gpus=%d\n" % (nodes * gpu_per_node))
file.write("#SBATCH --gpus-per-node=%d\n" % (gpu_per_node))
file.write("#SBATCH --cpus-per-task=%d\n"%(CPU_PER_GPU*gpu_per_node))
file.write("#SBATCH --time=%d\n"%wall_time)
file.write("#SBATCH --mail-user=%s@fb.com\n"%username)
file.write("#SBATCH --mail-type=FAIL\n")
file.write("#SBATCH --mail-type=end \n")
file.write('#SBATCH --constraint="volta"\n')
report_info = "%s job failed; \t" % id
report_info += "log path: %s; \t" % output_path
report_info += "error record path: %s\t" % error_path
report_info += "command line path: %s\t" % batch_file
file.write('#SBATCH --comment="%s"\n' % (report_info))
with open(dependency_handler_path, 'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
file.write("export GLOO_SOCKET_IFNAME=\nexport NCCL_SOCKET_IFNAME=\n")
file.write("module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n")
file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
file.write("conda activate pytorch2\n")
file.write("master_node=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:4}\n")
file.write('dist_url="tcp://"\n')
file.write("dist_url+=$master_node\n")
file.write("dist_url+=:40000\n")
file.write("export MASTER_ADDR=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:4}\n")
file.write("export MASTER_PORT=29500\n")
file.write("srun --label "+command_line + " --slurm=1 --dist_url=$dist_url &\n")
file.write("wait $!\n")
file.write("set +x \n")
file.write("echo ..::Job Finished, but No, AGI is to BE Solved::.. \n")
os.system('sbatch ' + batch_file)
def find_checkpoint(current_dir,checkpoint_name):
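    # Recursively search current_dir for a file named checkpoint_name; return its full path, or None if absent.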
if not os.path.isdir(current_dir):
return None
listfiles = os.listdir(current_dir)
for item in listfiles:
sub_dir = os.path.join(current_dir,item)
if item==checkpoint_name:
return sub_dir
elif os.path.isdir(sub_dir):
search_result = find_checkpoint(sub_dir,checkpoint_name)
if search_result is not None:
return search_result
return None
def write_slurm_sh_multi(id,command_line, queue_name="learnfair",nodes=1,
gpu_per_node=8,wall_time=3*24*60,username="wang3702",
CPU_PER_GPU=8,gpu_memory=False,environment=0):
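    # Multi-node launcher like the H2 variant above; gpu_memory selects 32GB (volta32gb) nodes
    # and environment picks the conda env ("pytorch2" when 0, "pytorch" otherwise).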
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dependency_handler_path = os.path.join(os.getcwd(), "ops")
dependency_handler_path = os.path.join(dependency_handler_path, "handler.txt")
run_path = os.path.join(os.getcwd(), "log")
mkdir(run_path)
run_path = os.path.abspath(run_path)
prefix = "node%d_gpu%d"%(nodes,gpu_per_node)
batch_file = os.path.join(run_path, prefix+"slurm_job_" + str(id) + ".sh")
output_path = os.path.join(run_path, prefix+"output_" + str(id) + "_" + str(formatted_today + now) + ".log")
error_path = os.path.join(run_path, prefix+"error_" + str(id) + "_" + str(formatted_today + now) + ".log")
with open(batch_file, "w") as file:
file.write("#! /bin/bash\n")
file.write("#SBATCH --job-name=%s\n" % id)
file.write("#SBATCH --output=%s\n" % output_path)
file.write("#SBATCH --error=%s\n" % error_path)
file.write("#SBATCH --partition=%s\n"%queue_name)
file.write("#SBATCH --signal=USR1@600\n")
file.write("#SBATCH --nodes=%d\n" % nodes)
file.write("#SBATCH --ntasks-per-node=%d\n" % 1)
file.write("#SBATCH --mem=%dG\n"%(350/8*gpu_per_node))
file.write("#SBATCH --gpus=%d\n" % (nodes * gpu_per_node))
file.write("#SBATCH --gpus-per-node=%d\n" % (gpu_per_node))
file.write("#SBATCH --cpus-per-task=%d\n"%(CPU_PER_GPU*gpu_per_node))
file.write("#SBATCH --time=%d\n"%wall_time)
file.write("#SBATCH --mail-user=%s@fb.com\n"%username)
file.write("#SBATCH --mail-type=FAIL\n")
file.write("#SBATCH --mail-type=end \n")
if gpu_memory is False:
file.write('#SBATCH --constraint="volta"\n')
else:
file.write('#SBATCH --constraint="volta32gb"\n')
report_info = "%s job failed; \t" % id
report_info += "log path: %s; \t" % output_path
report_info += "error record path: %s\t" % error_path
report_info += "command line path: %s\t" % batch_file
file.write('#SBATCH --comment="%s"\n' % (report_info))
with open(dependency_handler_path, 'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
file.write("export GLOO_SOCKET_IFNAME=\nexport NCCL_SOCKET_IFNAME=\n")
file.write("module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n")
file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
if environment==0:
file.write("conda activate pytorch2\n")
else:
file.write("conda activate pytorch\n")
file.write("master_node=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:4}\n")
file.write('dist_url="tcp://"\n')
file.write("dist_url+=$master_node\n")
file.write("dist_url+=:40000\n")
file.write("export MASTER_ADDR=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:4}\n")
file.write("export MASTER_PORT=29500\n")
file.write("srun --label "+command_line + " --slurm=1 --dist_url=$dist_url &\n")
file.write("wait $!\n")
file.write("set +x \n")
file.write("echo ..::Job Finished, but No, AGI is to BE Solved::.. \n")
os.system('sbatch ' + batch_file)
def write_slurm_sh_multi2(id,command_line, queue_name="learnfair",nodes=1,
gpu_per_node=8,wall_time=3*24*60,username="wang3702",CPU_PER_GPU=8,
gpu_memory=False,environment=0):
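    # Multi-node variant that exports MASTER_ADDR/MASTER_PORT and runs the command via srun
    # without appending the --slurm/--dist_url flags.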
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dependency_handler_path = os.path.join(os.getcwd(), "ops")
dependency_handler_path = os.path.join(dependency_handler_path, "handler.txt")
run_path = os.path.join(os.getcwd(), "log")
mkdir(run_path)
run_path = os.path.abspath(run_path)
prefix = "node%d_gpu%d"%(nodes,gpu_per_node)
batch_file = os.path.join(run_path, prefix+"slurm_job_" + str(id) + ".sh")
output_path = os.path.join(run_path, prefix+"output_" + str(id) + "_" + str(formatted_today + now) + ".log")
error_path = os.path.join(run_path, prefix+"error_" + str(id) + "_" + str(formatted_today + now) + ".log")
with open(batch_file, "w") as file:
file.write("#! /bin/bash\n")
file.write("#SBATCH --job-name=%s\n" % id)
file.write("#SBATCH --output=%s\n" % output_path)
file.write("#SBATCH --error=%s\n" % error_path)
file.write("#SBATCH --partition=%s\n"%queue_name)
file.write("#SBATCH --signal=USR1@600\n")
file.write("#SBATCH --nodes=%d\n" % nodes)
file.write("#SBATCH --ntasks-per-node=%d\n" % 1)
file.write("#SBATCH --mem=%dG\n"%(350/8*gpu_per_node))
file.write("#SBATCH --gpus=%d\n" % (nodes * gpu_per_node))
file.write("#SBATCH --gpus-per-node=%d\n" % (gpu_per_node))
file.write("#SBATCH --cpus-per-task=%d\n"%(CPU_PER_GPU*gpu_per_node))
file.write("#SBATCH --time=%d\n"%wall_time)
file.write("#SBATCH --mail-user=%s@fb.com\n"%username)
file.write("#SBATCH --mail-type=FAIL\n")
file.write("#SBATCH --mail-type=end \n")
if gpu_memory is False:
file.write('#SBATCH --constraint="volta"\n')
else:
file.write('#SBATCH --constraint="volta32gb"\n')
report_info = "%s job failed; \t" % id
report_info += "log path: %s; \t" % output_path
report_info += "error record path: %s\t" % error_path
report_info += "command line path: %s\t" % batch_file
file.write('#SBATCH --comment="%s"\n' % (report_info))
with open(dependency_handler_path, 'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
file.write("export GLOO_SOCKET_IFNAME=\nexport NCCL_SOCKET_IFNAME=\n")
file.write("module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n")
file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
if environment==0:
file.write("conda activate pytorch2\n")
else:
file.write("conda activate pytorch\n")
file.write("master_node=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:3}\n")
file.write('dist_url="tcp://"\n')
file.write("dist_url+=$master_node\n")
file.write("dist_url+=:40000\n")
file.write("export MASTER_ADDR=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:3}\n")
file.write("export MASTER_PORT=29500\n")
file.write("srun --label "+command_line + " &\n")
file.write("wait $!\n")
file.write("set +x \n")
file.write("echo ..::Job Finished, but No, AGI is to BE Solved::.. \n")
os.system('sbatch ' + batch_file)
def write_slurm_sh_faster(id,command_line, queue_name="learnfair",nodes=1,
gpu_per_node=8,wall_time=3*24*60,username="wang3702",CPU_PER_GPU=8,
gpu_memory=False,environment=0):
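    # Single-submission launcher with one task per GPU; the command is executed directly
    # (no srun), optionally on volta32gb nodes and with a selectable conda env.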
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dependency_handler_path = os.path.join(os.getcwd(), "ops")
dependency_handler_path = os.path.join(dependency_handler_path, "handler.txt")
run_path = os.path.join(os.getcwd(), "log")
mkdir(run_path)
run_path = os.path.abspath(run_path)
batch_file = os.path.join(run_path, "slurm_job_" + str(id) + ".sh")
output_path = os.path.join(run_path, "output_" + str(id) + "_" + str(formatted_today + now) + ".log")
error_path = os.path.join(run_path, "error_" + str(id) + "_" + str(formatted_today + now) + ".log")
with open(batch_file, "w") as file:
file.write("#!/bin/bash\n")
file.write("#SBATCH --job-name=%s\n" % id)
file.write("#SBATCH --output=%s\n" % output_path)
file.write("#SBATCH --error=%s\n" % error_path)
file.write("#SBATCH --partition=%s\n"%queue_name)
file.write("#SBATCH --signal=USR1@600\n")
file.write("#SBATCH --nodes=%d\n" % nodes)
file.write("#SBATCH --ntasks-per-node=%d\n" % gpu_per_node)
file.write("#SBATCH --mem=%dG\n"%(int(350/8*gpu_per_node)))
file.write("#SBATCH --gpus=%d\n" % (nodes * gpu_per_node))
file.write("#SBATCH --cpus-per-task=%d\n"%(CPU_PER_GPU))
file.write("#SBATCH --time=%d\n"%wall_time)
file.write("#SBATCH --mail-user=%s@fb.com\n"%username)
file.write("#SBATCH --mail-type=FAIL\n")
file.write("#SBATCH --mail-type=end \n")
if gpu_memory:
file.write('#SBATCH --constraint="volta32gb"\n')
else:
file.write('#SBATCH --constraint="volta"\n')
report_info = "%s job failed; \t" % id
report_info += "log path: %s; \t" % output_path
report_info += "error record path: %s\t" % error_path
report_info += "command line path: %s\t" % batch_file
file.write('#SBATCH --comment="%s"\n' % (report_info))
with open(dependency_handler_path, 'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
file.write("module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n")
file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
if environment==0:
file.write("conda activate pytorch2\n")
else:
file.write("conda activate pytorch\n")
file.write(command_line + " &\n")
file.write("wait $!\n")
file.write("set +x \n")
file.write("echo ..::Job Finished, but No, AGI is to BE Solved::.. \n")
os.system('sbatch ' + batch_file)
def write_slurm_sh(id,command_line, queue_name="learnfair",nodes=1,
gpu_per_node=8,wall_time=3*24*60,username="wang3702",CPU_PER_GPU=10):
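    # Basic launcher: one task per node, 350G of memory, volta GPUs; runs the command directly.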
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dependency_handler_path = os.path.join(os.getcwd(),"ops")
dependency_handler_path = os.path.join(dependency_handler_path,"handler.txt")
run_path = os.path.join(os.getcwd(),"log")
mkdir(run_path)
run_path = os.path.abspath(run_path)
batch_file = os.path.join(run_path,"slurm_job_"+str(id)+".sh")
output_path = os.path.join(run_path,"output_"+str(id)+"_"+str(formatted_today+now)+".log")
error_path = os.path.join(run_path,"error_"+str(id)+"_"+str(formatted_today+now)+".log")
with open(batch_file,"w") as file:
file.write("#!/bin/sh\n")
file.write("#SBATCH --job-name=%s\n"%id)
file.write("#SBATCH --output=%s\n"%output_path)
file.write("#SBATCH --error=%s\n"%error_path)
file.write("#SBATCH --partition=%s\n"%queue_name)
file.write("#SBATCH --signal=USR1@600\n")
file.write("#SBATCH --nodes=%d\n"%nodes )
file.write("#SBATCH --ntasks-per-node=1\n")
file.write("#SBATCH --mem=350G\n")
file.write("#SBATCH --gpus=%d\n"%(nodes*gpu_per_node))
file.write("#SBATCH --gpus-per-node=%d\n" % (gpu_per_node))
file.write("#SBATCH --cpus-per-task=%d\n"%(CPU_PER_GPU*gpu_per_node))
file.write("#SBATCH --time=%d\n"%wall_time)
file.write("#SBATCH --mail-user=%s@fb.com\n"%username)
file.write("#SBATCH --mail-type=FAIL\n")
file.write("#SBATCH --mail-type=end \n")
file.write('#SBATCH --constraint="volta"\n')
report_info ="%s job failed; \t"%id
report_info += "log path: %s; \t"%output_path
report_info += "error record path: %s\t"%error_path
report_info += "command line path: %s\t"%batch_file
file.write('#SBATCH --comment="%s"\n'%(report_info))
with open(dependency_handler_path,'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
file.write("module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n")
file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
file.write("conda activate pytorch2\n")
file.write(command_line+" &\n")
file.write("wait $!\n")
file.write("set +x \n")
file.write("echo ..::Job Finished, but No, AGI is to BE Solved::.. \n")
os.system('sbatch ' + batch_file)
parser = argparse.ArgumentParser(description='slurm job submission')
parser.add_argument('--data', default="imagenet", type=str, metavar='DIR',
help='path to dataset')
parser.add_argument("--mode",type=int,default=0,help="control mode for training")
parser.add_argument("--type",type=int,default=0,help="running type control")
parser.add_argument("--roi",type=int,default = 20, help="number of rois sampled here")
parser.add_argument("--queue",type=int,default=0, help="queue specified list")
parser.add_argument("-F",type=str, default=None, help="resume path for running again")
parser.add_argument("--comment", type=str,default=None,help="adding comment for script names")
parser.add_argument("--node",type=int,default=1,help="nodes needed for training")
parser.add_argument("--gpu",type=int,default=8,help="number of gpus per node")
args = parser.parse_args()
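# map --queue to a SLURM partition name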
if args.queue ==0:
queue_name = "learnfair"
elif args.queue ==1:
queue_name = "dev"
elif args.queue ==2:
queue_name = "scavenge"
elif args.queue ==3:
queue_name = 'priority'
elif args.queue ==4:
queue_name = 'learnlab'
elif args.queue==5:
queue_name = 'devlab'
elif args.queue==6:
queue_name = 'prioritylab'
dump_path= os.path.join(os.getcwd(),"swav_dump_100")
from ops.os_operation import mkdir
mkdir(dump_path)
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dump_path = os.path.join(dump_path, formatted_today + now)
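# mode 1: MoCo-based pretraining experiments; --type selects the variant / ablation to launch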
if args.mode==1:
if args.type==0:
command_line = "python3 main_adco.py --mode=1 --lr=0.06 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0006 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=16 --mask_size=32 " \
"--num_roi=1 " % args.data
write_slurm_sh("baseline_sym_moco_lr0.06", command_line, queue_name)
elif args.type==10:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=10 " % args.data
if args.F is not None:
resume_name = os.path.split(os.path.abspath(args.F))[1]
command_line += "--resume=%s"%args.F
write_slurm_sh("halfdropoutnew_resume%s"%resume_name, command_line, queue_name)
else:
write_slurm_sh("halfdropoutnew", command_line, queue_name)
elif args.type==11:
for mask_size in [96, 160]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=11 --shift_ratio=0 " \
" --mask_size=%d " % (args.data,mask_size)
write_slurm_sh("type11_roimatch_%s"%mask_size, command_line, queue_name)
elif args.type==13:
for mask_size in [96,160]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=13 " \
"--mask_size=%d "%(args.data,mask_size)
write_slurm_sh("type13_singleroi_vs_global_%d"%mask_size,command_line,queue_name)
time.sleep(1)
elif args.type==14:
for mask_size in [96,160]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=14 " \
"--mask_size=%d "%(args.data,mask_size)
write_slurm_sh("type14_singleroi_vs_global_%d"%mask_size,command_line,queue_name)
elif args.type==16:
for mask_size in [96,128,160]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=16 " \
"--mask_size=%d --num_roi=10 "%(args.data,mask_size)
write_slurm_sh("type16_roi+global_vs_global_%d"%mask_size,command_line,queue_name)
elif args.type==-16:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=16 --mask_size=32 --num_roi=1 " % args.data
if args.F is not None:
resume_name = os.path.split(os.path.abspath(args.F))[1]
command_line += " --resume=%s"%args.F
write_slurm_sh("baseline_sym_moco_resume%s"%resume_name, command_line, queue_name)
else:
write_slurm_sh("baseline_sym_moco", command_line,queue_name)
elif args.type==17:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=17 --mask_size=32" \
" --num_roi=%d" % (args.data,args.roi)
write_slurm_sh("type17_randroi_%d"%args.roi, command_line,queue_name)
elif args.type==-17:
for roi in [10,20,50,100]:
for mask_size in [32, 96, 160, 196]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=17 --mask_size=%d" \
" --num_roi=%d" % (args.data,mask_size, roi)
write_slurm_sh("type17_randroi_%d_masksize_%d" % (roi,mask_size), command_line,queue_name)
elif args.type==18:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=18 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 "% (args.data)
if args.F is not None:
resume_name = os.path.split(os.path.abspath(args.F))[1]
command_line += "--resume=%s"%args.F
write_slurm_sh("multi_moco_baseline_resume%s"%resume_name, command_line, queue_name)
else:
write_slurm_sh("multi_moco_baseline" , command_line, queue_name)
elif args.type==19:
for roi in [20]:
for mask_size in [32,160]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=19 --mask_size=%d" \
" --num_roi=%d" % (args.data,mask_size, roi)
write_slurm_sh("type19_randroi_%d_masksize_%d" % (roi,mask_size), command_line,queue_name)
elif args.type==20:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=20 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 "% (args.data)
if args.F is not None:
resume_name = os.path.split(os.path.abspath(args.F))[1]
command_line += " --resume=%s"%args.F
write_slurm_sh("2key_multi_moco_baseline_correct_resume%s"%resume_name, command_line, queue_name)
else:
write_slurm_sh("2key_multi_moco_baseline_correct", command_line, queue_name)
elif args.type==21:
for roi in [20]:
for mask_size in [96]:
command_line = "python3 main_adco.py --mode=1 --lr=0.09 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=768 --knn_batch_size=256 --cos=1 --lr_final=0.0009 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=21 --mask_size=%d" \
" --num_roi=%d" % (args.data,mask_size, roi)
write_slurm_sh("type21_randroi_%d_masksize_%d" % (roi,mask_size), command_line,queue_name)
elif args.type==22:
for roi in [50]:
for mask_size in [96]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=22 --mask_size=%d" \
" --num_roi=%d" % (args.data, mask_size, roi)
write_slurm_sh("type22_randroi_%d_masksize_%d" % (roi,mask_size), command_line,queue_name)
elif args.type==23:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=23 --nmb_crops 2 2 2 2 2 2 2 2" \
" --size_crops 96 112 128 144 160 176 192 208 " % args.data
write_slurm_sh("type23_specifyroi", command_line, queue_name)
elif args.type==-23:
min_scale = 64
max_scale = 224
divide_list = [2,4,8,16,32]
pick_times = [1,2,3]
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale<max_scale:
check_list+=str(current_scale)+" "
num_list+=str(pick_time)+" "
current_scale+=divide
print(check_list)
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=23 --nmb_crops %s " \
" --size_crops %s " % (args.data,num_list,check_list)
write_slurm_sh("type23_specifyroi_%d_%d"%(pick_time,divide), command_line, queue_name)
elif args.type==24:
for alpha in [0.5, 1.0, 2.0]:
for local_t in [0.1,0.2,0.3]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=24 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=1.0 " % (args.data,local_t)
write_slurm_sh("type24_lg_t_%.3f_alpha_%.2f"%(local_t,alpha), command_line, queue_name)
elif args.type==25:
for alpha in [0.5]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=24 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % (args.data, local_t,alpha)
write_slurm_sh("type25_lgq_t_%.3f_alpha_%.2f" %(local_t,alpha), command_line, queue_name)
elif args.type==26:
for alpha in [0.5,1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=26 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % (args.data, local_t,alpha)
write_slurm_sh("type26_lgq_t_%.3f_alpha_%.2f" %(local_t,alpha), command_line, queue_name)
elif args.type == 27:
min_scale = 96
max_scale = 224
divide_list = [16]
pick_times = [1]
for learning_rate in [0.05]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0.1,0.15,0.2,0.3]:
for local_t in [0.12,0.15,0.18]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=27 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f " % \
(learning_rate,args.data, local_t,num_list, check_list, local_t, alpha)
write_slurm_sh("type27_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, pick_time, divide,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == -270:
for num_roi in [6,10,20,30]:
for crop_size in [64, 96, 128, 160, 192]:
for learning_rate in [0.05]:
for alpha in [0.1]:
for local_t in [0.18]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=27 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh(
"type27crop_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==-271:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0.1]:
for local_t in [0.18,0.2]:
for moco_dim in [256,512]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=%d " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=27 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data,moco_dim, local_t, num_roi, crop_size, local_t, alpha)
write_slurm_sh(
"type27dim_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f_dim%d" % (
local_t, alpha, num_roi, crop_size, learning_rate,moco_dim),
command_line, queue_name)
time.sleep(1)
elif args.type == -27:
for learning_rate in [0.05]:
for alpha in [0.1]:
for local_t in [0.18]:
for moco_dim in [128,256,512]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=27 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type27baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 28:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=28 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " % (args.data)
write_slurm_sh("type28_small_inside", command_line, queue_name)
elif args.type==29:
for learning_rate in [0.03]:
for alpha in [0.5,1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.2f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.5f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=29 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " \
"" % (learning_rate,args.data, learning_rate/100,local_t, alpha)
write_slurm_sh("type29_lgq_t_%.3f_alpha_%.2f_lr_%.4f" % (local_t, alpha,learning_rate), command_line, queue_name)
elif args.type==30:
for learning_rate in [0.03]:
for alpha in [0.5,1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.2f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.5f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=30 --nmb_crops 6 " \
" --size_crops 96 --local_t=%.4f --alpha=%.2f " \
"" % (learning_rate,args.data, learning_rate/100,local_t, alpha)
write_slurm_sh("type30_lgq_t_%.3f_alpha_%.2f_lr_%.4f" % (local_t, alpha,learning_rate), command_line, queue_name)
elif args.type==31:
for learning_rate in [0.03]:
for alpha in [0.5]:
for local_t in [0.2]:
for num_roi in [5, 10, 20]:
for mask_size in [96]:
command_line = "python3 main_adco.py --mode=1 --lr=%.2f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.5f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=31 " \
"--local_t=%.4f --alpha=%.2f --num_roi=%d --mask_size=%d " \
"" % (learning_rate, args.data, learning_rate / 100,
local_t, alpha,num_roi,mask_size)
write_slurm_sh("type31_lgq_t_%.3f_alpha_%.2f_lr_%.4f_roi%d_mask%d" %
(local_t, alpha, learning_rate,num_roi,mask_size),
command_line, queue_name)
elif args.type==32:
for learning_rate in [0.03]:
for alpha in [0.5]:
for local_t in [0.2]:
for num_roi in [5, 10, 20]:
for mask_size in [96]:
command_line = "python3 main_adco.py --mode=1 --lr=%.2f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.5f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=32 " \
"--local_t=%.4f --alpha=%.2f --num_roi=%d --mask_size=%d " \
"" % (learning_rate, args.data, learning_rate / 100,
local_t, alpha,num_roi,mask_size)
write_slurm_sh("type32_lgq_t_%.3f_alpha_%.2f_lr_%.4f_roi%d_mask%d" %
(local_t, alpha, learning_rate,num_roi,mask_size),
command_line, queue_name)
elif args.type==33:
for learning_rate in [0.03,0.04,0.05,0.06,0.09,0.12]:
for alpha in [0.5,1.0,2.0,5.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=33 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh("multimoco_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name)
elif args.type==-28:
for learning_rate in [0.06]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=28 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh("multimocoinside_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name)
elif args.type==34:
min_scale = 96
max_scale = 224
divide_list = [16]
pick_times = [1]
for learning_rate in [0.04, 0.05]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0.1, 0.3, 0.5,1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=34 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, num_list, check_list, local_t, alpha)
write_slurm_sh("type34_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, pick_time, divide, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 36:
min_scale = 96
max_scale = 224
divide_list = [16]
pick_times = [1]
for learning_rate in [0.04,0.05]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0.1]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=36 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f " % \
(learning_rate,args.data, local_t,num_list, check_list, local_t, alpha)
write_slurm_sh("type36_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, pick_time, divide,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==37:
for learning_rate in [0.03,0.04,0.05,0.06]:
for alpha in [0.1,0.3,0.5,1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=37 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type37baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==38:
min_scale = 96
max_scale = 224
divide_list = [16]
pick_times = [1]
for learning_rate in [0.05]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
                    for alpha in [0]:
                        for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=38 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,"", "", local_t, alpha)
write_slurm_sh("type38_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, pick_time, divide, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==-38:
for learning_rate in [0.05]:
for alpha in [0.1,0.3,0.5,1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=38 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type38baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==39:
for learning_rate in [0.05]:
for alpha in [0.1]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=39 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type39baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==40:
for learning_rate in [0.05]:
for alpha in [0.5]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=40 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type40baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==41:
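        # Type 41: presumably contrasts a single ROI of the given mask size against the global view
        # (the job name below reads "singleroi_vs_global").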
for mask_size in [96]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=41 " \
"--mask_size=%d "%(args.data,mask_size)
write_slurm_sh("type41_singleroi_vs_global_%d"%mask_size,command_line,queue_name)
elif args.type==42:
for learning_rate in [0.05]:
for alpha in [0.1,0.5]:
for local_t in [0.15,0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=42 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type42baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==43:
for learning_rate in [0.05]:
for alpha in [0.1,0.5]:
for local_t in [0.15,0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=43 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type43baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 44:
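        # Type 44: broader grid over ROI count, local crop size, lr, and alpha.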
for num_roi in [6]:
for crop_size in [96,192]:
for learning_rate in [0.03,0.05,0.06]:
for alpha in [0.1,0.3,0.5,1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=44 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh(
"type44_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==-44:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0.1,0.5]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=44 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh(
"type44align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==45 or args.type==46:
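        # Types 45/46: mask-size variants sharing one launcher; args.type is forwarded via --type,
        # so both run the same sweep.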
for crop_size in [96]:
for learning_rate in [0.03,0.04,0.05]:
for alpha in [0.1,0.3,0.5,1,2]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --mask_size %d" \
" --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t, args.type, crop_size,local_t, alpha)
write_slurm_sh(
"type%d_crop_lgq_t_%.3f_alpha_%.2f_%d_lr%.4f" % (args.type, local_t,alpha,
crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
    elif args.type == 47:
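        # Type 47: same scale-ladder construction as type 38, but only check_list is passed
        # (there is no --nmb_crops flag here); num_list is computed and printed but otherwise unused.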
min_scale = 96
max_scale = 224
divide_list = [16]
pick_times = [1]
for learning_rate in [0.03,0.05]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
                    for alpha in [0.1, 0.5, 1.0]:
                        for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=47 " \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t, check_list, local_t, alpha)
write_slurm_sh("type47_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, pick_time, divide, learning_rate),
command_line, queue_name)
time.sleep(1)
    elif args.type == 49:
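        # Type 49: sweep the ladder step size (divide) with alignment enabled; submitted via the
        # faster queue helper.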
min_scale = 96
max_scale = 224
divide_list = [2,4,8,16,32]
pick_times = [1]
for learning_rate in [0.06]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=49 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data, local_t, num_list,check_list, local_t, alpha)
write_slurm_sh_faster(
"type49crop_lgq_t_%.3f_alpha_%.2f_divide%d_lr%.4f" % (
local_t, alpha, divide, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==-49:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [-0.1,-0.3,-0.5,-1]:
for local_t in [0.18]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=49 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type49align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==50:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0, 0.1,0.5,1.0,2.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=50 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type50align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==51:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0, 0.1,0.5,1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=51 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type51align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==52:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0, 0.1,0.2,0.5,1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=52 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type52_1v1_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==53:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0, 0.1,0.5,1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=53 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type53align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==54:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0, 0.1,0.5,1.0]:
for local_t in [0.15,0.18,0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=54 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type54align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==55:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0.1]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=55 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type55align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==551:
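        # Type 551: appears to be a re-submission of type 55 with identical settings (note the
        # command line still passes --type=55).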
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0.1]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=55 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type55align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==550:
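        # Type 550: the type-55 setup with an added sweep over the predictor dimension (--pred_dim).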
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0.1]:
for local_t in [0.20]:
for pred_dim in [256,1024,2048]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=55 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 --pred_dim=%d " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha,pred_dim)
write_slurm_sh_faster(
"type55dim%d_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (pred_dim,local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==56:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05,0.06]:
for alpha in [0, 0.05,0.1,0.2]:
for local_t in [0.18, 0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=56 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type56align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==58:
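        # Type 58: multi-crop MoCo baseline (2x224 + 6x96) with explicit min/max crop scales.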
for learning_rate in [0.06]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=58 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh("multimoco_proj_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name)
elif args.type==59:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=59 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type59_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==60:
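        # Type 60: sweep the number of ROIs at a fixed mask size via --num_roi/--mask_size. Note
        # that lr_final is scaled by args.node here while lr and batch size are not; presumably
        # intended for single-node runs.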
for num_roi in [3,6,10,15,20,25,30]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=60 --num_roi=%d " \
" --mask_size=%d --local_t=%.4f --align=1 " % \
(learning_rate, args.data, epoch, 256,
256,learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type60_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==61:
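        # Type 61: indicate_list compares a global-only baseline ('', '') against 6 local 96px crops.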
        indicate_list = [['', ''], ['6', '96']]
for indication in indicate_list:
num_roi = indication[0]
crop_size= indication[1]
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=61 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --align=1 " % \
(learning_rate, args.data, epoch, 256, 256,
learning_rate / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type61_lgq_t_%.3f_%s_%s_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==62:
for learning_rate in [0.06]:
for alpha in [0,1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=62 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh("pixelembedshufflemoco_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name)
elif args.type==63:
for learning_rate in [0.06]:
for alpha in [0,1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=63 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh("pixelGLsync_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name)
elif args.type == 64:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0,0.1,0.2,0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=64 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data, local_t, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type64align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 65:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0,0.1,0.2,0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=65 --nmb_crops 1 %d " \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data, local_t, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type65align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 66:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0, 0.1, 0.2, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=66 --nmb_crops 1 %d " \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data, local_t, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type66align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 67:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06,0.08,0.09]:
for alpha in [0, 0.1, 0.2, 0.5]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=67 --nmb_crops 1 %d " \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data, local_t, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type67align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==68:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=68 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type68_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==69:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=69 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type69_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==70:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=70 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type70_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==71:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for alpha in [0,0.05,0.1,0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=71 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --alpha=%.4f " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t,alpha)
write_slurm_sh_faster(
"type71_lgq_t_%.3f_%d_%d_lr%.4f_alpha%.4f" % (local_t, num_roi, crop_size, learning_rate,alpha),
command_line, queue_name)
time.sleep(1)
elif args.type==72:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=72 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type72_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==73:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=73 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type73_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==74:
for crop_size in [64,96,128,160,192]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=74 --mask_size %d " \
" --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, crop_size, local_t)
write_slurm_sh_faster(
"type74_lgq_t_%.3f_mask%d_lr%.4f" % (local_t, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==75:
for num_roi in [3,6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=75 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type75_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==76 or args.type==98:
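        # Types 76/98: ablate all shuffle modes (0-8) for the given type; both --type and
        # --shuffle_mode are forwarded to main_adco.py.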
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(9):
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type,num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type,shuffle_mode,local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==-76:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [0,1,7]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=76 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d --mlp_bn_stat=0 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_faster(
"type76_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (shuffle_mode,local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==77:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [0,1,2,3,5,6]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=77 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_faster(
"type77_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (shuffle_mode,local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==78:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [0,1,3,4,5,7]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=78 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_faster(
"type78_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (shuffle_mode,local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==79:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(2,11):
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=79 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode)
write_slurm_sh_faster(
"type79_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==80:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1,5,7]:
for mlp_bn_stat in [0,1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=80 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" \
" --mlp_bn_stat=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode,mlp_bn_stat)
write_slurm_sh_faster(
"type80_%d_lgq_t_%.3f_%d_%d_lr%.4f_bnmode%d" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate,mlp_bn_stat),
command_line, queue_name)
time.sleep(1)
elif args.type==81:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1,5,7]:
for mlp_bn_stat in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=81 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" \
" --mlp_bn_stat=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode,mlp_bn_stat)
write_slurm_sh_faster(
"type81_%d_lgq_t_%.3f_%d_%d_lr%.4f_bnmode%d" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate,mlp_bn_stat),
command_line, queue_name)
time.sleep(1)
elif args.type==82:
for num_roi in [6,16,32,64]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1,5]:
for mlp_bn_stat in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=82 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" \
" --mlp_bn_stat=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode,mlp_bn_stat)
write_slurm_sh_faster(
"type82_%d_lgq_t_%.3f_%d_%d_lr%.4f_bnmode%d" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate,mlp_bn_stat),
command_line, queue_name)
time.sleep(1)
elif args.type == 83 or args.type==84:
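        # Types 83/84: grid over ROI count and alpha using --num_roi/--mask_size rather than the
        # multi-crop flags.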
for num_roi in [1,3,5,10]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for alpha in [0.1,0.2,0.5,1.0,2.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --num_roi %d" \
" --mask_size %d --local_t=%.4f --align=1 --alpha=%f " \
" " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t,args.type, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type%d_lgq_t_%.3f_%d_%d_lr%.4f_alpha%f" % (args.type,
local_t, num_roi, crop_size, learning_rate,alpha),
command_line, queue_name)
time.sleep(1)
elif args.type==85:
for num_roi in [6,16,32,64]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1,5]:
for mlp_bn_stat in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=85 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" \
" --mlp_bn_stat=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode,mlp_bn_stat)
write_slurm_sh_faster(
"type85_%d_lgq_t_%.3f_%d_%d_lr%.4f_bnmode%d" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate,mlp_bn_stat),
command_line, queue_name)
time.sleep(1)
elif args.type==86:
for num_roi in [6,16,32]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1,5,7]:
for mlp_bn_stat in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=86 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode)
write_slurm_sh_faster(
"type86_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
    elif args.type in (87, 88, 93, 94, 95, 96):
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t,args.type, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type,
local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==89 or args.type==90:
for num_roi in [1,5,10]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for alpha in [0.1,0.2,0.5,1.0,2.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --num_roi %d" \
" --mask_size %d --local_t=%.4f --align=1 --alpha=%f " \
" " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t,args.type, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type%d_lgq_t_%.3f_%d_%d_lr%.4f_alpha%f" % (args.type,
local_t, num_roi, crop_size, learning_rate,alpha),
command_line, queue_name)
time.sleep(1)
elif args.type==91:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t)
write_slurm_sh_faster(
"type%d_lgq_t_%.3f_lr%.4f" % (args.type, local_t, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==92:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(4):
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,shuffle_mode)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_lr%.4f" % (args.type,shuffle_mode, local_t, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==97:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(4):
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=97 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode)
write_slurm_sh_faster(
"type97_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
    elif args.type in (99, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113):
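        # Types 99-113: shared launcher; one job per type with shuffle_mode fixed to 1.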
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,shuffle_mode)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_lr%.4f" % (args.type,shuffle_mode, local_t, learning_rate),
command_line, queue_name)
time.sleep(1)
    elif args.type in (126, 127, 129, 131):
for learning_rate in [0.03]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(8):
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,shuffle_mode)
write_slurm_sh_faster(
"type%dablation_%d_lgq_t_%.3f_lr%.4f" % (args.type,shuffle_mode, local_t, learning_rate),
command_line, queue_name,environment=1)
time.sleep(1)
elif args.type==133 or args.type==134:
for learning_rate in [0.03]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(3):
for momentum_weight_decay in [0.9,0.99,0.999]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d --use_fp16=1 --momentum_stat=%f" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t, shuffle_mode,momentum_weight_decay)
write_slurm_sh_faster(
"type%dablation_%d_%f_lgq_t_%.3f_lr%.4f" % (
args.type, shuffle_mode,momentum_weight_decay, local_t, learning_rate),
command_line, queue_name, environment=1)
time.sleep(1)
    elif args.type in (128, 130, 132, 135, 136):
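        # Sweep group-norm group sizes (1-128) with fp16 enabled for these type variants.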
for learning_rate in [0.03]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,2,4,8,16,32,64,128]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,group_norm_size)
write_slurm_sh_faster(
"type%dgroupablation_%d_lgq_t_%.3f_lr%.4f" % (args.type,group_norm_size, local_t, learning_rate),
command_line, queue_name,environment=1)
time.sleep(1)
elif args.type==152:
for learning_rate in [0.03]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,2,4,8,16,32,64,128]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,group_norm_size)
write_slurm_sh_faster(
"type%dgroup_%d_lgq_t_%.3f_lr%.4f" % (args.type,group_norm_size, local_t, learning_rate),
command_line, queue_name,environment=0)
time.sleep(1)
elif args.type==137 or args.type==138:
for learning_rate in [0.03]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t)
write_slurm_sh_faster(
"type%d2bnablation_lgq_t_%.3f_lr%.4f" % (args.type,local_t, learning_rate),
command_line, queue_name,environment=1)
time.sleep(1)
elif args.type==118:
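        # Type 118: sweep --loco_conv_size/--loco_conv_stride on top of shuffle_mode 1.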
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1]:
for conv_size in [1,2,3,4]:
for stride_size in [1,2,3]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d --loco_conv_size=%d " \
"--loco_conv_stride=%d" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t, shuffle_mode,conv_size,stride_size)
write_slurm_sh_faster(
"type%d_%d_conv%d_%d_lr%.4f" % (args.type, shuffle_mode, conv_size,
stride_size,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==114:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,2,4,8]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,group_norm_size)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_lr%.4f" % (args.type,group_norm_size, local_t, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==115 or args.type==116 or args.type==117 or args.type==120 \
or args.type==121 or args.type==122 or args.type==123 or args.type==124:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,8]:
for alpha in [1.0,3.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.14 0.05" \
" --size_crops 224 96 --nmb_crops 2 6 --max_scale_crops 1.0 0.14 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --alpha=%f " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,group_norm_size,alpha)
write_slurm_sh_faster(
"type%d_%d_alpha%f_lgq_t_%.3f_lr%.4f" % (args.type,group_norm_size,alpha, local_t, learning_rate),
command_line, queue_name,gpu_memory=True)
time.sleep(1)
elif args.type==-120:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1]:
for num_crops in [4,8,16,32]:
same_alpha = int(num_crops / 2) - 1
iter_alpha =[same_alpha,1.0] if same_alpha!=1 else [1.0]
for alpha in iter_alpha:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.14 " \
" --size_crops 96 --nmb_crops %d --max_scale_crops 1.0 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --alpha=%f --use_fp16=1" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_crops,abs(args.type), local_t, group_norm_size, alpha)
write_slurm_sh_faster(
"type%d_%d_%d_alpha%f_lgq_t_%.3f_lr%.4f" % (
args.type,num_crops, group_norm_size, alpha, local_t, learning_rate),
command_line, queue_name, gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==139 or args.type==140 or args.type==141 or args.type==142 \
or args.type==143 or args.type==144 or args.type==145 or args.type==146 or args.type==147:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1]:
for num_crops in [4,8,16]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.14 " \
" --size_crops 96 --nmb_crops %d --max_scale_crops 1.0 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_crops,args.type, local_t, group_norm_size)
write_slurm_sh_faster(
"type%dviewnorm_%d_%d_lgq_t_%.3f_lr%.4f" % (
args.type, num_crops,group_norm_size, local_t, learning_rate),
command_line, queue_name, gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==148 or args.type==149 or args.type==150:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1]:
for num_crops in [4,8,16,32]:
for crop_size in [224,96]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.2 " \
" --size_crops %d --nmb_crops %d --max_scale_crops 1.0 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, crop_size,num_crops, args.type, local_t, group_norm_size)
write_slurm_sh_faster(
"type%dviewnorm_%d_%d_group%d_lgq_t_%.3f_lr%.4f" % (
args.type, num_crops,crop_size, group_norm_size, local_t, learning_rate),
command_line, queue_name, gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==151:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
" --type=%d --min_scale_crops 0.14 0.05 " \
" --size_crops 224 96 --nmb_crops 4 6 --max_scale_crops 1.0 0.14" \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 --alpha 1.0" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t, group_norm_size)
write_slurm_sh_faster(
"type%dmultiquery_viewkey_group%d_lgq_t_%.3f_lr%.4f" % (
args.type, group_norm_size, local_t, learning_rate),
command_line, queue_name, gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==125:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for momentum_stat in [0.9,0.99,0.999]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.14 0.05" \
" --size_crops 224 96 --nmb_crops 2 6 --max_scale_crops 1.0 0.14 --type=%d " \
" --local_t=%.4f --align=1 --momentum_stat=%f " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,256,
learning_rate * args.node / 100,
local_t, args.type, local_t, momentum_stat)
write_slurm_sh_faster(
"type%d_momentum%f_lgq_t_%.3f_lr%.4f" % (
args.type, momentum_stat, local_t, learning_rate),
command_line, queue_name, gpu_memory=True)
time.sleep(1)
elif args.type==-108:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for batch_size in [1024]:
for shuffle_mode in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * batch_size/256, args.data, epoch, batch_size,
256,
learning_rate * batch_size/256/ 100,
local_t, abs(args.type), local_t,shuffle_mode)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_lr%.4f" % (args.type,shuffle_mode, local_t, learning_rate*batch_size/256),
command_line, queue_name,gpu_memory=True)
time.sleep(1)
elif args.type==100:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,2,4,8]:
command_line = "python3 main_adco.py --mode=1 --lr=%f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --group_norm_size=%d " % \
(learning_rate/2, args.data, epoch, 128,
128,
learning_rate/ 200,
local_t,args.type, num_roi, crop_size, local_t,group_norm_size)
write_slurm_sh_faster(
"type%d_group%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type,group_norm_size,
local_t, num_roi, crop_size, learning_rate),
command_line, queue_name,gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==101:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_num in [1,2,4,8]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=101 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --group_norm_size=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, group_num)
write_slurm_sh_faster(
"type101_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
group_num, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==102:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [0,1,7]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type,num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type,shuffle_mode,local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
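    # mode 2: multi-node submissions of the same pre-training sweeps via write_slurm_sh_multi / write_slurm_sh_multi2 (nodes=args.node, gpu_per_node=args.gpu)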
elif args.mode==2:
if args.type==58:
for learning_rate in [0.06]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=58 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh_multi("multimoco_proj_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name,
nodes=args.node,gpu_per_node=args.gpu)
elif args.type==59:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [800]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=59 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate*args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate*args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_multi(
"type59_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
elif args.type==61:
for num_roi in ['','6']:
for crop_size in ['','96']:
for learning_rate in [0.04,0.06,0.08]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=61 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --align=1 --ngpu=%d " % \
(learning_rate, args.data, epoch, 256,256,
learning_rate / 100,
local_t, num_roi, crop_size, local_t,args.gpu)
write_slurm_sh_multi(
"type61_lgq_t_%.3f_%s_%s_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==77:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [5]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=77 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_multi(
"type77_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (shuffle_mode,local_t, num_roi, crop_size, learning_rate*args.node),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==87 or args.type==88 or args.type==94:
if args.type==87:
roi_num_list=[32]
elif args.type==88:
roi_num_list = [6,32]
else:
roi_num_list = [0]
for num_roi in roi_num_list:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [800]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 128,
learning_rate * args.node / 100,
local_t,args.type, num_roi, crop_size, local_t)
if args.queue<=1:
write_slurm_sh_multi2(
"type%d_lgq_t_%.3f_%d_%d_lr%.4f_epoch%d" % (args.type,
local_t, num_roi, crop_size,
learning_rate, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"type%d_lgq_t_%.3f_%d_%d_lr%.4f_epoch%d" % (args.type,
local_t, num_roi, crop_size, learning_rate,epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type == 100:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,2,4,8,16]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --group_norm_size=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t,args.type, num_roi, crop_size, local_t,group_norm_size)
if args.node>=4:
command_line += " --warmup_epochs=10 "
if args.queue <= 1:
write_slurm_sh_multi2(
"type%d_group%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type,group_norm_size,
local_t, num_roi, crop_size, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"type%d_group%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type, group_norm_size,
local_t, num_roi, crop_size,
learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==101:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_num in [1,2,4,8,16]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=101 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --group_norm_size=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, group_num)
if args.node >= 4:
command_line += " --warmup_epochs=10 "
if args.queue <= 1:
write_slurm_sh_multi2(
"type101_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
group_num, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"type101_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
group_num, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==119:
for batch_size in [4096]:
if True:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [800]:
for group_num in [1,8,16,32]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * batch_size / 256, args.data, epoch, batch_size,
256,
learning_rate * batch_size / 256 / 100,
local_t, abs(args.type), local_t,group_num)
command_line += " --warmup_epochs=10 "
write_slurm_sh_multi(
"mocov2bigbatch_type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_num, learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu, gpu_memory=True,
environment=1)
elif args.type==115 or args.type==120:
for batch_size in [2048]:
for learning_rate in [0.045]:
for local_t in [0.2]:
for epoch in [800]:
for group_norm_size in [64]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=10 --tensorboard=1 --min_scale_crops 0.14 0.05" \
" --size_crops 224 96 --nmb_crops 2 6 --max_scale_crops 1.0 0.14 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --alpha=%f --use_fp16=1 " % \
(learning_rate * batch_size/256, args.data, epoch, batch_size,
256,
learning_rate * batch_size/256/ 100,
local_t, args.type, local_t,group_norm_size,alpha)
write_slurm_sh_multi(
"multimoco_type%d_%d_alpha%f_lgq_t_%.3f_lr%.4f" % (args.type,group_norm_size,alpha, local_t, learning_rate),
command_line, queue_name,nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==149:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [1000]:
for group_norm_size in [1]:
for num_crops in [4]:
for crop_size in [224]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.2 " \
" --size_crops %d --nmb_crops %d --max_scale_crops 1.0 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
512,
learning_rate * args.node / 100,
local_t, crop_size,num_crops, args.type, local_t, group_norm_size)
write_slurm_sh_multi2(
"mocov2_%dview_type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, num_crops,group_norm_size, learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu, gpu_memory=False,
environment=0)
time.sleep(1)
elif args.type==151:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [1000]:
for group_norm_size in [1]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
" --type=%d --min_scale_crops 0.14 0.05 " \
" --size_crops 224 96 --nmb_crops 4 6 --max_scale_crops 1.0 0.14" \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 --alpha=1.0" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
512,
learning_rate * args.node / 100,
local_t, args.type, local_t, group_norm_size)
write_slurm_sh_multi(
"type%dmultiquery_viewkey_group%d_lgq_t_%.3f_lr%.4f" % (
args.type, group_norm_size, local_t, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
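    # mode 6: MoCo v3-style jobs (names "mocov3type*"; moco_dim=256, mlp_dim=4096); types 17/18 switch to a ViT-Small backbone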
elif args.mode==6:
if args.type==0 or args.type==1 or args.type==2 or args.type==3:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [512]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=0.9 " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d " \
% (
args.type, args.data, epoch, batch_size,local_t, num_roi, crop_size, args.node * 64)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,
local_t, num_roi,
crop_size,
epoch),
command_line, queue_name)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"mocov3type%d_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, local_t, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==4 or args.type==5 or args.type==6:
for num_roi in [1]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for group_norm_size in [1,2,4,8]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d " \
% (args.type, args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,group_norm_size)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_%d_%d_epoch%d" % (args.type,
group_norm_size,
learning_rate,
local_t,
num_roi,
crop_size,
epoch),
command_line, queue_name,gpu_memory=True)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,group_norm_size,learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,learning_rate,local_t, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==7 or args.type==8:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for group_norm_size in [1,2,4,8]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
% (args.type, args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,group_norm_size)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_%d_%d_epoch%d" % (args.type,
group_norm_size,
learning_rate,
local_t,
num_roi,
crop_size,
epoch),
command_line, queue_name,gpu_memory=True,environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,group_norm_size,learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,learning_rate,local_t, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==-7:
            combine_choice=[[1024,16]]
for num_roi in [10]:
for crop_size in [96]:
for learning_rate in [0.3]:
for local_t in [1.0]:
for epoch in [1000]:
for batch_size,group_norm_size in combine_choice:
command_line = "python3 main_adco.py --mode=6 --type=7 --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1.5e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.996 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
% ( args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,group_norm_size)
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size, learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size, learning_rate, local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==-13:
combine_choice=[[4096,1],[4096,64]]
for num_roi in [20]:
for crop_size in [96]:
for learning_rate in [0.3]:
for local_t in [1.0]:
for epoch in [1000]:
for batch_size,group_norm_size in combine_choice:
command_line = "python3 main_adco.py --mode=6 --type=13 --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1.5e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.996 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
% ( args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,group_norm_size)
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size, learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size, learning_rate, local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==9 or args.type==10:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for ema_param in [0.001,0.01,0.1]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --momentum_stat=%f --use_fp16=1 " \
% (args.type, args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,ema_param)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%f_%flgq_t_%.3f_%d_%d_epoch%d" % (args.type,
ema_param,
learning_rate,
local_t,
num_roi,
crop_size,
epoch),
command_line, queue_name,gpu_memory=True,environment=1)
else:
if args.queue <= 1:
                                                write_slurm_sh_multi2(
                                                    "mocov3type%d_%f_%f_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,ema_param,learning_rate,
                                                                                                     local_t, num_roi, crop_size,
                                                                                                     epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
else:
                                                write_slurm_sh_multi(
                                                    "mocov3type%d_%f_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
                                                        args.type, ema_param,learning_rate,local_t, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==11:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for ema_param in [0.999]:
for group_norm_size in [1,4,8,16]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --momentum_stat=%f --use_fp16=1 --group_norm_size=%d " \
% (args.type, args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,ema_param,group_norm_size)
if args.node == 1:
write_slurm_sh_faster(
"mocov3type%d_%f_%d_%flgq_t_%.3f_%d_%d_epoch%d" % (args.type,
group_norm_size,
ema_param,
learning_rate,
local_t,
num_roi,
crop_size,
epoch),
command_line, queue_name, gpu_memory=True, environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,ema_param, learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,ema_param, learning_rate, local_t, num_roi,
crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==12:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for group_norm_size in [8]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
% (args.type, args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,group_norm_size)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_%d_%d_epoch%d" % (args.type,
group_norm_size,
learning_rate,
local_t,
num_roi,
crop_size,
epoch),
command_line, queue_name,gpu_memory=True,environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,group_norm_size,learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=False,environment=0)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,learning_rate,local_t, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==13 or args.type==14 or args.type==15:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for group_norm_size in [1,4,8,16]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 " \
" --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
% (args.type, args.data, epoch, batch_size, learning_rate,
local_t, args.node * 64, group_norm_size)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_epoch%d" % (args.type,
group_norm_size,
learning_rate,
local_t,
epoch),
command_line, queue_name, gpu_memory=True, environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, learning_rate,
local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=False, environment=0)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==19:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for group_norm_size in [1,4,8,16,32]:
for key_group_norm_size in [1,4,8,16,32]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 " \
" --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --key_group=%d " \
"--use_fp16=1 " \
% (args.type, args.data, epoch, batch_size, learning_rate,
local_t, args.node * 64, group_norm_size,key_group_norm_size)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%d_%flgq_t_%.3f_epoch%d" % (args.type,
group_norm_size,
key_group_norm_size,
learning_rate,
local_t,
epoch),
command_line, queue_name, gpu_memory=True, environment=1)
else:
if args.queue <= 3:
write_slurm_sh_multi2(
"mocov3type%d_%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, key_group_norm_size,learning_rate,
local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=False, environment=0)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, key_group_norm_size,learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==16:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for crop_size in [4,8,16]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 " \
" --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=1 --use_fp16=1 " \
"--nmb_crops %d" \
% (args.type, args.data, epoch, batch_size, learning_rate,
local_t, args.node * 64,crop_size )
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_epoch%d" % (args.type,
crop_size,
learning_rate,
local_t,
epoch),
command_line, queue_name, gpu_memory=True, environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, crop_size, learning_rate,
local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=False, environment=0)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, crop_size, learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==17 or args.type==18:
warmup_epoch=10
for learning_rate in [1.5e-4]:
for local_t in [0.2]:
for epoch in [100]:
for batch_size in [1024]:
if args.type==18:
group_list = [1,2,4,8,16,32,64,128]
else:
group_list = [1]
for group_norm_size in group_list:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=0.1 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f " \
" --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
"--warmup_epochs %d -a vit_small --crop_min 0.08 " \
% (args.type, args.data, epoch, batch_size, learning_rate,
local_t, 256 , group_norm_size,warmup_epoch)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_epoch%d" % (args.type,
group_norm_size,
learning_rate,
local_t,
epoch),
command_line, queue_name, gpu_memory=True, environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, learning_rate,
local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=False, environment=0)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
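    # mode 7: SimSiam-style jobs (names "simsiamtype*"; moco_dim=2048, mlp_dim=512)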
elif args.mode==7:
if args.type==0 or args.type==1 or args.type==2 or args.type==3 or args.type==4:
for num_roi in [16]:
for crop_size in [96]:
for learning_rate in [0.05]:
                        for batch_size in [512]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=7 --type=%d " \
" --data=%s --epochs=%d --start_epoch=0 --batch_size=%d " \
"--lr=%f --weight_decay=1e-4 --dist_url=tcp://localhost:10031 " \
"--rank=0 --multiprocessing_distributed=1 --world_size=1 " \
"--moco_dim=2048 --mlp_dim=512 --nmb_crops 1 %d --size_crops 224 %d " \
"--align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 --knn_batch_size=%d "\
                                %(args.type,args.data,epoch,batch_size,learning_rate,num_roi,crop_size,max(64*args.node,256))
if args.node==1:
write_slurm_sh_faster("simsiamtype%d_%d_%d_epoch%d" % (args.type, num_roi, crop_size,
epoch),command_line, queue_name,)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"simsiamtype%d_%d_%d_epoch%d" % (args.type, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"simsiamtype%d_%d_%d_epoch%d" % (args.type, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==5 or args.type==6 or args.type==7 or args.type==8 or args.type==9:
for learning_rate in [0.05]:
                for batch_size in [512]:
for epoch in [100]:
for group_norm_size in [1, 2, 4, 8,16,32,64]:
command_line = "python3 main_adco.py --mode=7 --type=%d " \
" --data=%s --epochs=%d --start_epoch=0 --batch_size=%d " \
"--lr=%f --weight_decay=1e-4 --dist_url=tcp://localhost:10031 " \
"--rank=0 --multiprocessing_distributed=1 --world_size=1 " \
"--moco_dim=2048 --mlp_dim=512 --group_norm_size=%d " \
"--align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 --knn_batch_size=%d " \
"--use_fp16=1 " \
                                       % (args.type, args.data, epoch, batch_size, learning_rate,group_norm_size,
max(64 * args.node, 256))
if args.node == 1:
write_slurm_sh_faster("simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size,
epoch), command_line, queue_name,
gpu_memory=True,environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True,environment=1)
else:
write_slurm_sh_multi(
"simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==-6:
for learning_rate in [0.05]:
                for batch_size in [256,512]:
for epoch in [800]:
for group_norm_size in [8]:
command_line = "python3 main_adco.py --mode=7 --type=%d " \
" --data=%s --epochs=%d --start_epoch=0 --batch_size=%d " \
"--lr=%f --weight_decay=1e-4 --dist_url=tcp://localhost:10031 " \
"--rank=0 --multiprocessing_distributed=1 --world_size=1 " \
"--moco_dim=2048 --mlp_dim=512 --group_norm_size=%d " \
"--align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 --knn_batch_size=%d " \
"--use_fp16=1 " \
                                       % (abs(args.type), args.data, epoch, batch_size, learning_rate,group_norm_size,
max(64 * args.node, 256))
if args.node == 1:
write_slurm_sh_faster("simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size,
epoch), command_line, queue_name,
gpu_memory=True )
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==10:
for learning_rate in [0.05]:
                for batch_size in [512]:
for epoch in [100]:
for crop_size in [4, 8,16]:
command_line = "python3 main_adco.py --mode=7 --type=%d " \
" --data=%s --epochs=%d --start_epoch=0 --batch_size=%d " \
"--lr=%f --weight_decay=1e-4 --dist_url=tcp://localhost:10031 " \
"--rank=0 --multiprocessing_distributed=1 --world_size=1 " \
"--moco_dim=2048 --mlp_dim=512 --nmb_crops %d " \
"--align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 --knn_batch_size=%d " \
"--use_fp16=1 " \
                                       % (args.type, args.data, epoch, batch_size, learning_rate,crop_size,
max(64 * args.node, 256))
if args.node == 1:
write_slurm_sh_faster("simsiamtype%d_%d_epoch%d" % (args.type,crop_size,
epoch), command_line, queue_name,
gpu_memory=True,environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"simsiamtype%d_%d_epoch%d" % (args.type,crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True,environment=1)
else:
write_slurm_sh_multi(
"simsiamtype%d_%d_epoch%d" % (args.type,crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True,environment=1)
time.sleep(1)
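    # mode 5: SwAV baseline, optionally resumed from the checkpoint passed in args.F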
elif args.mode==5:
if args.type==0:
if args.F is None:
command_line = "python3 main_adco.py --mode=5 --type=0 --data=%s --epochs 200 --lr=0.6 "\
"--lr_final 0.0006 --batch_size=256 --warmup_epochs 0 --freeze_prototypes_niters 5005 "\
"--queue_length 3840 --epoch_queue_starts 15 --dist_url=tcp://localhost:10031 "\
"--knn_batch_size=256 --cos=1 --momentum=0.9 --weight_decay=1e-6 --world_size=1 "\
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 --moco_k=3000 --moco_t=0.1 "\
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 --dump_path %s"%(args.data,dump_path)
write_slurm_sh("swav_baseline" , command_line, queue_name)
else:
args.F= os.path.abspath(args.F)
command_line = "python3 main_adco.py --mode=5 --type=0 --data=%s --epochs 200 --lr=0.6 " \
"--lr_final 0.0006 --batch_size=256 --warmup_epochs 0 --freeze_prototypes_niters 5005 " \
"--queue_length 3840 --epoch_queue_starts 15 --dist_url=tcp://localhost:10031 " \
"--knn_batch_size=256 --cos=1 --momentum=0.9 --weight_decay=1e-6 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 --moco_k=3000 --moco_t=0.1 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--resume=%s --dump_path %s " % (args.data,args.F,dump_path)
resume_name= os.path.split(os.path.abspath(args.F))[1]
write_slurm_sh("swav_baseline_resume%s"%resume_name, command_line, queue_name)
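    # mode 8: Barlow Twins-style jobs (names "BTtype*") via main.py with --lambd and separate weight/bias learning rates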
elif args.mode==8:
if args.type==0 or args.type==1:
for epoch in [100]:
for batch_size in [2048]:
for lr_w in [0.2]:
for lr_bias in [0.0048]:
for alpha in [0.51]:
command_line="python3 main.py %s --epochs=%d " \
"--batch-size=%d --learning-rate-weights=%f --learning-rate-biases=%f " \
"--weight-decay=1e-6 --lambd=%f --type=%d --knn_neighbor=20 " \
"--knn_freq=1 --knn_batch_size=%d --tensorboard=1 "%(args.data,epoch,
batch_size,lr_w,lr_bias,alpha,args.type,256 )
if args.node==1:
write_slurm_sh_faster("BTtype%d_%d_epoch%d" % (args.type,batch_size,epoch), command_line, queue_name,
gpu_memory=False, environment=0)
else:
write_slurm_sh_multi2(
"BTtype%d_%d_epoch%d" % (args.type, batch_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=False, environment=0)
elif args.type==2:
for epoch in [100]:
for batch_size in [1024]:
for lr_w in [0.2]:
for lr_bias in [0.0048]:
for alpha in [0.51]:
for group_size in [2,4,8,16,32]:
command_line = "python3 main.py %s --epochs=%d " \
"--batch-size=%d --learning-rate-weights=%f --learning-rate-biases=%f " \
"--weight-decay=1e-6 --lambd=%f --type=%d --knn_neighbor=20 " \
"--knn_freq=1 --knn_batch_size=%d --tensorboard=1 --group_norm_size=%d " % (args.data, epoch,
batch_size, lr_w,
lr_bias, alpha,
args.type, 256,group_size)
write_slurm_sh_faster("BTtype%d_%d_%d_epoch%d" % (args.type,group_size, batch_size,epoch), command_line, queue_name,
gpu_memory=False, environment=0)
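    # mode 0: linear evaluation with lincls.py, sweeping weight decay at a fixed lr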
elif args.mode==0:
use_bn=args.type
for lr in [20]:
for weight_decay in [1e-6,1e-7,1e-8,1e-9]:
command_line = "python3 lincls.py --data=%s --dist-url=tcp://localhost:10031 " \
"--pretrained='%s' --lr=%.4f --final_lr=%.8f --dataset=ImageNet --use_bn=%d --wd %.8f" % (
args.data, args.F, lr, lr / 100, use_bn,weight_decay)
write_slurm_sh("linear_eval_%s_%.4f_bn%d_wd_%f" % (args.comment, lr, use_bn,weight_decay), command_line, queue_name)
time.sleep(1)
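    # mode -2: linear evaluation with lincls.py at batch size 4096, sweeping weight decay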
elif args.mode==-2:
use_bn = args.type
for lr in [1.0]:
for weight_decay in [1e-5,1e-6,1e-7,1e-8,1e-9]:
command_line = "python3 lincls.py --data=%s --dist-url=tcp://localhost:10031 --batch-size=4096 " \
"--pretrained='%s' --lr=%.4f --final_lr=%.8f --dataset=ImageNet --use_bn=%d --wd %.8f" % (
args.data, args.F, lr, lr / 100, use_bn, weight_decay)
write_slurm_sh("linearb4096_eval_%s_%.4f_bn%d_wd_%.8f" % (args.comment, lr, use_bn, weight_decay), command_line,
queue_name)
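    # mode -1: dump features from a pretrained checkpoint with encode.py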
elif args.mode==-1:
command_line = "python3 encode.py --data=%s --dist-url=tcp://localhost:10031 " \
"--pretrained='%s' --dataset=ImageNet " % (args.data, args.F)
write_slurm_sh("encode_%s" % (args.comment), command_line, queue_name)
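    # mode -3: single baseline run of main_adco.py --mode=0 with fixed memory_lr / mem_t / cluster settings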
elif args.mode==-3:
command_line = "python3 main_adco.py --sym=0 --lr=0.03 --memory_lr=3 --moco_t=0.12 " \
"--mem_t=0.02 --data=%s --dist_url=tcp://localhost:10001 --mode=0 " \
"--epochs=200 --moco_dim=128 --moco_m=0.999 --moco_k=65536 --cluster=65536 " \
"--knn_neighbor=20 --knn_freq=1 --data=imagenet --batch_size=256 --ad_init=1 "%(args.data)
write_slurm_sh("type0",command_line,queue_name)
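    # mode -4: LARS linear evaluation with lincls_lars.py; vit_model=True appends --arch vit_small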
elif args.mode==-4:
use_bn = args.type
vit_model =True
for lr in [0.05,0.1]:
for weight_decay in [0]:
for model_type in [0]:
command_line ="python lincls_lars.py -a resnet50 --dist-url 'tcp://localhost:10001' " \
"--multiprocessing-distributed --world-size 1 --rank 0 --pretrained='%s' --lr %f --wd %f " \
"--lars --data %s --use_bn=%d --model_type=%d "%(args.F,lr,
weight_decay,args.data,use_bn,model_type)
if vit_model:
command_line +=" --arch vit_small"
write_slurm_sh("linear_larsb4096_eval_%s_bn%d_%.4f_wd_%.8f" % (args.comment, use_bn,lr,weight_decay),
command_line,
queue_name)
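    # mode -40: LARS linear evaluation for every checkpoint_0099.pth.tar found under the study directory args.F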
elif args.mode==-40:
use_bn = args.type
study_dir = os.path.abspath(args.F)
checkpoint_name = "checkpoint_0099.pth.tar"
for item in os.listdir(study_dir):
if item== checkpoint_name:
current_model_path = os.path.join(study_dir,item)
current_dir = study_dir
current_comment = os.path.split(current_dir)[1]
else:
current_dir = os.path.join(study_dir,item)
current_comment = os.path.split(current_dir)[1]
current_model_path = find_checkpoint(current_dir,checkpoint_name)
if current_model_path is None:
print("%s dir did not find checkpoint"%current_dir)
continue
if not os.path.exists(current_model_path):
print("%s model path did not exist"%current_model_path)
continue
print("fintune %s model"%current_model_path)
for lr in [0.05, 0.1]:
for weight_decay in [0]:
for model_type in [0]:
command_line = "python lincls_lars.py -a resnet50 --dist-url 'tcp://localhost:10001' " \
"--multiprocessing-distributed --world-size 1 --rank 0 --pretrained='%s' --lr %f --wd %f " \
"--lars --data %s --use_bn=%d --model_type=%d " % (current_model_path, lr,
weight_decay, args.data, use_bn,
model_type)
write_slurm_sh(
"linear_larsb4096_eval_%s_bn%d_%.4f_wd_%.8f" % (str(args.comment)+current_comment, use_bn, lr, weight_decay),
command_line,
queue_name)
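    # mode -5: VOC / COCO detection transfer jobs via detection/train_net.py on the weights given in args.F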
elif args.mode==-5:
config_dict={}
config_path = os.path.join(os.getcwd(),"detection")
config_path = os.path.join(config_path,"configs")
config_dict['VOC']=os.path.join(config_path,"pascal_voc_R_50_C4_24k_loco.yaml")
config_dict['VOC_freeze'] = os.path.join(config_path, "pascal_voc_R_50_C4_24k_loco_freeze.yaml")
config_dict['COCO'] = os.path.join(config_path,"coco_R_50_C4_2x.yaml_loco.yaml")
config_dict['COCO_freeze'] =os.path.join(config_path,"coco_R_50_C4_2x.yaml_loco_freeze.yaml")
model_path = os.path.abspath(args.F)
model_name = os.path.split(model_path)[1].replace(".pkl","")
for kk in range(5):
for config_now in ['VOC','VOC_freeze']:
command_line = "python detection/train_net.py --config-file %s --num-gpus 8" \
" MODEL.WEIGHTS %s"%(config_dict[config_now],args.F)
write_slurm_sh_faster("detection_%s_run%d_%s" % (config_now, kk,model_name),
command_line, queue_name, gpu_memory=True)
for config_now in ['COCO',"COCO_freeze"]:
command_line = "python detection/train_net.py --config-file %s --num-gpus 8" \
" MODEL.WEIGHTS %s" % (config_dict[config_now], args.F)
write_slurm_sh_faster("detection_%s_%s" % (config_now, model_name),
command_line, queue_name, gpu_memory=True)
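    # mode -6: linear classification via main_lincls.py, sweeping lr at fixed weight decay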
elif args.mode==-6:
for lr in [0.03,0.06,0.1,0.15,0.12]:
for weight_decay in [0]:
command_line ="python main_lincls.py -a resnet50 --dist-url 'tcp://localhost:10001' " \
"--multiprocessing-distributed --world-size 1 --rank 0 --pretrained='%s' --lr %f --wd %f " \
" %s "%(args.F,lr,weight_decay,args.data)
write_slurm_sh("linear_main_lincls_%s_%.4f_wd_%.8f" % (args.comment, lr,weight_decay),
command_line,
queue_name)
| true
| true
|
7904fecb154ee2a0af2ff7337197693527639f0d
| 1,698
|
py
|
Python
|
simone/settings/dev.py
|
ross/simone
|
cfee8eaa04a7ddd235f735fa6c07adac28b4c6a4
|
[
"MIT"
] | null | null | null |
simone/settings/dev.py
|
ross/simone
|
cfee8eaa04a7ddd235f735fa6c07adac28b4c6a4
|
[
"MIT"
] | 1
|
2021-11-04T13:47:28.000Z
|
2021-11-04T13:47:28.000Z
|
simone/settings/dev.py
|
ross/simone
|
cfee8eaa04a7ddd235f735fa6c07adac28b4c6a4
|
[
"MIT"
] | 1
|
2021-10-20T14:44:19.000Z
|
2021-10-20T14:44:19.000Z
|
from os import environ
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent.parent
DEBUG = True
CRON_ENABLED = False
if 'SIMONE_DB_NAME' in environ:
DATABASES = {
'default': {
'ENGINE': 'mysql.connector.django',
'NAME': environ['SIMONE_DB_NAME'],
'USER': environ['SIMONE_DB_USER'],
'PASSWORD': environ['SIMONE_DB_PASSWORD'],
'HOST': environ['SIMONE_DB_HOST'],
'PORT': environ.get('SIMONE_DB_PORT', '3306'),
'CONN_MAX_AGE': 300,
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db' / 'db.sqlite3',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(asctime)s %(levelname)-5s %(name)s %(message)s',
'datefmt': '%Y-%m-%dT%H:%M:%SZ',
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'simple',
},
'file': {
'class': 'logging.handlers.WatchedFileHandler',
'level': 'DEBUG',
'formatter': 'simple',
'filename': 'django.log',
},
},
'root': {'level': 'DEBUG', 'handlers': ('console', 'file')},
'loggers': {
'django.db.backends': {
# comment out to see db queries
'level': 'INFO'
},
'slack_bolt': {
# super noisy
'level': 'INFO'
},
},
}
| 25.727273
| 73
| 0.490577
|
from os import environ
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent.parent
DEBUG = True
CRON_ENABLED = False
if 'SIMONE_DB_NAME' in environ:
DATABASES = {
'default': {
'ENGINE': 'mysql.connector.django',
'NAME': environ['SIMONE_DB_NAME'],
'USER': environ['SIMONE_DB_USER'],
'PASSWORD': environ['SIMONE_DB_PASSWORD'],
'HOST': environ['SIMONE_DB_HOST'],
'PORT': environ.get('SIMONE_DB_PORT', '3306'),
'CONN_MAX_AGE': 300,
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db' / 'db.sqlite3',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(asctime)s %(levelname)-5s %(name)s %(message)s',
'datefmt': '%Y-%m-%dT%H:%M:%SZ',
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'simple',
},
'file': {
'class': 'logging.handlers.WatchedFileHandler',
'level': 'DEBUG',
'formatter': 'simple',
'filename': 'django.log',
},
},
'root': {'level': 'DEBUG', 'handlers': ('console', 'file')},
'loggers': {
'django.db.backends': {
'level': 'INFO'
},
'slack_bolt': {
'level': 'INFO'
},
},
}
| true
| true
|
7904ff9df1b6a7e4e34577ff9020c0fdc24279c7
| 884
|
py
|
Python
|
starter_code/migrations/versions/eb02de174736_.py
|
nkatwesigye/project_furry
|
df6e2cb2e71cec44f1d8dc31f3955055f2be511c
|
[
"Apache-2.0"
] | null | null | null |
starter_code/migrations/versions/eb02de174736_.py
|
nkatwesigye/project_furry
|
df6e2cb2e71cec44f1d8dc31f3955055f2be511c
|
[
"Apache-2.0"
] | null | null | null |
starter_code/migrations/versions/eb02de174736_.py
|
nkatwesigye/project_furry
|
df6e2cb2e71cec44f1d8dc31f3955055f2be511c
|
[
"Apache-2.0"
] | null | null | null |
"""empty message
Revision ID: eb02de174736
Revises: c0de0819f9f0
Create Date: 2020-02-04 18:29:57.302993
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'eb02de174736'
down_revision = 'c0de0819f9f0'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('Shows', 'name',
existing_type=sa.VARCHAR(),
nullable=False)
op.create_foreign_key(None, 'Shows', 'Venue', ['name'], ['name'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'Shows', type_='foreignkey')
op.alter_column('Shows', 'name',
existing_type=sa.VARCHAR(),
nullable=True)
# ### end Alembic commands ###
| 25.257143
| 69
| 0.644796
|
from alembic import op
import sqlalchemy as sa
revision = 'eb02de174736'
down_revision = 'c0de0819f9f0'
branch_labels = None
depends_on = None
def upgrade():
    op.alter_column('Shows', 'name',
               existing_type=sa.VARCHAR(),
               nullable=False)
    op.create_foreign_key(None, 'Shows', 'Venue', ['name'], ['name'])
def downgrade():
    op.drop_constraint(None, 'Shows', type_='foreignkey')
    op.alter_column('Shows', 'name',
               existing_type=sa.VARCHAR(),
               nullable=True)
| true
| true
|
790500ca58f7532d0cdf20ac5ee364b9dd209ad0
| 970
|
py
|
Python
|
whoami.py
|
lmanul/awty
|
10b1844a0eaf12dd47d4a84eca32a0c7d947f538
|
[
"Apache-2.0"
] | null | null | null |
whoami.py
|
lmanul/awty
|
10b1844a0eaf12dd47d4a84eca32a0c7d947f538
|
[
"Apache-2.0"
] | null | null | null |
whoami.py
|
lmanul/awty
|
10b1844a0eaf12dd47d4a84eca32a0c7d947f538
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- #
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.api import users
from google.appengine.ext import webapp
from util import *
import webapp2
class WhoAmIHandler(webapp2.RequestHandler):
def get(self):
self.response.out.write(Util.getUsernameFromEmail(users.get_current_user().email()))
app = webapp2.WSGIApplication(
[
('/whoami', WhoAmIHandler),
], debug=True)
| 27.714286
| 88
| 0.736082
|
from google.appengine.api import users
from google.appengine.ext import webapp
from util import *
import webapp2
class WhoAmIHandler(webapp2.RequestHandler):
def get(self):
self.response.out.write(Util.getUsernameFromEmail(users.get_current_user().email()))
app = webapp2.WSGIApplication(
[
('/whoami', WhoAmIHandler),
], debug=True)
| true
| true
|
7905021a4511341a4731d142f2c60743cf730a1f
| 640
|
py
|
Python
|
blog/admin.py
|
jinjf553/mysite
|
e6c936ba6cd3e89d13434ff3f42a858e96231cae
|
[
"MIT"
] | 1
|
2020-01-20T14:49:44.000Z
|
2020-01-20T14:49:44.000Z
|
blog/admin.py
|
jinjf553/mysite
|
e6c936ba6cd3e89d13434ff3f42a858e96231cae
|
[
"MIT"
] | null | null | null |
blog/admin.py
|
jinjf553/mysite
|
e6c936ba6cd3e89d13434ff3f42a858e96231cae
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from account.models import UserProfile
from blog.models import BlogArticles
class BlogArticlesAdmin(admin.ModelAdmin):
list_display = ("title", "author", "publish")
list_filter = ("publish", "author")
search_fields = ("title", "body")
raw_id_fields = ("author",)
date_hierarchy = "publish"
ordering = ("-publish", "author")
admin.site.register(BlogArticles, BlogArticlesAdmin)
class UserProfileAdmin(admin.ModelAdmin):
list_display = ("user", "birth", "phone")
list_filter = ("phone",)
admin.site.register(UserProfile, UserProfileAdmin)
| 24.615385
| 52
| 0.715625
|
from django.contrib import admin
from account.models import UserProfile
from blog.models import BlogArticles
class BlogArticlesAdmin(admin.ModelAdmin):
list_display = ("title", "author", "publish")
list_filter = ("publish", "author")
search_fields = ("title", "body")
raw_id_fields = ("author",)
date_hierarchy = "publish"
ordering = ("-publish", "author")
admin.site.register(BlogArticles, BlogArticlesAdmin)
class UserProfileAdmin(admin.ModelAdmin):
list_display = ("user", "birth", "phone")
list_filter = ("phone",)
admin.site.register(UserProfile, UserProfileAdmin)
| true
| true
|
790502f1b0341d383af2e685a4180c3273801f40
| 506
|
py
|
Python
|
2020/day09.py
|
iKevinY/advent
|
d160fb711a0a4d671f53cbd61088117e7ff0276a
|
[
"MIT"
] | 11
|
2019-12-03T06:32:37.000Z
|
2021-12-24T12:23:57.000Z
|
2020/day09.py
|
iKevinY/advent
|
d160fb711a0a4d671f53cbd61088117e7ff0276a
|
[
"MIT"
] | null | null | null |
2020/day09.py
|
iKevinY/advent
|
d160fb711a0a4d671f53cbd61088117e7ff0276a
|
[
"MIT"
] | 1
|
2019-12-07T06:21:31.000Z
|
2019-12-07T06:21:31.000Z
|
import fileinput
from itertools import permutations
SEQ = [int(x) for x in fileinput.input()]
LEN = 25
for i in range(LEN, len(SEQ)):
for x, y in permutations(SEQ[i-LEN:i], 2):
if x + y == SEQ[i]:
break
else:
INVALID = SEQ[i]
print "Part 1:", INVALID
break
for n in range(2, len(SEQ)):
tot = 0
for i in range(len(SEQ)-n):
tot = sum(SEQ[i:i+n])
if tot == INVALID:
print "Part 2:", min(SEQ[i:i+n]) + max(SEQ[i:i+n])
| 22
| 62
| 0.529644
|
import fileinput
from itertools import permutations
SEQ = [int(x) for x in fileinput.input()]
LEN = 25
for i in range(LEN, len(SEQ)):
for x, y in permutations(SEQ[i-LEN:i], 2):
if x + y == SEQ[i]:
break
else:
INVALID = SEQ[i]
print "Part 1:", INVALID
break
for n in range(2, len(SEQ)):
tot = 0
for i in range(len(SEQ)-n):
tot = sum(SEQ[i:i+n])
if tot == INVALID:
print "Part 2:", min(SEQ[i:i+n]) + max(SEQ[i:i+n])
| false
| true
|
790504406794d9ba01c6c6442dd9b3b489661732
| 3,495
|
py
|
Python
|
minette/tagger/mecabservice.py
|
uezo/minette-python
|
dd8cd7d244b6e6e4133c8e73d637ded8a8c6846f
|
[
"Apache-2.0"
] | 31
|
2017-12-18T15:35:42.000Z
|
2021-12-16T07:27:33.000Z
|
minette/tagger/mecabservice.py
|
uezo/minette-python
|
dd8cd7d244b6e6e4133c8e73d637ded8a8c6846f
|
[
"Apache-2.0"
] | 17
|
2017-07-13T22:25:08.000Z
|
2020-11-02T14:19:32.000Z
|
minette/tagger/mecabservice.py
|
uezo/minette-python
|
dd8cd7d244b6e6e4133c8e73d637ded8a8c6846f
|
[
"Apache-2.0"
] | 2
|
2017-09-14T09:28:35.000Z
|
2021-01-17T12:31:54.000Z
|
""" Tagger using mecab-service """
import traceback
import requests
from ..models import WordNode
from .base import Tagger
class MeCabServiceNode(WordNode):
"""
Parsed word node by MeCabServiceTagger
Attributes
----------
surface : str
Surface of word
part : str
Part of the word
part_detail1 : str
Detail1 of part
part_detail2 : str
Detail2 of part
part_detail3 : str
Detail3 of part
stem_type : str
Stem type
stem_form : str
Stem form
word : str
Word itself
kana : str
Japanese kana of the word
pronunciation : str
Pronunciation of the word
"""
@classmethod
def create(cls, surface, features):
"""
Create instance of MeCabServiceNode
Parameters
----------
surface : str
Surface of the word
features : dict
Features analyzed by MeCabService
"""
return cls(
surface=surface,
part=features["part"],
part_detail1=features["part_detail1"],
part_detail2=features["part_detail2"],
part_detail3=features["part_detail3"],
stem_type=features["stem_type"],
stem_form=features["stem_form"],
word=features["word"],
kana=features["kana"],
pronunciation=features["pronunciation"]
)
class MeCabServiceTagger(Tagger):
"""
Tagger using mecab-service
Attributes
----------
config : minette.Config
Configuration
timezone : pytz.timezone
Timezone
logger : logging.Logger
Logger
api_url : str
URL for MeCabService API
"""
def __init__(self, config=None, timezone=None, logger=None, *,
api_url=None, **kwargs):
"""
Parameters
----------
config : Config, default None
Configuration
timezone : timezone, default None
Timezone
logger : Logger, default None
Logger
api_url : str, default None
URL for MeCabService API.
If None trial URL is used.
"""
super().__init__(config=config, timezone=timezone, logger=logger)
if not api_url:
self.api_url = "https://api.uezo.net/mecab/parse"
self.logger.warning(
"Do not use default API URL for the production environment. "
"This is for trial use only. "
"Install MeCab and use MeCabTagger instead.")
else:
self.api_url = api_url
def parse(self, text):
"""
Parse and annotate using MeCab Service
Parameters
----------
text : str
Text to analyze
Returns
-------
words : list of minette.MeCabServiceNode
MeCabService nodes
"""
ret = []
if not text:
return ret
try:
parsed_json = requests.post(
self.api_url, headers={"content-type": "application/json"},
json={"text": text}, timeout=10).json()
ret = [MeCabServiceNode.create(
n["surface"], n["features"]) for n in parsed_json["nodes"]]
except Exception as ex:
self.logger.error(
"MeCab Service parsing error: "
+ str(ex) + "\n" + traceback.format_exc())
return ret
| 26.477273
| 77
| 0.5402
|
import traceback
import requests
from ..models import WordNode
from .base import Tagger
class MeCabServiceNode(WordNode):
@classmethod
def create(cls, surface, features):
return cls(
surface=surface,
part=features["part"],
part_detail1=features["part_detail1"],
part_detail2=features["part_detail2"],
part_detail3=features["part_detail3"],
stem_type=features["stem_type"],
stem_form=features["stem_form"],
word=features["word"],
kana=features["kana"],
pronunciation=features["pronunciation"]
)
class MeCabServiceTagger(Tagger):
def __init__(self, config=None, timezone=None, logger=None, *,
api_url=None, **kwargs):
super().__init__(config=config, timezone=timezone, logger=logger)
if not api_url:
self.api_url = "https://api.uezo.net/mecab/parse"
self.logger.warning(
"Do not use default API URL for the production environment. "
"This is for trial use only. "
"Install MeCab and use MeCabTagger instead.")
else:
self.api_url = api_url
def parse(self, text):
ret = []
if not text:
return ret
try:
parsed_json = requests.post(
self.api_url, headers={"content-type": "application/json"},
json={"text": text}, timeout=10).json()
ret = [MeCabServiceNode.create(
n["surface"], n["features"]) for n in parsed_json["nodes"]]
except Exception as ex:
self.logger.error(
"MeCab Service parsing error: "
+ str(ex) + "\n" + traceback.format_exc())
return ret
| true
| true
|
7905045aeaa6fbc76470b2a1bfaeeae461aaf147
| 545
|
py
|
Python
|
snaps/views.py
|
thuitafaith/My-gallery
|
7a752e4bfe6180d052336da364d0658306eefefe
|
[
"MIT"
] | null | null | null |
snaps/views.py
|
thuitafaith/My-gallery
|
7a752e4bfe6180d052336da364d0658306eefefe
|
[
"MIT"
] | null | null | null |
snaps/views.py
|
thuitafaith/My-gallery
|
7a752e4bfe6180d052336da364d0658306eefefe
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render,redirect
from .models import Image,Location,Category
# Create your views here.
def intro(request):
images = Image.objects.all()
return render(request, 'intro.html',{'images':images})
def search_results(request):
if 'image' in request.GET and request.GET["image"]:
search_term = request.GET.get("image")
searched_images = Image.search_by_cate(search_term)
message = f"{search_term}"
return render(request,'search.html',{"message":message,"images":searched_images})
| 38.928571
| 89
| 0.713761
|
from django.shortcuts import render,redirect
from .models import Image,Location,Category
def intro(request):
images = Image.objects.all()
return render(request, 'intro.html',{'images':images})
def search_results(request):
if 'image' in request.GET and request.GET["image"]:
search_term = request.GET.get("image")
searched_images = Image.search_by_cate(search_term)
message = f"{search_term}"
return render(request,'search.html',{"message":message,"images":searched_images})
| true
| true
|
790504a29ba2e1d53b75d3f3ec6fffc60661f7ed
| 2,373
|
py
|
Python
|
conf.py
|
Hiestaa/miniboard-factorio-manager
|
9ff5f1f063f17c0eaa47f43ac05bce0e74d90d45
|
[
"MIT"
] | null | null | null |
conf.py
|
Hiestaa/miniboard-factorio-manager
|
9ff5f1f063f17c0eaa47f43ac05bce0e74d90d45
|
[
"MIT"
] | null | null | null |
conf.py
|
Hiestaa/miniboard-factorio-manager
|
9ff5f1f063f17c0eaa47f43ac05bce0e74d90d45
|
[
"MIT"
] | null | null | null |
# -*- coding: utf8 -*-
from __future__ import unicode_literals
import logging
import netifaces
def getIpWindows(adapteridx):
try:
import wmi
except:
logging.error("You must need Win32com (win32 extensions for python)")
raise
adapters = wmi.WMI().Win32_NetworkAdapter()
wlan_int_id = adapters[adapteridx].Index
adaptername = adapters[adapteridx].NetConnectionID
ip = ''
for nic in wmi.WMI().Win32_NetworkAdapterConfiguration(IPEnabled=1):
if nic.Index == wlan_int_id:
ip = nic.IPAddress[0]
logging.info("[Windows] Showing IP for adapter %d (%s): %s",
adapteridx, adaptername, ip)
return ip
def filtre(addrInfo):
for typ, addrList in addrInfo.iteritems():
if len(addrList) == 0:
continue
for addrDetails in addrList:
if len(addrDetails.get('addr', '').split('.')) != 4:
continue
if not addrDetails.get('addr').startswith('192.168') and\
addrDetails.get('addr') != '127.0.0.1' and not \
addrDetails.get('addr').startswith('0'):
return addrDetails.get('addr')
def getIp(adapteridx):
adapters = netifaces.interfaces()
addrInfo = [netifaces.ifaddresses(a) for a in adapters]
addrInfo = [filtre(info) for info in addrInfo]
addrInfo = [info for info in addrInfo if info is not None]
return addrInfo[adapteridx % len(addrInfo)]
Conf = {
'state': 'DEBUG',
'log': {
'fileLevel': logging.WARNING
},
'database': {
'name': 'db/miniboard-factorio.db'
},
'server': {
'port': 15000,
'ip': '',
'assets': {
'minifiedCleanups': [
'http/assets/custom/css/',
'http/assets/custom/js/'
],
'minifyOnDebug': False
},
},
'factorio': {
'allowedPorts': sorted(
[34197, 34190, 34191, 34192, 34193]),
'savesFolder': (
'/Users/romain/Library/Application Support/factorio/saves'),
'binary': '/Applications/factorio.app',
'configFolder': (
'/Users/romain/Library/Application Support/factorio/config'),
'autosaveInterval': 15 # in minutes
}
}
| 29.6625
| 78
| 0.552465
|
from __future__ import unicode_literals
import logging
import netifaces
def getIpWindows(adapteridx):
try:
import wmi
except:
logging.error("You must need Win32com (win32 extensions for python)")
raise
adapters = wmi.WMI().Win32_NetworkAdapter()
wlan_int_id = adapters[adapteridx].Index
adaptername = adapters[adapteridx].NetConnectionID
ip = ''
for nic in wmi.WMI().Win32_NetworkAdapterConfiguration(IPEnabled=1):
if nic.Index == wlan_int_id:
ip = nic.IPAddress[0]
logging.info("[Windows] Showing IP for adapter %d (%s): %s",
adapteridx, adaptername, ip)
return ip
def filtre(addrInfo):
for typ, addrList in addrInfo.iteritems():
if len(addrList) == 0:
continue
for addrDetails in addrList:
if len(addrDetails.get('addr', '').split('.')) != 4:
continue
if not addrDetails.get('addr').startswith('192.168') and\
addrDetails.get('addr') != '127.0.0.1' and not \
addrDetails.get('addr').startswith('0'):
return addrDetails.get('addr')
def getIp(adapteridx):
adapters = netifaces.interfaces()
addrInfo = [netifaces.ifaddresses(a) for a in adapters]
addrInfo = [filtre(info) for info in addrInfo]
addrInfo = [info for info in addrInfo if info is not None]
return addrInfo[adapteridx % len(addrInfo)]
Conf = {
'state': 'DEBUG',
'log': {
'fileLevel': logging.WARNING
},
'database': {
'name': 'db/miniboard-factorio.db'
},
'server': {
'port': 15000,
'ip': '',
'assets': {
'minifiedCleanups': [
'http/assets/custom/css/',
'http/assets/custom/js/'
],
'minifyOnDebug': False
},
},
'factorio': {
'allowedPorts': sorted(
[34197, 34190, 34191, 34192, 34193]),
'savesFolder': (
'/Users/romain/Library/Application Support/factorio/saves'),
'binary': '/Applications/factorio.app',
'configFolder': (
'/Users/romain/Library/Application Support/factorio/config'),
'autosaveInterval': 15
}
}
| true
| true
|
790504e0eb2a1a60919a0445659c40001593891e
| 577
|
py
|
Python
|
FoodStore/migrations/0003_auto_20191229_0057.py
|
CPU-sangoma/PlentyPot
|
27e326f61e57746f5ca6701358d86c01b4a9ee31
|
[
"MIT"
] | null | null | null |
FoodStore/migrations/0003_auto_20191229_0057.py
|
CPU-sangoma/PlentyPot
|
27e326f61e57746f5ca6701358d86c01b4a9ee31
|
[
"MIT"
] | null | null | null |
FoodStore/migrations/0003_auto_20191229_0057.py
|
CPU-sangoma/PlentyPot
|
27e326f61e57746f5ca6701358d86c01b4a9ee31
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.6 on 2019-12-28 22:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('FoodStore', '0002_auto_20191209_0246'),
]
operations = [
migrations.AddField(
model_name='foodhomepagemodel',
name='PageComplete',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='fullmenupagemodel',
name='PageComplete',
field=models.BooleanField(default=False),
),
]
| 24.041667
| 53
| 0.601386
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('FoodStore', '0002_auto_20191209_0246'),
]
operations = [
migrations.AddField(
model_name='foodhomepagemodel',
name='PageComplete',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='fullmenupagemodel',
name='PageComplete',
field=models.BooleanField(default=False),
),
]
| true
| true
|
79050526f511afc82be832ed9640b0142da8eece
| 219
|
py
|
Python
|
_from_pydot/dev/request_test.py
|
owasp-sbot/pbx-gs-python-utils
|
f448aa36c4448fc04d30c3a5b25640ea4d44a267
|
[
"Apache-2.0"
] | 3
|
2018-12-14T15:43:46.000Z
|
2019-04-25T07:44:58.000Z
|
_from_pydot/dev/request_test.py
|
owasp-sbot/pbx-gs-python-utils
|
f448aa36c4448fc04d30c3a5b25640ea4d44a267
|
[
"Apache-2.0"
] | 1
|
2019-05-11T14:19:37.000Z
|
2019-05-11T14:51:04.000Z
|
_from_pydot/dev/request_test.py
|
owasp-sbot/pbx-gs-python-utils
|
f448aa36c4448fc04d30c3a5b25640ea4d44a267
|
[
"Apache-2.0"
] | 4
|
2018-12-27T04:54:14.000Z
|
2019-05-11T14:07:47.000Z
|
import requests
def run(event, context):
#return event.get('url') + 'aaa'
r = requests.get(event.get('url'))
return r.text
#return '...**^^.This is a request test for url: {0}'.format(event.get('url'))
| 27.375
| 82
| 0.611872
|
import requests
def run(event, context):
r = requests.get(event.get('url'))
return r.text
| true
| true
|
7905070232812013d675709620df247bfe9e5851
| 739
|
py
|
Python
|
tests/web/test_jsonrpc.py
|
spaceone/circuits
|
ed6d5464f1f83034109ed3d23d126c715450cfd2
|
[
"MIT"
] | null | null | null |
tests/web/test_jsonrpc.py
|
spaceone/circuits
|
ed6d5464f1f83034109ed3d23d126c715450cfd2
|
[
"MIT"
] | null | null | null |
tests/web/test_jsonrpc.py
|
spaceone/circuits
|
ed6d5464f1f83034109ed3d23d126c715450cfd2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from circuits import Component
from circuits.web import JSONRPC, Controller
from .helpers import urlopen
from .jsonrpclib import ServerProxy
class App(Component):
def eval(self, s):
return eval(s)
class Root(Controller):
def index(self):
return "Hello World!"
def test(webapp):
rpc = JSONRPC("/rpc")
test = App()
rpc.register(webapp)
test.register(webapp)
f = urlopen(webapp.server.http.base)
s = f.read()
assert s == b"Hello World!"
url = "%s/rpc" % webapp.server.http.base
jsonrpc = ServerProxy(url, allow_none=True, encoding='utf-8')
data = jsonrpc.eval("1 + 2")
assert data["result"] == 3
rpc.unregister()
test.unregister()
| 18.948718
| 65
| 0.645467
|
from circuits import Component
from circuits.web import JSONRPC, Controller
from .helpers import urlopen
from .jsonrpclib import ServerProxy
class App(Component):
def eval(self, s):
return eval(s)
class Root(Controller):
def index(self):
return "Hello World!"
def test(webapp):
rpc = JSONRPC("/rpc")
test = App()
rpc.register(webapp)
test.register(webapp)
f = urlopen(webapp.server.http.base)
s = f.read()
assert s == b"Hello World!"
url = "%s/rpc" % webapp.server.http.base
jsonrpc = ServerProxy(url, allow_none=True, encoding='utf-8')
data = jsonrpc.eval("1 + 2")
assert data["result"] == 3
rpc.unregister()
test.unregister()
| true
| true
|
790508e269ec2b7f761313c61358469d9f0a0d09
| 774
|
py
|
Python
|
mopidy_funkwhale/backend.py
|
gjabell/mopidy-funkwhale
|
a6ee6435514b2c14eaf3129afaba628e6bad7cb3
|
[
"Apache-2.0"
] | null | null | null |
mopidy_funkwhale/backend.py
|
gjabell/mopidy-funkwhale
|
a6ee6435514b2c14eaf3129afaba628e6bad7cb3
|
[
"Apache-2.0"
] | null | null | null |
mopidy_funkwhale/backend.py
|
gjabell/mopidy-funkwhale
|
a6ee6435514b2c14eaf3129afaba628e6bad7cb3
|
[
"Apache-2.0"
] | null | null | null |
from mopidy import backend
import pykka
from mopidy_funkwhale import api, client, library, playback, playlists
class FunkwhaleBackend(pykka.ThreadingActor, backend.Backend):
def __init__(self, config, audio):
super(FunkwhaleBackend, self).__init__()
self.api = api.FunkwhaleApi(config)
self.client = client.FunkwhaleClient(self.api)
self.audio = audio
self.library = library.FunkwhaleLibraryProvider(backend=self)
self.playback = playback.FunkwhalePlaybackProvider(audio=audio,
backend=self)
self.playlists = playlists.FunkwhalePlaylistsProvider(backend=self)
self.uri_schemes = ['funkwhale']
def on_start(self):
self.api.login()
| 35.181818
| 75
| 0.665375
|
from mopidy import backend
import pykka
from mopidy_funkwhale import api, client, library, playback, playlists
class FunkwhaleBackend(pykka.ThreadingActor, backend.Backend):
def __init__(self, config, audio):
super(FunkwhaleBackend, self).__init__()
self.api = api.FunkwhaleApi(config)
self.client = client.FunkwhaleClient(self.api)
self.audio = audio
self.library = library.FunkwhaleLibraryProvider(backend=self)
self.playback = playback.FunkwhalePlaybackProvider(audio=audio,
backend=self)
self.playlists = playlists.FunkwhalePlaylistsProvider(backend=self)
self.uri_schemes = ['funkwhale']
def on_start(self):
self.api.login()
| true
| true
|
79050926b22e092b980b1342a243c8c9ae322ed0
| 1,470
|
py
|
Python
|
gpuexperiments/deviceinfo.py
|
hughperkins/gpu-experiments
|
3e5064e45682494be97190558807672b602f1c76
|
[
"BSD-2-Clause"
] | 2
|
2016-07-05T05:52:18.000Z
|
2018-04-14T07:35:36.000Z
|
gpuexperiments/deviceinfo.py
|
hughperkins/gpu-experiments
|
3e5064e45682494be97190558807672b602f1c76
|
[
"BSD-2-Clause"
] | null | null | null |
gpuexperiments/deviceinfo.py
|
hughperkins/gpu-experiments
|
3e5064e45682494be97190558807672b602f1c76
|
[
"BSD-2-Clause"
] | null | null | null |
import pyopencl as cl
class DeviceInfo(object):
def __init__(self, device):
self.compute_units = device.get_info(cl.device_info.MAX_COMPUTE_UNITS)
self.maxShared = device.get_info(cl.device_info.LOCAL_MEM_SIZE) // 1024
self.compute_capability = (
device.get_info(cl.device_info.COMPUTE_CAPABILITY_MAJOR_NV),
device.get_info(cl.device_info.COMPUTE_CAPABILITY_MINOR_NV)
)
self.deviceName = device.get_info(cl.device_info.NAME)
self.deviceSimpleName = self.deviceName.replace(
'GeForce', '').replace('GTX', '').strip().replace(' ', '').lower()
print('deviceName', self.deviceName, 'compute capability', self.compute_capability)
print('compute units', self.compute_units, 'max shared memory', self.maxShared)
self.shared_memory_per_sm = None
# data comes from http://developer.download.nvidia.com/compute/cuda/CUDA_Occupancy_calculator.xls
if self.compute_capability[0] == 5:
if self.compute_capability[1] == 0:
self.shared_memory_per_sm = 65536
elif self.compute_capability[1] == 2:
self.shared_memory_per_sm = 98304
else:
                raise Exception('compute capability %s not recognized' % str(self.compute_capability))
else:
            raise Exception('compute capability %s not recognized' % str(self.compute_capability))
assert self.shared_memory_per_sm is not None
| 45.9375
| 105
| 0.666667
|
import pyopencl as cl
class DeviceInfo(object):
def __init__(self, device):
self.compute_units = device.get_info(cl.device_info.MAX_COMPUTE_UNITS)
self.maxShared = device.get_info(cl.device_info.LOCAL_MEM_SIZE) // 1024
self.compute_capability = (
device.get_info(cl.device_info.COMPUTE_CAPABILITY_MAJOR_NV),
device.get_info(cl.device_info.COMPUTE_CAPABILITY_MINOR_NV)
)
self.deviceName = device.get_info(cl.device_info.NAME)
self.deviceSimpleName = self.deviceName.replace(
'GeForce', '').replace('GTX', '').strip().replace(' ', '').lower()
print('deviceName', self.deviceName, 'compute capability', self.compute_capability)
print('compute units', self.compute_units, 'max shared memory', self.maxShared)
self.shared_memory_per_sm = None
if self.compute_capability[0] == 5:
if self.compute_capability[1] == 0:
self.shared_memory_per_sm = 65536
elif self.compute_capability[1] == 2:
self.shared_memory_per_sm = 98304
else:
                raise Exception('compute capability %s not recognized' % str(self.compute_capability))
else:
            raise Exception('compute capability %s not recognized' % str(self.compute_capability))
assert self.shared_memory_per_sm is not None
| true
| true
|
790509f88467c24b4bf309f5dc013897bd2f3184
| 3,602
|
py
|
Python
|
PyMailMainWindow.py
|
LolsonX/PyMail
|
85f091dde1e6555f68f090f7b3d49cfde13cc691
|
[
"MIT"
] | null | null | null |
PyMailMainWindow.py
|
LolsonX/PyMail
|
85f091dde1e6555f68f090f7b3d49cfde13cc691
|
[
"MIT"
] | null | null | null |
PyMailMainWindow.py
|
LolsonX/PyMail
|
85f091dde1e6555f68f090f7b3d49cfde13cc691
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMainWindow, QAction, QMessageBox, QStatusBar
from PyMailConfigWindow import ConfigWindow
from PyMailReceiverModel import ReceiverModel
from PyMailReceiverView import ReceiverView
from PyMailSenderModel import SenderModel
from PyMailSenderWindow import SenderWindow
from PyMailSplitWidget import SplitWidget
from PyMailStartUpWindow import StartUpWindow
from PyMailToolBar import ToolBar
class PyMailMainWindow(QMainWindow):
def __init__(self, delegate):
super().__init__()
self.setWindowTitle("PyMail")
self.setWindowIcon(QIcon(r"res\logo.png"))
self.setCentralWidget(SplitWidget(self))
self.setMinimumWidth(800)
self.setMinimumHeight(600)
self.setupUI()
self.show()
self.addToolBar(ToolBar(self))
self.delegate = delegate
self.delegate.registerView(self)
self.setStatusBar(QStatusBar())
self.statusBar()
self.setStatusTip("Ready")
self.startUpWindow = StartUpWindow(self, self.delegate)
def setupUI(self):
self.setupMenuBar()
def setupMenuBar(self):
menuBar = self.menuBar()
self.setupFileMenu(menuBar)
self.setupEditMenu(menuBar)
self.setupOptionsMenu(menuBar)
self.setupHelpMenu(menuBar)
def setupFileMenu(self, menuBar):
fileMenu = menuBar.addMenu("File")
self.setFileMenuActions(fileMenu)
def setupEditMenu(self, menuBar):
editMenu = menuBar.addMenu("Edit")
def setupOptionsMenu(self, menuBar):
optionsMenu = menuBar.addMenu("Options")
settingsAction = QAction(QIcon(r"res\settings.png"), "Settings", optionsMenu)
settingsAction.setStatusTip("Settings")
settingsAction.triggered.connect(self.showSettings)
optionsMenu.addAction(settingsAction)
def setupHelpMenu(self, menuBar):
helpMenu = menuBar.addMenu("Help")
def setFileMenuActions(self, fileMenu):
exitAction = QAction(QIcon(r"res\exit.png"), "Exit", fileMenu)
exitAction.setShortcut("Ctrl+Q")
exitAction.triggered.connect(self.close)
fileMenu.addAction(exitAction)
def showSettings(self):
settingsView = ConfigWindow(self)
self.delegate.reset()
self.delegate.configView = settingsView
self.centralWidget().changeRightWidget(settingsView)
def showHelp(self):
pass
def receiveMail(self):
self.delegate.reset()
receiverView = ReceiverView()
self.delegate.receiverView = receiverView
receiverModel = ReceiverModel()
receiverModel.delegate = self.delegate
self.delegate.receiverModel = receiverModel
receiverView.delegate = self.delegate
self.centralWidget().changeLeftWidget(receiverView)
def showNewMail(self):
newMailView = SenderWindow()
newMailModel = SenderModel()
self.delegate.reset()
self.delegate.senderView = newMailView
self.delegate.senderModel = newMailModel
newMailView.delegate = self.delegate
newMailModel.delegate = self.delegate
newMailView.set_actions()
self.centralWidget().changeRightWidget(newMailView)
def closeEvent(self, event):
event.ignore()
self.exit()
def resizeEvent(self, event):
self.centralWidget().resizeWidget()
def exit(self):
msg = QMessageBox.question(None, "Exit PyMail", "Do You want to quit")
if msg == QMessageBox.Yes:
self.destroy()
sys.exit()
| 33.663551
| 85
| 0.68573
|
import sys
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMainWindow, QAction, QMessageBox, QStatusBar
from PyMailConfigWindow import ConfigWindow
from PyMailReceiverModel import ReceiverModel
from PyMailReceiverView import ReceiverView
from PyMailSenderModel import SenderModel
from PyMailSenderWindow import SenderWindow
from PyMailSplitWidget import SplitWidget
from PyMailStartUpWindow import StartUpWindow
from PyMailToolBar import ToolBar
class PyMailMainWindow(QMainWindow):
def __init__(self, delegate):
super().__init__()
self.setWindowTitle("PyMail")
self.setWindowIcon(QIcon(r"res\logo.png"))
self.setCentralWidget(SplitWidget(self))
self.setMinimumWidth(800)
self.setMinimumHeight(600)
self.setupUI()
self.show()
self.addToolBar(ToolBar(self))
self.delegate = delegate
self.delegate.registerView(self)
self.setStatusBar(QStatusBar())
self.statusBar()
self.setStatusTip("Ready")
self.startUpWindow = StartUpWindow(self, self.delegate)
def setupUI(self):
self.setupMenuBar()
def setupMenuBar(self):
menuBar = self.menuBar()
self.setupFileMenu(menuBar)
self.setupEditMenu(menuBar)
self.setupOptionsMenu(menuBar)
self.setupHelpMenu(menuBar)
def setupFileMenu(self, menuBar):
fileMenu = menuBar.addMenu("File")
self.setFileMenuActions(fileMenu)
def setupEditMenu(self, menuBar):
editMenu = menuBar.addMenu("Edit")
def setupOptionsMenu(self, menuBar):
optionsMenu = menuBar.addMenu("Options")
settingsAction = QAction(QIcon(r"res\settings.png"), "Settings", optionsMenu)
settingsAction.setStatusTip("Settings")
settingsAction.triggered.connect(self.showSettings)
optionsMenu.addAction(settingsAction)
def setupHelpMenu(self, menuBar):
helpMenu = menuBar.addMenu("Help")
def setFileMenuActions(self, fileMenu):
exitAction = QAction(QIcon(r"res\exit.png"), "Exit", fileMenu)
exitAction.setShortcut("Ctrl+Q")
exitAction.triggered.connect(self.close)
fileMenu.addAction(exitAction)
def showSettings(self):
settingsView = ConfigWindow(self)
self.delegate.reset()
self.delegate.configView = settingsView
self.centralWidget().changeRightWidget(settingsView)
def showHelp(self):
pass
def receiveMail(self):
self.delegate.reset()
receiverView = ReceiverView()
self.delegate.receiverView = receiverView
receiverModel = ReceiverModel()
receiverModel.delegate = self.delegate
self.delegate.receiverModel = receiverModel
receiverView.delegate = self.delegate
self.centralWidget().changeLeftWidget(receiverView)
def showNewMail(self):
newMailView = SenderWindow()
newMailModel = SenderModel()
self.delegate.reset()
self.delegate.senderView = newMailView
self.delegate.senderModel = newMailModel
newMailView.delegate = self.delegate
newMailModel.delegate = self.delegate
newMailView.set_actions()
self.centralWidget().changeRightWidget(newMailView)
def closeEvent(self, event):
event.ignore()
self.exit()
def resizeEvent(self, event):
self.centralWidget().resizeWidget()
def exit(self):
msg = QMessageBox.question(None, "Exit PyMail", "Do You want to quit")
if msg == QMessageBox.Yes:
self.destroy()
sys.exit()
| true
| true
|
79050ba2ec191ab6f491fb441749fd2e15937ac7
| 1,122
|
py
|
Python
|
assets/tools/blockfacts.py
|
Clotonervo/TestCoin
|
16a97b165fba7a0d85d640e534060c60e7623bc2
|
[
"MIT"
] | null | null | null |
assets/tools/blockfacts.py
|
Clotonervo/TestCoin
|
16a97b165fba7a0d85d640e534060c60e7623bc2
|
[
"MIT"
] | null | null | null |
assets/tools/blockfacts.py
|
Clotonervo/TestCoin
|
16a97b165fba7a0d85d640e534060c60e7623bc2
|
[
"MIT"
] | null | null | null |
#Shows data from the first 1000 blocks
import random
import os
import subprocess
import json
#Set this to your raven-cli program
cli = "raven-cli"
#mode = "-testnet"
mode = ""
rpc_port = 8746
#Set this information in your raven.conf file (in datadir, not testnet3)
rpc_user = 'rpcuser'
rpc_pass = 'rpcpass555'
def rpc_call(params):
process = subprocess.Popen([cli, mode, params], stdout=subprocess.PIPE)
out, err = process.communicate()
return(out)
def get_blockinfo(num):
rpc_connection = get_rpc_connection()
hash = rpc_connection.getblockhash(num)
blockinfo = rpc_connection.getblock(hash)
return(blockinfo)
def get_rpc_connection():
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
connection = "http://%s:%s@127.0.0.1:%s"%(rpc_user, rpc_pass, rpc_port)
#print("Connection: " + connection)
rpc_connection = AuthServiceProxy(connection)
return(rpc_connection)
for i in range(1,1000):
dta = get_blockinfo(i)
print("Block #" + str(i))
print(dta.get('hash'))
print(dta.get('difficulty'))
print(dta.get('time'))
print("")
| 24.391304
| 75
| 0.703209
|
import random
import os
import subprocess
import json
cli = "raven-cli"
mode = ""
rpc_port = 8746
rpc_user = 'rpcuser'
rpc_pass = 'rpcpass555'
def rpc_call(params):
process = subprocess.Popen([cli, mode, params], stdout=subprocess.PIPE)
out, err = process.communicate()
return(out)
def get_blockinfo(num):
rpc_connection = get_rpc_connection()
hash = rpc_connection.getblockhash(num)
blockinfo = rpc_connection.getblock(hash)
return(blockinfo)
def get_rpc_connection():
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
connection = "http://%s:%s@127.0.0.1:%s"%(rpc_user, rpc_pass, rpc_port)
rpc_connection = AuthServiceProxy(connection)
return(rpc_connection)
for i in range(1,1000):
dta = get_blockinfo(i)
print("Block #" + str(i))
print(dta.get('hash'))
print(dta.get('difficulty'))
print(dta.get('time'))
print("")
| true
| true
|
79050ba489425a569eff2053b423e362fc9742b5
| 1,730
|
py
|
Python
|
setup.py
|
chadrik/doc484
|
597b421a398f5afcc5feb7abae376820fcc25876
|
[
"MIT"
] | 22
|
2017-07-24T22:12:01.000Z
|
2021-10-17T15:52:48.000Z
|
setup.py
|
chadrik/doc484
|
597b421a398f5afcc5feb7abae376820fcc25876
|
[
"MIT"
] | 1
|
2019-11-07T03:55:34.000Z
|
2019-11-07T04:08:09.000Z
|
setup.py
|
chadrik/doc484
|
597b421a398f5afcc5feb7abae376820fcc25876
|
[
"MIT"
] | 2
|
2018-09-25T22:48:16.000Z
|
2020-04-17T11:41:57.000Z
|
from setuptools import setup, find_packages
import os.path
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
with open(os.path.join(HERE, *parts)) as f:
return f.read()
setup(
name="doc484",
version="0.3.4",
author="Chad Dombrova",
description="Generate PEP 484 type comments from docstrings",
long_description=read("README.rst"),
license="MIT",
keywords=["mypy", "typing", "pep484", "docstrings", "annotations"],
url="https://github.com/chadrik/doc484",
packages=find_packages(),
entry_points={
'console_scripts': ['doc484=doc484.__main__:main'],
},
install_requires=[
"docutils", # only required for rest format
],
extras_require={
"tests": [
"coverage",
"pytest==3.6.2",
"tox==2.7.0",
],
},
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
| 29.827586
| 77
| 0.590173
|
from setuptools import setup, find_packages
import os.path
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
with open(os.path.join(HERE, *parts)) as f:
return f.read()
setup(
name="doc484",
version="0.3.4",
author="Chad Dombrova",
description="Generate PEP 484 type comments from docstrings",
long_description=read("README.rst"),
license="MIT",
keywords=["mypy", "typing", "pep484", "docstrings", "annotations"],
url="https://github.com/chadrik/doc484",
packages=find_packages(),
entry_points={
'console_scripts': ['doc484=doc484.__main__:main'],
},
install_requires=[
"docutils",
],
extras_require={
"tests": [
"coverage",
"pytest==3.6.2",
"tox==2.7.0",
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
| true
| true
|
79050c49a5a2f19a4b9d7a52d76dbcd8f259dc46
| 191
|
py
|
Python
|
Exercicios/ex002.py
|
Dobravoski/Exercicios-Python
|
e7169e1ee6954a7bc9216063845611107a13759f
|
[
"MIT"
] | null | null | null |
Exercicios/ex002.py
|
Dobravoski/Exercicios-Python
|
e7169e1ee6954a7bc9216063845611107a13759f
|
[
"MIT"
] | null | null | null |
Exercicios/ex002.py
|
Dobravoski/Exercicios-Python
|
e7169e1ee6954a7bc9216063845611107a13759f
|
[
"MIT"
] | null | null | null |
print('\033[1;33m--' * 10)
print('\033[1;32m EXERCÍCIO 002')
print('\033[1;33m--\033[m' * 10)
nome = input('\033[1;34mDigite seu nome: ')
print(f'É um prazer te conhecer, \033[1;33m{nome}!')
| 31.833333
| 52
| 0.633508
|
print('\033[1;33m--' * 10)
print('\033[1;32m EXERCÍCIO 002')
print('\033[1;33m--\033[m' * 10)
nome = input('\033[1;34mDigite seu nome: ')
print(f'É um prazer te conhecer, \033[1;33m{nome}!')
| true
| true
|
79050d6025b572c53d1183cca6cc50af7bca76a7
| 967
|
py
|
Python
|
cgi-bin/upload/compress.py
|
wsampaio/multi_agenda_py
|
72c9cf4d8b26827a9eba6de3119e63464d312aea
|
[
"CC-BY-4.0"
] | null | null | null |
cgi-bin/upload/compress.py
|
wsampaio/multi_agenda_py
|
72c9cf4d8b26827a9eba6de3119e63464d312aea
|
[
"CC-BY-4.0"
] | null | null | null |
cgi-bin/upload/compress.py
|
wsampaio/multi_agenda_py
|
72c9cf4d8b26827a9eba6de3119e63464d312aea
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python3
#
# Este arquivo é parte do programa multi_agenda
#
# Esta obra está licenciada com uma
# Licença Creative Commons Atribuição 4.0 Internacional.
# (CC BY 4.0 Internacional)
#
# Para ver uma cópia da licença, visite
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# WELLINGTON SAMPAIO - wsampaio@yahoo.com
# https://www.linkedin.com/in/wellsampaio/
#
import sys
import cgi
from os.path import dirname, realpath, sep, pardir
sys.path.append((dirname(realpath(__file__)) + sep + pardir))
import cgitb
cgitb.enable()
import objetos.dbConn.BackupMonitor as bkMonitor
form = cgi.FieldStorage()
#print("Content-type:text/html\r\n\r\n")
print("Content-type:text/text\r\n\r\n")
if "com" in str(form):
bkMonitor.criaBKP()
if "del" in str(form):
bkMonitor.removeArquivo(form.getvalue("file"))
if "dec" in str(form):
bkMonitor.extract(form.getvalue("file"))
if "info" in str(form):
bkMonitor.fileInfo(form.getvalue("file"))
| 20.574468
| 61
| 0.731127
|
import sys
import cgi
from os.path import dirname, realpath, sep, pardir
sys.path.append((dirname(realpath(__file__)) + sep + pardir))
import cgitb
cgitb.enable()
import objetos.dbConn.BackupMonitor as bkMonitor
form = cgi.FieldStorage()
print("Content-type:text/text\r\n\r\n")
if "com" in str(form):
bkMonitor.criaBKP()
if "del" in str(form):
bkMonitor.removeArquivo(form.getvalue("file"))
if "dec" in str(form):
bkMonitor.extract(form.getvalue("file"))
if "info" in str(form):
bkMonitor.fileInfo(form.getvalue("file"))
| true
| true
|
790510b25ea956c7fe3b519d94fcb59a9d94553f
| 709
|
py
|
Python
|
main.py
|
angli66/Image-Captioning
|
e6f06f3eaa0b4fbb960f5e5fea3f242ebe952c19
|
[
"MIT"
] | null | null | null |
main.py
|
angli66/Image-Captioning
|
e6f06f3eaa0b4fbb960f5e5fea3f242ebe952c19
|
[
"MIT"
] | null | null | null |
main.py
|
angli66/Image-Captioning
|
e6f06f3eaa0b4fbb960f5e5fea3f242ebe952c19
|
[
"MIT"
] | null | null | null |
################################################################################
# CSE 151B: Programming Assignment 4
# Code snippet by Ajit Kumar, Savyasachi
# Updated by Rohin
# Winter 2022
################################################################################
from experiment import Experiment
import sys
# Main Driver for your code. Either run `python main.py` which will run the experiment with default config
# or specify the configuration by running `python main.py custom`
if __name__ == "__main__":
exp_name = 'baseline'
if len(sys.argv) > 1:
exp_name = sys.argv[1]
print("Running Experiment: ", exp_name)
exp = Experiment(exp_name)
exp.run()
exp.test()
| 30.826087
| 106
| 0.545839
|
from experiment import Experiment
import sys
if __name__ == "__main__":
    exp_name = 'baseline'
    if len(sys.argv) > 1:
        exp_name = sys.argv[1]
    print("Running Experiment: ", exp_name)
    exp = Experiment(exp_name)
    exp.run()
    exp.test()
| true
| true
|
790510db3d3f8ec9b7320bb3ab6214b87ae4dbe3
| 10,991
|
py
|
Python
|
homeassistant/components/xiaomi_aqara.py
|
zanerv/home-assistant
|
aabc4d0bf488ba6d3035383fd22f891118b3e61b
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/xiaomi_aqara.py
|
zanerv/home-assistant
|
aabc4d0bf488ba6d3035383fd22f891118b3e61b
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/xiaomi_aqara.py
|
zanerv/home-assistant
|
aabc4d0bf488ba6d3035383fd22f891118b3e61b
|
[
"Apache-2.0"
] | null | null | null |
"""
Support for Xiaomi Gateways.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/xiaomi_aqara/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_XIAOMI_GW
from homeassistant.const import (
ATTR_BATTERY_LEVEL, CONF_HOST, CONF_MAC, CONF_PORT,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import callback
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
from homeassistant.util import slugify
REQUIREMENTS = ['PyXiaomiGateway==0.10.0']
_LOGGER = logging.getLogger(__name__)
ATTR_GW_MAC = 'gw_mac'
ATTR_RINGTONE_ID = 'ringtone_id'
ATTR_RINGTONE_VOL = 'ringtone_vol'
ATTR_DEVICE_ID = 'device_id'
CONF_DISCOVERY_RETRY = 'discovery_retry'
CONF_GATEWAYS = 'gateways'
CONF_INTERFACE = 'interface'
CONF_KEY = 'key'
CONF_DISABLE = 'disable'
DOMAIN = 'xiaomi_aqara'
PY_XIAOMI_GATEWAY = "xiaomi_gw"
TIME_TILL_UNAVAILABLE = timedelta(minutes=150)
SERVICE_PLAY_RINGTONE = 'play_ringtone'
SERVICE_STOP_RINGTONE = 'stop_ringtone'
SERVICE_ADD_DEVICE = 'add_device'
SERVICE_REMOVE_DEVICE = 'remove_device'
GW_MAC = vol.All(
cv.string,
lambda value: value.replace(':', '').lower(),
vol.Length(min=12, max=12)
)
SERVICE_SCHEMA_PLAY_RINGTONE = vol.Schema({
vol.Required(ATTR_RINGTONE_ID):
vol.All(vol.Coerce(int), vol.NotIn([9, 14, 15, 16, 17, 18, 19])),
vol.Optional(ATTR_RINGTONE_VOL):
vol.All(vol.Coerce(int), vol.Clamp(min=0, max=100))
})
SERVICE_SCHEMA_REMOVE_DEVICE = vol.Schema({
vol.Required(ATTR_DEVICE_ID):
vol.All(cv.string, vol.Length(min=14, max=14))
})
GATEWAY_CONFIG = vol.Schema({
vol.Optional(CONF_MAC, default=None): vol.Any(GW_MAC, None),
vol.Optional(CONF_KEY):
vol.All(cv.string, vol.Length(min=16, max=16)),
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=9898): cv.port,
vol.Optional(CONF_DISABLE, default=False): cv.boolean,
})
def _fix_conf_defaults(config):
"""Update some configuration defaults."""
config['sid'] = config.pop(CONF_MAC, None)
if config.get(CONF_KEY) is None:
_LOGGER.warning(
'Key is not provided for gateway %s. Controlling the gateway '
'will not be possible', config['sid'])
if config.get(CONF_HOST) is None:
config.pop(CONF_PORT)
return config
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_GATEWAYS, default={}):
vol.All(cv.ensure_list, [GATEWAY_CONFIG], [_fix_conf_defaults]),
vol.Optional(CONF_INTERFACE, default='any'): cv.string,
vol.Optional(CONF_DISCOVERY_RETRY, default=3): cv.positive_int
})
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up the Xiaomi component."""
gateways = []
interface = 'any'
discovery_retry = 3
if DOMAIN in config:
gateways = config[DOMAIN][CONF_GATEWAYS]
interface = config[DOMAIN][CONF_INTERFACE]
discovery_retry = config[DOMAIN][CONF_DISCOVERY_RETRY]
async def xiaomi_gw_discovered(service, discovery_info):
"""Perform action when Xiaomi Gateway device(s) has been found."""
# We don't need to do anything here, the purpose of Home Assistant's
# discovery service is to just trigger loading of this
# component, and then its own discovery process kicks in.
discovery.listen(hass, SERVICE_XIAOMI_GW, xiaomi_gw_discovered)
from xiaomi_gateway import XiaomiGatewayDiscovery
xiaomi = hass.data[PY_XIAOMI_GATEWAY] = XiaomiGatewayDiscovery(
hass.add_job, gateways, interface)
_LOGGER.debug("Expecting %s gateways", len(gateways))
for k in range(discovery_retry):
_LOGGER.info("Discovering Xiaomi Gateways (Try %s)", k + 1)
xiaomi.discover_gateways()
if len(xiaomi.gateways) >= len(gateways):
break
if not xiaomi.gateways:
_LOGGER.error("No gateway discovered")
return False
xiaomi.listen()
_LOGGER.debug("Gateways discovered. Listening for broadcasts")
for component in ['binary_sensor', 'sensor', 'switch', 'light', 'cover',
'lock']:
discovery.load_platform(hass, component, DOMAIN, {}, config)
def stop_xiaomi(event):
"""Stop Xiaomi Socket."""
_LOGGER.info("Shutting down Xiaomi Hub")
xiaomi.stop_listen()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_xiaomi)
def play_ringtone_service(call):
"""Service to play ringtone through Gateway."""
ring_id = call.data.get(ATTR_RINGTONE_ID)
gateway = call.data.get(ATTR_GW_MAC)
kwargs = {'mid': ring_id}
ring_vol = call.data.get(ATTR_RINGTONE_VOL)
if ring_vol is not None:
kwargs['vol'] = ring_vol
gateway.write_to_hub(gateway.sid, **kwargs)
def stop_ringtone_service(call):
"""Service to stop playing ringtone on Gateway."""
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, mid=10000)
def add_device_service(call):
"""Service to add a new sub-device within the next 30 seconds."""
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, join_permission='yes')
hass.components.persistent_notification.async_create(
'Join permission enabled for 30 seconds! '
'Please press the pairing button of the new device once.',
title='Xiaomi Aqara Gateway')
def remove_device_service(call):
"""Service to remove a sub-device from the gateway."""
device_id = call.data.get(ATTR_DEVICE_ID)
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, remove_device=device_id)
gateway_only_schema = _add_gateway_to_schema(xiaomi, vol.Schema({}))
hass.services.register(
DOMAIN, SERVICE_PLAY_RINGTONE, play_ringtone_service,
schema=_add_gateway_to_schema(xiaomi, SERVICE_SCHEMA_PLAY_RINGTONE))
hass.services.register(
DOMAIN, SERVICE_STOP_RINGTONE, stop_ringtone_service,
schema=gateway_only_schema)
hass.services.register(
DOMAIN, SERVICE_ADD_DEVICE, add_device_service,
schema=gateway_only_schema)
hass.services.register(
DOMAIN, SERVICE_REMOVE_DEVICE, remove_device_service,
schema=_add_gateway_to_schema(xiaomi, SERVICE_SCHEMA_REMOVE_DEVICE))
return True
class XiaomiDevice(Entity):
"""Representation a base Xiaomi device."""
def __init__(self, device, device_type, xiaomi_hub):
"""Initialize the Xiaomi device."""
self._state = None
self._is_available = True
self._sid = device['sid']
self._name = '{}_{}'.format(device_type, self._sid)
self._type = device_type
self._write_to_hub = xiaomi_hub.write_to_hub
self._get_from_hub = xiaomi_hub.get_from_hub
self._device_state_attributes = {}
self._remove_unavailability_tracker = None
self._xiaomi_hub = xiaomi_hub
self.parse_data(device['data'], device['raw_data'])
self.parse_voltage(device['data'])
if hasattr(self, '_data_key') \
and self._data_key: # pylint: disable=no-member
self._unique_id = slugify("{}-{}".format(
self._data_key, # pylint: disable=no-member
self._sid))
else:
self._unique_id = slugify("{}-{}".format(self._type, self._sid))
def _add_push_data_job(self, *args):
self.hass.add_job(self.push_data, *args)
async def async_added_to_hass(self):
"""Start unavailability tracking."""
self._xiaomi_hub.callbacks[self._sid].append(self._add_push_data_job)
self._async_track_unavailable()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def available(self):
"""Return True if entity is available."""
return self._is_available
@property
def should_poll(self):
"""Return the polling state. No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._device_state_attributes
@callback
def _async_set_unavailable(self, now):
"""Set state to UNAVAILABLE."""
self._remove_unavailability_tracker = None
self._is_available = False
self.async_schedule_update_ha_state()
@callback
def _async_track_unavailable(self):
if self._remove_unavailability_tracker:
self._remove_unavailability_tracker()
self._remove_unavailability_tracker = async_track_point_in_utc_time(
self.hass, self._async_set_unavailable,
utcnow() + TIME_TILL_UNAVAILABLE)
if not self._is_available:
self._is_available = True
return True
return False
@callback
def push_data(self, data, raw_data):
"""Push from Hub."""
_LOGGER.debug("PUSH >> %s: %s", self, data)
was_unavailable = self._async_track_unavailable()
is_data = self.parse_data(data, raw_data)
is_voltage = self.parse_voltage(data)
if is_data or is_voltage or was_unavailable:
self.async_schedule_update_ha_state()
def parse_voltage(self, data):
"""Parse battery level data sent by gateway."""
if 'voltage' not in data:
return False
max_volt = 3300
min_volt = 2800
voltage = data['voltage']
voltage = min(voltage, max_volt)
voltage = max(voltage, min_volt)
percent = ((voltage - min_volt) / (max_volt - min_volt)) * 100
self._device_state_attributes[ATTR_BATTERY_LEVEL] = round(percent, 1)
return True
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
raise NotImplementedError()
def _add_gateway_to_schema(xiaomi, schema):
"""Extend a voluptuous schema with a gateway validator."""
def gateway(sid):
"""Convert sid to a gateway."""
sid = str(sid).replace(':', '').lower()
for gateway in xiaomi.gateways.values():
if gateway.sid == sid:
return gateway
raise vol.Invalid('Unknown gateway sid {}'.format(sid))
gateways = list(xiaomi.gateways.values())
kwargs = {}
# If the user has only 1 gateway, make it the default for services.
if len(gateways) == 1:
kwargs['default'] = gateways[0]
return schema.extend({
vol.Required(ATTR_GW_MAC, **kwargs): gateway
})
| 33.006006
| 77
| 0.672186
|
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_XIAOMI_GW
from homeassistant.const import (
ATTR_BATTERY_LEVEL, CONF_HOST, CONF_MAC, CONF_PORT,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import callback
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
from homeassistant.util import slugify
REQUIREMENTS = ['PyXiaomiGateway==0.10.0']
_LOGGER = logging.getLogger(__name__)
ATTR_GW_MAC = 'gw_mac'
ATTR_RINGTONE_ID = 'ringtone_id'
ATTR_RINGTONE_VOL = 'ringtone_vol'
ATTR_DEVICE_ID = 'device_id'
CONF_DISCOVERY_RETRY = 'discovery_retry'
CONF_GATEWAYS = 'gateways'
CONF_INTERFACE = 'interface'
CONF_KEY = 'key'
CONF_DISABLE = 'disable'
DOMAIN = 'xiaomi_aqara'
PY_XIAOMI_GATEWAY = "xiaomi_gw"
TIME_TILL_UNAVAILABLE = timedelta(minutes=150)
SERVICE_PLAY_RINGTONE = 'play_ringtone'
SERVICE_STOP_RINGTONE = 'stop_ringtone'
SERVICE_ADD_DEVICE = 'add_device'
SERVICE_REMOVE_DEVICE = 'remove_device'
GW_MAC = vol.All(
cv.string,
lambda value: value.replace(':', '').lower(),
vol.Length(min=12, max=12)
)
SERVICE_SCHEMA_PLAY_RINGTONE = vol.Schema({
vol.Required(ATTR_RINGTONE_ID):
vol.All(vol.Coerce(int), vol.NotIn([9, 14, 15, 16, 17, 18, 19])),
vol.Optional(ATTR_RINGTONE_VOL):
vol.All(vol.Coerce(int), vol.Clamp(min=0, max=100))
})
SERVICE_SCHEMA_REMOVE_DEVICE = vol.Schema({
vol.Required(ATTR_DEVICE_ID):
vol.All(cv.string, vol.Length(min=14, max=14))
})
GATEWAY_CONFIG = vol.Schema({
vol.Optional(CONF_MAC, default=None): vol.Any(GW_MAC, None),
vol.Optional(CONF_KEY):
vol.All(cv.string, vol.Length(min=16, max=16)),
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=9898): cv.port,
vol.Optional(CONF_DISABLE, default=False): cv.boolean,
})
def _fix_conf_defaults(config):
config['sid'] = config.pop(CONF_MAC, None)
if config.get(CONF_KEY) is None:
_LOGGER.warning(
'Key is not provided for gateway %s. Controlling the gateway '
'will not be possible', config['sid'])
if config.get(CONF_HOST) is None:
config.pop(CONF_PORT)
return config
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_GATEWAYS, default={}):
vol.All(cv.ensure_list, [GATEWAY_CONFIG], [_fix_conf_defaults]),
vol.Optional(CONF_INTERFACE, default='any'): cv.string,
vol.Optional(CONF_DISCOVERY_RETRY, default=3): cv.positive_int
})
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
gateways = []
interface = 'any'
discovery_retry = 3
if DOMAIN in config:
gateways = config[DOMAIN][CONF_GATEWAYS]
interface = config[DOMAIN][CONF_INTERFACE]
discovery_retry = config[DOMAIN][CONF_DISCOVERY_RETRY]
async def xiaomi_gw_discovered(service, discovery_info):
discovery.listen(hass, SERVICE_XIAOMI_GW, xiaomi_gw_discovered)
from xiaomi_gateway import XiaomiGatewayDiscovery
xiaomi = hass.data[PY_XIAOMI_GATEWAY] = XiaomiGatewayDiscovery(
hass.add_job, gateways, interface)
_LOGGER.debug("Expecting %s gateways", len(gateways))
for k in range(discovery_retry):
_LOGGER.info("Discovering Xiaomi Gateways (Try %s)", k + 1)
xiaomi.discover_gateways()
if len(xiaomi.gateways) >= len(gateways):
break
if not xiaomi.gateways:
_LOGGER.error("No gateway discovered")
return False
xiaomi.listen()
_LOGGER.debug("Gateways discovered. Listening for broadcasts")
for component in ['binary_sensor', 'sensor', 'switch', 'light', 'cover',
'lock']:
discovery.load_platform(hass, component, DOMAIN, {}, config)
def stop_xiaomi(event):
_LOGGER.info("Shutting down Xiaomi Hub")
xiaomi.stop_listen()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_xiaomi)
def play_ringtone_service(call):
ring_id = call.data.get(ATTR_RINGTONE_ID)
gateway = call.data.get(ATTR_GW_MAC)
kwargs = {'mid': ring_id}
ring_vol = call.data.get(ATTR_RINGTONE_VOL)
if ring_vol is not None:
kwargs['vol'] = ring_vol
gateway.write_to_hub(gateway.sid, **kwargs)
def stop_ringtone_service(call):
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, mid=10000)
def add_device_service(call):
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, join_permission='yes')
hass.components.persistent_notification.async_create(
'Join permission enabled for 30 seconds! '
'Please press the pairing button of the new device once.',
title='Xiaomi Aqara Gateway')
def remove_device_service(call):
device_id = call.data.get(ATTR_DEVICE_ID)
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, remove_device=device_id)
gateway_only_schema = _add_gateway_to_schema(xiaomi, vol.Schema({}))
hass.services.register(
DOMAIN, SERVICE_PLAY_RINGTONE, play_ringtone_service,
schema=_add_gateway_to_schema(xiaomi, SERVICE_SCHEMA_PLAY_RINGTONE))
hass.services.register(
DOMAIN, SERVICE_STOP_RINGTONE, stop_ringtone_service,
schema=gateway_only_schema)
hass.services.register(
DOMAIN, SERVICE_ADD_DEVICE, add_device_service,
schema=gateway_only_schema)
hass.services.register(
DOMAIN, SERVICE_REMOVE_DEVICE, remove_device_service,
schema=_add_gateway_to_schema(xiaomi, SERVICE_SCHEMA_REMOVE_DEVICE))
return True
class XiaomiDevice(Entity):
def __init__(self, device, device_type, xiaomi_hub):
self._state = None
self._is_available = True
self._sid = device['sid']
self._name = '{}_{}'.format(device_type, self._sid)
self._type = device_type
self._write_to_hub = xiaomi_hub.write_to_hub
self._get_from_hub = xiaomi_hub.get_from_hub
self._device_state_attributes = {}
self._remove_unavailability_tracker = None
self._xiaomi_hub = xiaomi_hub
self.parse_data(device['data'], device['raw_data'])
self.parse_voltage(device['data'])
if hasattr(self, '_data_key') \
and self._data_key:
self._unique_id = slugify("{}-{}".format(
self._data_key,
self._sid))
else:
self._unique_id = slugify("{}-{}".format(self._type, self._sid))
def _add_push_data_job(self, *args):
self.hass.add_job(self.push_data, *args)
async def async_added_to_hass(self):
self._xiaomi_hub.callbacks[self._sid].append(self._add_push_data_job)
self._async_track_unavailable()
@property
def name(self):
return self._name
@property
def unique_id(self) -> str:
return self._unique_id
@property
def available(self):
return self._is_available
@property
def should_poll(self):
return False
@property
def device_state_attributes(self):
return self._device_state_attributes
@callback
def _async_set_unavailable(self, now):
self._remove_unavailability_tracker = None
self._is_available = False
self.async_schedule_update_ha_state()
@callback
def _async_track_unavailable(self):
if self._remove_unavailability_tracker:
self._remove_unavailability_tracker()
self._remove_unavailability_tracker = async_track_point_in_utc_time(
self.hass, self._async_set_unavailable,
utcnow() + TIME_TILL_UNAVAILABLE)
if not self._is_available:
self._is_available = True
return True
return False
@callback
def push_data(self, data, raw_data):
_LOGGER.debug("PUSH >> %s: %s", self, data)
was_unavailable = self._async_track_unavailable()
is_data = self.parse_data(data, raw_data)
is_voltage = self.parse_voltage(data)
if is_data or is_voltage or was_unavailable:
self.async_schedule_update_ha_state()
def parse_voltage(self, data):
if 'voltage' not in data:
return False
max_volt = 3300
min_volt = 2800
voltage = data['voltage']
voltage = min(voltage, max_volt)
voltage = max(voltage, min_volt)
percent = ((voltage - min_volt) / (max_volt - min_volt)) * 100
self._device_state_attributes[ATTR_BATTERY_LEVEL] = round(percent, 1)
return True
def parse_data(self, data, raw_data):
raise NotImplementedError()
def _add_gateway_to_schema(xiaomi, schema):
def gateway(sid):
sid = str(sid).replace(':', '').lower()
for gateway in xiaomi.gateways.values():
if gateway.sid == sid:
return gateway
raise vol.Invalid('Unknown gateway sid {}'.format(sid))
gateways = list(xiaomi.gateways.values())
kwargs = {}
if len(gateways) == 1:
kwargs['default'] = gateways[0]
return schema.extend({
vol.Required(ATTR_GW_MAC, **kwargs): gateway
})
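# Configuration sketch (hedged: top-level keys follow the CONFIG_SCHEMA above,
# DOMAIN is assumed to be 'xiaomi_aqara', and the per-gateway fields live in
# GATEWAY_CONFIG, which is defined elsewhere in this module):
#
# xiaomi_aqara:
#   interface: any
#   discovery_retry: 5
#   gateways:
#     - key: xxxxxxxxxxxxxxxx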
| true
| true
|
790510ff384984529640b134febeb8ef025d5866
| 2,689
|
py
|
Python
|
insomniac/__init__.py
|
felipe4334/Insomniac
|
8e405ee65c995f90650dfeac682d4ae3cf730b23
|
[
"MIT"
] | null | null | null |
insomniac/__init__.py
|
felipe4334/Insomniac
|
8e405ee65c995f90650dfeac682d4ae3cf730b23
|
[
"MIT"
] | null | null | null |
insomniac/__init__.py
|
felipe4334/Insomniac
|
8e405ee65c995f90650dfeac682d4ae3cf730b23
|
[
"MIT"
] | null | null | null |
import argparse
import json
import insomniac.__version__ as __version__
from insomniac import network
from insomniac.activation import activation_controller
from insomniac.network import HTTP_OK
from insomniac.params import parse_arguments
from insomniac.utils import *
import insomniac.globals as insomniac_globals  # assumed module path; needed for is_insomniac() below
def run(activation_code="", starter_conf_file_path=None):
if not __version__.__debug_mode__:
print_timeless(COLOR_OKGREEN + __version__.__logo__ + COLOR_ENDC)
print_version()
activation_code_from_args = _get_activation_code_from_args()
if activation_code_from_args is not None:
activation_code = activation_code_from_args
activation_controller.validate(activation_code)
if not activation_controller.is_activated:
from insomniac.session import InsomniacSession
print_timeless("Using insomniac session-manager without extra-features")
insomniac_session = InsomniacSession(starter_conf_file_path)
else:
from insomniac.extra_features.session import ExtendedInsomniacSession
insomniac_session = ExtendedInsomniacSession(starter_conf_file_path)
insomniac_session.run()
def is_newer_version_available():
def versiontuple(v):
return tuple(map(int, (v.split("."))))
current_version = __version__.__version__
latest_version = _get_latest_version('insomniac')
if latest_version is not None and versiontuple(latest_version) > versiontuple(current_version):
return True, latest_version
return False, None
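# Worked example for versiontuple above (not part of the original module):
# versiontuple("3.9.10") -> (3, 9, 10) and versiontuple("3.9.2") -> (3, 9, 2);
# tuple comparison correctly ranks (3, 9, 10) above (3, 9, 2), whereas plain
# string comparison would order "3.9.10" before "3.9.2".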
def print_version():
print_timeless_ui(COLOR_HEADER + f"Engine v{__version__.__version__}" + COLOR_ENDC)
is_new_version_available, latest_version = is_newer_version_available()
if is_new_version_available and insomniac_globals.is_insomniac():
print_timeless(COLOR_HEADER + f"Newer version is available (v{latest_version}). Please run" + COLOR_ENDC)
print_timeless(COLOR_HEADER + COLOR_BOLD + "python3 -m pip install insomniac --upgrade" + COLOR_ENDC)
print_timeless("")
def _get_latest_version(package):
latest_version = None
code, body, _ = network.get(f"https://pypi.python.org/pypi/{package}/json")
if code == HTTP_OK and body is not None:
json_package = json.loads(body)
latest_version = json_package['info']['version']
return latest_version
def _get_activation_code_from_args():
parser = ArgumentParser(add_help=False)
parser.add_argument('--activation-code')
try:
args, _ = parser.parse_known_args()
except (argparse.ArgumentError, TypeError):
return None
return args.activation_code
class ArgumentParser(argparse.ArgumentParser):
def error(self, message):
pass
| 34.922078
| 113
| 0.755299
|
import argparse
import json
import insomniac.__version__ as __version__
from insomniac import network
from insomniac.activation import activation_controller
from insomniac.network import HTTP_OK
from insomniac.params import parse_arguments
from insomniac.utils import *
import insomniac.globals as insomniac_globals
def run(activation_code="", starter_conf_file_path=None):
if not __version__.__debug_mode__:
print_timeless(COLOR_OKGREEN + __version__.__logo__ + COLOR_ENDC)
print_version()
activation_code_from_args = _get_activation_code_from_args()
if activation_code_from_args is not None:
activation_code = activation_code_from_args
activation_controller.validate(activation_code)
if not activation_controller.is_activated:
from insomniac.session import InsomniacSession
print_timeless("Using insomniac session-manager without extra-features")
insomniac_session = InsomniacSession(starter_conf_file_path)
else:
from insomniac.extra_features.session import ExtendedInsomniacSession
insomniac_session = ExtendedInsomniacSession(starter_conf_file_path)
insomniac_session.run()
def is_newer_version_available():
def versiontuple(v):
return tuple(map(int, (v.split("."))))
current_version = __version__.__version__
latest_version = _get_latest_version('insomniac')
if latest_version is not None and versiontuple(latest_version) > versiontuple(current_version):
return True, latest_version
return False, None
def print_version():
print_timeless_ui(COLOR_HEADER + f"Engine v{__version__.__version__}" + COLOR_ENDC)
is_new_version_available, latest_version = is_newer_version_available()
if is_new_version_available and insomniac_globals.is_insomniac():
print_timeless(COLOR_HEADER + f"Newer version is available (v{latest_version}). Please run" + COLOR_ENDC)
print_timeless(COLOR_HEADER + COLOR_BOLD + "python3 -m pip install insomniac --upgrade" + COLOR_ENDC)
print_timeless("")
def _get_latest_version(package):
latest_version = None
code, body, _ = network.get(f"https://pypi.python.org/pypi/{package}/json")
if code == HTTP_OK and body is not None:
json_package = json.loads(body)
latest_version = json_package['info']['version']
return latest_version
def _get_activation_code_from_args():
parser = ArgumentParser(add_help=False)
parser.add_argument('--activation-code')
try:
args, _ = parser.parse_known_args()
except (argparse.ArgumentError, TypeError):
return None
return args.activation_code
class ArgumentParser(argparse.ArgumentParser):
def error(self, message):
pass
| true
| true
|
790512962a88642aac0fd289c5d4f2976e7fe28a
| 2,091
|
py
|
Python
|
PredictPrice.py
|
AlirezaMojtabavi/Predict-Price-of-an-Apartment-in-Tehran
|
1b9a7ede8369f31954ba274ef9cb9a4d5ab1762a
|
[
"MIT"
] | null | null | null |
PredictPrice.py
|
AlirezaMojtabavi/Predict-Price-of-an-Apartment-in-Tehran
|
1b9a7ede8369f31954ba274ef9cb9a4d5ab1762a
|
[
"MIT"
] | null | null | null |
PredictPrice.py
|
AlirezaMojtabavi/Predict-Price-of-an-Apartment-in-Tehran
|
1b9a7ede8369f31954ba274ef9cb9a4d5ab1762a
|
[
"MIT"
] | null | null | null |
import numpy
import mysql.connector
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn import tree
##--------------------------Catch Data from data base--------------------------------
cnx = mysql.connector.connect(user = '[type your user]' , password = '[type your password]' ,
                       host = '[type your host]' , database = '[type your database name]' )
cur = cnx.cursor()
cur.execute("SELECT Neighborhood, Area, rooms, Antiquity FROM specifications")
inputData = cur.fetchall()
cur.execute("SELECT Price FROM specifications")
outputData = cur.fetchall()
if cur:
cur.close()
if cnx:
cnx.close()
## TestData
newApartments = [['ولنجک', 120, '2','2'],
['میرداماد', 110, '2','0'],
['هروی', 200, '4','2']]
for i in newApartments: ## Add newApartments to input of table
inputData.append(i)
Neighborhood = list()
Area = list()
rooms = list()
Antiquity = list()
for i in inputData :
Neighborhood.append(i[0])
Area.append(i[1])
rooms.append(i[2])
Antiquity.append(i[3])
# Encode Neighborhood
values = numpy.array(Neighborhood)
# integer encode
labelEncoder = LabelEncoder()
integer_encoded = labelEncoder.fit_transform(values)
# binary encode
NeighborhoodOHE = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
NeighborhoodOHE = NeighborhoodOHE.fit_transform(integer_encoded)
test= Area+rooms
x = numpy.column_stack((NeighborhoodOHE, Area,rooms, Antiquity))
y = outputData
print(x[1])
print(len(x))
print(len(x[1]))
temp = numpy.split(x, [(-1)*len(newApartments)])
x = temp[0]
newApartments_enc = temp[1]
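# Sanity check (added sketch): the negative index passed to numpy.split above
# peels the appended test rows back off the one-hot-encoded matrix, e.g.
# numpy.split(numpy.arange(5), [-2]) -> [array([0, 1, 2]), array([3, 4])].
# Encoding train and test rows together keeps the one-hot columns aligned.
assert len(newApartments_enc) == len(newApartments)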
# Start training and testing
clf = tree.DecisionTreeClassifier()
clf = clf.fit(x, y)
# Encode New Apartment
answer = clf.predict(newApartments_enc)
for i in range(len(answer)):
print("The price of Apartment in %s with %i metters Area, is approaximately %s Tomans." % (newApartments[i][0],newApartments[i][1], answer[i]))
| 29.450704
| 148
| 0.66045
|
import numpy
import mysql.connector
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn import tree
cnx = mysql.connector.connect(user = '[type your user]' , password = '[type your password]' ,
                       host = '[type your host]' , database = '[type your database name]' )
cur = cnx.cursor()
cur.execute("SELECT Neighborhood, Area, rooms, Antiquity FROM specifications")
inputData = cur.fetchall()
cur.execute("SELECT Price FROM specifications")
outputData = cur.fetchall()
if cur:
cur.close()
if cnx:
cnx.close()
newApartments = [['ولنجک', 120, '2','2'],
                 ['میرداماد', 110, '2','0'],
                 ['هروی', 200, '4','2']]
for i in newApartments:
    inputData.append(i)
Neighborhood = list()
Area = list()
rooms = list()
Antiquity = list()
for i in inputData :
Neighborhood.append(i[0])
Area.append(i[1])
rooms.append(i[2])
Antiquity.append(i[3])
values = numpy.array(Neighborhood)
labelEncoder = LabelEncoder()
integer_encoded = labelEncoder.fit_transform(values)
NeighborhoodOHE = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
NeighborhoodOHE = NeighborhoodOHE.fit_transform(integer_encoded)
test= Area+rooms
x = numpy.column_stack((NeighborhoodOHE, Area,rooms, Antiquity))
y = outputData
print(x[1])
print(len(x))
print(len(x[1]))
temp = numpy.split(x, [(-1)*len(newApartments)])
x = temp[0]
newApartments_enc = temp[1]
clf = tree.DecisionTreeClassifier()
clf = clf.fit(x, y)
answer = clf.predict(newApartments_enc)
for i in range(len(answer)):
print("The price of Apartment in %s with %i metters Area, is approaximately %s Tomans." % (newApartments[i][0],newApartments[i][1], answer[i]))
| false
| true
|
7905143833768dd4a20fbcee397f1603b72fdf21
| 1,101
|
py
|
Python
|
src/app/fs.py
|
sgavka/multi_public_poll_bot
|
b20dd2b205312370ac10c176a547cf2d104519c7
|
[
"MIT"
] | 19
|
2018-08-07T13:25:03.000Z
|
2021-02-13T14:40:18.000Z
|
src/app/fs.py
|
sgavka/multi_public_poll_bot
|
b20dd2b205312370ac10c176a547cf2d104519c7
|
[
"MIT"
] | 1
|
2019-06-13T22:21:19.000Z
|
2019-12-17T16:41:00.000Z
|
src/app/fs.py
|
sgavka/multi_public_poll_bot
|
b20dd2b205312370ac10c176a547cf2d104519c7
|
[
"MIT"
] | 7
|
2018-04-24T17:08:15.000Z
|
2021-11-12T11:37:44.000Z
|
"""
file system and database initialization.
tables:
- polls:
- id PRIMARY KEY
- owner_id => users.id
- topic
- users:
- id PRIMARY KEY
- first_name
- last_name
- username
- answers:
- id PRIMARY KEY
- poll_id => polls.id
- text
- votes:
- user_id => users.id
- poll_id => polls.id
- answer_id => answers.id
"""
import os
from os.path import expanduser, join
from yoyo import get_backend, read_migrations
from . import log
logger = log.getLogger('app.fs')
DATA_DIR: str = expanduser("~/.local/share/multi_vote_bot")
if not os.path.exists(DATA_DIR):
logger.info("Creating data dir at path %s", DATA_DIR)
os.makedirs(DATA_DIR, exist_ok=True)
DB_PATH: str = join(DATA_DIR, "data.db")
def migrate():
""" apply yoyo migrations """
logger.info("Migrating to the latest schema")
log.getLogger('yoyo').setLevel(log.DEBUG)
backend = get_backend('sqlite:///' + DB_PATH)
migrations = read_migrations('./migrations')
with backend.lock():
backend.apply_migrations(backend.to_apply(migrations))
# auto migrate when imported
migrate()
| 19.315789
| 62
| 0.679382
|
import os
from os.path import expanduser, join
from yoyo import get_backend, read_migrations
from . import log
logger = log.getLogger('app.fs')
DATA_DIR: str = expanduser("~/.local/share/multi_vote_bot")
if not os.path.exists(DATA_DIR):
logger.info("Creating data dir at path %s", DATA_DIR)
os.makedirs(DATA_DIR, exist_ok=True)
DB_PATH: str = join(DATA_DIR, "data.db")
def migrate():
logger.info("Migrating to the latest schema")
log.getLogger('yoyo').setLevel(log.DEBUG)
backend = get_backend('sqlite:///' + DB_PATH)
migrations = read_migrations('./migrations')
with backend.lock():
backend.apply_migrations(backend.to_apply(migrations))
migrate()
| true
| true
|
7905150224459f866e184f0f054cc29291b92d46
| 84
|
py
|
Python
|
baloo/core/indexes/__init__.py
|
cda-group/baloo
|
0d442117c2a919b177e0a96024cbdc82762cb646
|
[
"BSD-3-Clause"
] | 11
|
2018-12-16T00:19:39.000Z
|
2021-01-06T04:56:02.000Z
|
baloo/core/indexes/__init__.py
|
monner/baloo
|
f6e05e35b73a75e8a300754c6bdc575e5f2d53b9
|
[
"BSD-3-Clause"
] | 6
|
2019-02-21T23:22:14.000Z
|
2021-06-01T22:39:32.000Z
|
baloo/core/indexes/__init__.py
|
monner/baloo
|
f6e05e35b73a75e8a300754c6bdc575e5f2d53b9
|
[
"BSD-3-Clause"
] | 6
|
2019-02-12T14:30:43.000Z
|
2020-03-15T17:17:56.000Z
|
from .base import Index
from .multi import MultiIndex
from .range import RangeIndex
| 21
| 29
| 0.821429
|
from .base import Index
from .multi import MultiIndex
from .range import RangeIndex
| true
| true
|
790515c8cf3166a993914ebc971c2ce9dcc36d51
| 4,565
|
py
|
Python
|
horovod/spark/keras/tensorflow.py
|
zmldndx/horovod
|
89175b7381e44f5eb3023d7bc22ba768b31fee53
|
[
"Apache-2.0"
] | 4
|
2019-05-07T06:56:17.000Z
|
2020-06-02T21:07:50.000Z
|
horovod/spark/keras/tensorflow.py
|
kyocen/horovod
|
e9b1e228ff92eb7f65d9aea2d36f23b327df28bd
|
[
"Apache-2.0"
] | 1
|
2020-08-14T16:55:36.000Z
|
2020-09-03T18:32:24.000Z
|
horovod/spark/keras/tensorflow.py
|
kyocen/horovod
|
e9b1e228ff92eb7f65d9aea2d36f23b327df28bd
|
[
"Apache-2.0"
] | 3
|
2019-09-17T06:09:09.000Z
|
2022-03-09T03:21:42.000Z
|
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
import json
from six.moves import zip
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import optimizers
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import serialization
def save_tf_keras_optimizer(optimizer, h5py_file):
if isinstance(optimizer, optimizers.TFOptimizer):
logging.warning(
'TensorFlow optimizers do not '
'make it possible to access '
'optimizer attributes or optimizer state '
'after instantiation. '
'As a result, we cannot save the optimizer '
            'as part of the model save file. '
'You will have to compile your model again after loading it. '
'Prefer using a Keras optimizer instead '
'(see keras.io/optimizers).')
else:
h5py_file.attrs['training_config'] = json.dumps(
{
'optimizer_config': {
'class_name': optimizer.__class__.__name__,
'config': optimizer.get_config()
}
},
default=serialization.get_json_type).encode('utf8')
# Save optimizer weights.
symbolic_weights = getattr(optimizer, 'weights')
if symbolic_weights:
optimizer_weights_group = h5py_file.create_group('optimizer_weights')
weight_values = K.batch_get_value(symbolic_weights)
weight_names = []
for w, val in zip(symbolic_weights, weight_values):
name = str(w.name)
weight_names.append(name.encode('utf8'))
optimizer_weights_group.attrs['weight_names'] = weight_names
for name, val in zip(weight_names, weight_values):
param_dset = optimizer_weights_group.create_dataset(
name, val.shape, dtype=val.dtype)
if not val.shape:
# scalar
param_dset[()] = val
else:
param_dset[:] = val
h5py_file.flush()
def load_tf_keras_optimizer(h5py_file, custom_objects=None):
if not custom_objects:
custom_objects = {}
def convert_custom_objects(obj):
"""Handles custom object lookup.
Arguments:
obj: object, dict, or list.
Returns:
The same structure, where occurrences
of a custom object name have been replaced
with the custom object.
"""
if isinstance(obj, list):
deserialized = []
for value in obj:
deserialized.append(convert_custom_objects(value))
return deserialized
if isinstance(obj, dict):
deserialized = {}
for key, value in obj.items():
deserialized[key] = convert_custom_objects(value)
return deserialized
if obj in custom_objects:
return custom_objects[obj]
return obj
optimizer, optimizer_weight_values = None, None
# instantiate optimizer
training_config = h5py_file.attrs.get('training_config')
training_config = json.loads(training_config.decode('utf-8'))
optimizer_config = training_config['optimizer_config']
optimizer = optimizers.deserialize(optimizer_config, custom_objects=custom_objects)
if 'optimizer_weights' in h5py_file:
optimizer_weights_group = h5py_file['optimizer_weights']
optimizer_weight_names = [
n.decode('utf8')
for n in optimizer_weights_group.attrs['weight_names']
]
optimizer_weight_values = [optimizer_weights_group[n].value for n in
optimizer_weight_names]
if optimizer_weight_values:
optimizer.set_weights(optimizer_weight_values)
return optimizer
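# Usage sketch (added; 'optimizer.h5' is a placeholder path):
#   import h5py
#   with h5py.File('optimizer.h5', 'w') as f:
#       save_tf_keras_optimizer(model.optimizer, f)
#   with h5py.File('optimizer.h5', 'r') as f:
#       optimizer = load_tf_keras_optimizer(f)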
| 38.686441
| 87
| 0.62782
|
from __future__ import absolute_import
import json
from six.moves import zip
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import optimizers
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import serialization
def save_tf_keras_optimizer(optimizer, h5py_file):
if isinstance(optimizer, optimizers.TFOptimizer):
logging.warning(
'TensorFlow optimizers do not '
'make it possible to access '
'optimizer attributes or optimizer state '
'after instantiation. '
'As a result, we cannot save the optimizer '
            'as part of the model save file. '
'You will have to compile your model again after loading it. '
'Prefer using a Keras optimizer instead '
'(see keras.io/optimizers).')
else:
h5py_file.attrs['training_config'] = json.dumps(
{
'optimizer_config': {
'class_name': optimizer.__class__.__name__,
'config': optimizer.get_config()
}
},
default=serialization.get_json_type).encode('utf8')
symbolic_weights = getattr(optimizer, 'weights')
if symbolic_weights:
optimizer_weights_group = h5py_file.create_group('optimizer_weights')
weight_values = K.batch_get_value(symbolic_weights)
weight_names = []
for w, val in zip(symbolic_weights, weight_values):
name = str(w.name)
weight_names.append(name.encode('utf8'))
optimizer_weights_group.attrs['weight_names'] = weight_names
for name, val in zip(weight_names, weight_values):
param_dset = optimizer_weights_group.create_dataset(
name, val.shape, dtype=val.dtype)
if not val.shape:
param_dset[()] = val
else:
param_dset[:] = val
h5py_file.flush()
def load_tf_keras_optimizer(h5py_file, custom_objects=None):
if not custom_objects:
custom_objects = {}
def convert_custom_objects(obj):
if isinstance(obj, list):
deserialized = []
for value in obj:
deserialized.append(convert_custom_objects(value))
return deserialized
if isinstance(obj, dict):
deserialized = {}
for key, value in obj.items():
deserialized[key] = convert_custom_objects(value)
return deserialized
if obj in custom_objects:
return custom_objects[obj]
return obj
optimizer, optimizer_weight_values = None, None
training_config = h5py_file.attrs.get('training_config')
training_config = json.loads(training_config.decode('utf-8'))
optimizer_config = training_config['optimizer_config']
optimizer = optimizers.deserialize(optimizer_config, custom_objects=custom_objects)
if 'optimizer_weights' in h5py_file:
optimizer_weights_group = h5py_file['optimizer_weights']
optimizer_weight_names = [
n.decode('utf8')
for n in optimizer_weights_group.attrs['weight_names']
]
optimizer_weight_values = [optimizer_weights_group[n].value for n in
optimizer_weight_names]
if optimizer_weight_values:
optimizer.set_weights(optimizer_weight_values)
return optimizer
| true
| true
|
7905160bb3648d032a60702559d06964217e6bdb
| 66,711
|
py
|
Python
|
ocsmesh/mesh/mesh.py
|
noaa-ocs-modeling/OCSMesh
|
d7f97196a0174f3818bfa036a18088acbeff4c78
|
[
"CC0-1.0"
] | null | null | null |
ocsmesh/mesh/mesh.py
|
noaa-ocs-modeling/OCSMesh
|
d7f97196a0174f3818bfa036a18088acbeff4c78
|
[
"CC0-1.0"
] | 1
|
2021-11-19T01:10:10.000Z
|
2021-11-19T15:39:56.000Z
|
ocsmesh/mesh/mesh.py
|
noaa-ocs-modeling/OCSMesh
|
d7f97196a0174f3818bfa036a18088acbeff4c78
|
[
"CC0-1.0"
] | 1
|
2021-11-19T00:49:41.000Z
|
2021-11-19T00:49:41.000Z
|
"""This module defines classes that handle mesh and mesh operations.
This module defines a factory class for mesh, similar to geometry and
size function factory class. It also defines concrete mesh types.
Currently two concrete mesh types are defined for generic Euclidean
mesh and specific 2D Euclidean mesh.
"""
from functools import lru_cache
import logging
from multiprocessing import Pool, cpu_count
import os
import pathlib
from collections import defaultdict
import warnings
from typing import Union, List, Tuple, Dict, Any, Optional
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
import pandas as pd
import geopandas as gpd
from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk
from matplotlib.path import Path
from matplotlib.transforms import Bbox
from matplotlib.tri import Triangulation
from matplotlib.axes import Axes
import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
from pyproj import CRS, Transformer
from scipy.interpolate import (
RectBivariateSpline, RegularGridInterpolator)
from shapely.geometry import (
LineString, box, Polygon, MultiPolygon)
from shapely.ops import polygonize, linemerge
from ocsmesh import utils
from ocsmesh.raster import Raster
from ocsmesh.mesh.base import BaseMesh
from ocsmesh.mesh.parsers import grd, sms2dm
_logger = logging.getLogger(__name__)
class EuclideanMesh(BaseMesh):
"""Generic Euclidean mesh class
This is the base class for 2D or 3D Euclidean mesh.
Attributes
----------
tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t]
Reference to underlying jigsaw mesh's triangle element
structure.
triangles : npt.NDArray[np.float32]
Array of node index for triangular elements.
quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t]
Reference to underlying jigsaw mesh's quadrangle element
structure.
quads : npt.NDArray[np.float32]
Array of node index for quadrangular elements.
crs : CRS
        Coordinate reference system of the mesh object
hull : Hull
Handle to hull calculation helper object
nodes : Nodes
Handle to node handler helper object
elements : Elements
Handle to element handler helper object
Methods
-------
write(path, overwrite=False, format='grd')
Export mesh object to the disk in the specified format.
"""
def __init__(self, mesh: jigsaw_msh_t) -> None:
"""Initialize Euclidean mesh object.
Parameters
----------
mesh : jigsaw_msh_t
The underlying jigsaw_msh_t object to hold onto mesh data.
Raises
------
TypeError
If input mesh is not of `jigsaw_msh_t` type.
ValueError
If input mesh's `mshID` is not equal to ``euclidean-mesh``.
If input mesh has `crs` property which is not of `CRS` type.
"""
if not isinstance(mesh, jigsaw_msh_t):
raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, '
f'not type {type(mesh)}.')
if mesh.mshID != 'euclidean-mesh':
raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, '
"but expected 'euclidean-mesh'.")
if not hasattr(mesh, 'crs'):
warnings.warn('Input mesh has no CRS information.')
mesh.crs = None
else:
if not isinstance(mesh.crs, CRS):
raise ValueError(f'crs property must be of type {CRS}, not '
f'type {type(mesh.crs)}.')
self._hull = None
self._nodes = None
self._elements = None
self._msh_t = mesh
def write(
self,
path: Union[str, os.PathLike],
overwrite: bool = False,
format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622
) -> None:
"""Export the mesh object to the disk
Parameters
----------
path : path-like
Path to which the mesh should be exported.
overwrite : bool, default=False
Whether to overwrite, if a file already exists in `path`
format : { 'grd', '2dm', 'msh', 'vtk' }
            Format of the export: GRD, SMS-2DM, jigsaw msh, or VTK.
Returns
-------
None
Raises
------
ValueError
If specified export format is **not** supported.
"""
path = pathlib.Path(path)
if path.exists() and overwrite is not True:
raise IOError(
f'File {str(path)} exists and overwrite is not True.')
if format == 'grd':
grd_dict = utils.msh_t_to_grd(self.msh_t)
if self._boundaries and self._boundaries.data:
grd_dict.update(boundaries=self._boundaries.data)
grd.write(grd_dict, path, overwrite)
elif format == '2dm':
sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite)
elif format == 'msh':
savemsh(str(path), self.msh_t)
elif format == 'vtk':
savevtk(str(path), self.msh_t)
else:
raise ValueError(f'Unhandled format {format}.')
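        # Usage sketch: mesh.write('out.grd', overwrite=True) writes GRD by
        # default; format='msh' and format='vtk' go through jigsawpy's
        # savemsh/savevtk as shown above.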
@property
def tria3(self):
"""Reference to underlying mesh tirangle element structure"""
return self.msh_t.tria3
@property
def triangles(self):
"""Reference to underlying mesh triangle element index array"""
return self.msh_t.tria3['index']
@property
def quad4(self):
"""Reference to underlying mesh quadrangle element structure"""
return self.msh_t.quad4
@property
def quads(self):
"""Reference to underlying mesh quadrangle element index array"""
return self.msh_t.quad4['index']
@property
def crs(self):
"""Reference to underlying mesh crs"""
return self.msh_t.crs
@property
def hull(self):
"""Reference to hull calculator helper object"""
if self._hull is None:
self._hull = Hull(self)
return self._hull
@property
def nodes(self):
"""Reference to node handler helper object"""
if self._nodes is None:
self._nodes = Nodes(self)
return self._nodes
@property
def elements(self):
"""Reference to element handler helper object"""
if self._elements is None:
self._elements = Elements(self)
return self._elements
class EuclideanMesh2D(EuclideanMesh):
"""2D Euclidean mesh definition
Attributes
----------
boundaries
vert2
value
bbox
Methods
-------
get_bbox(crs=None, output_type=None)
Gets the bounding box of the mesh elements.
tricontourf(**kwargs)
Create a contour plot from the value data on the nodes of
the mesh
interpolate(raster, method='spline', nprocs=None)
        Interpolate raster data on the nodes.
get_contour(level)
Get contour lines from node value data at specified levels.
get_multipolygon(zmin=None, zmax=None)
Get multipolygon of the mesh hull.
"""
def __init__(self, mesh: jigsaw_msh_t) -> None:
"""Initialize Euclidean 2D mesh object.
Parameters
----------
mesh : jigsaw_msh_t
The underlying jigsaw_msh_t object to hold onto mesh data.
Raises
------
ValueError
If number of mesh dimensions is not equal to ``2``.
"""
super().__init__(mesh)
self._boundaries = None
if mesh.ndims != +2:
raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, '
"but expected ndims=2.")
if len(self.msh_t.value) == 0:
self.msh_t.value = np.array(
np.full((self.vert2['coord'].shape[0], 1), np.nan))
def get_bbox(
self,
crs: Union[str, CRS, None] = None,
output_type: Literal[None, 'polygon', 'bbox'] = None
) -> Union[Polygon, Bbox]:
"""Get the bounding box of mesh elements.
Parameters
----------
crs : str or CRS or None, default=None
CRS to transform the calculated bounding box into before
returning
output_type : { None, 'polygon', 'bbox'}, default=None
Output type
Returns
-------
Polygon or Bbox
Bounding box of the mesh elements.
"""
output_type = 'polygon' if output_type is None else output_type
xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0])
ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1])
crs = self.crs if crs is None else crs
if crs is not None:
if not self.crs.equals(crs):
transformer = Transformer.from_crs(
self.crs, crs, always_xy=True)
# pylint: disable=E0633
(xmin, xmax), (ymin, ymax) = transformer.transform(
(xmin, xmax), (ymin, ymax))
if output_type == 'polygon': # pylint: disable=R1705
return box(xmin, ymin, xmax, ymax)
elif output_type == 'bbox':
return Bbox([[xmin, ymin], [xmax, ymax]])
raise TypeError(
            'Argument output_type must be a string literal \'polygon\' or '
            '\'bbox\'')
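        # Usage sketch: get_bbox(output_type='bbox') returns a matplotlib
        # Bbox (handy for axis limits); the default 'polygon' returns a
        # shapely box over the node coordinates.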
@property
def boundaries(self):
"""Handle to boundaries calculator helper object"""
if self._boundaries is None:
self._boundaries = Boundaries(self)
return self._boundaries
def tricontourf(self, **kwargs) -> Axes:
"""Generate contour for the data of triangular elements of the mesh
Parameters
----------
**kwargs : dict, optional
Passed to underlying `matplotlib` API.
Returns
-------
Axes
Axes on which the filled contour is drawn.
"""
return utils.tricontourf(self.msh_t, **kwargs)
def interpolate(
self,
raster: Union[Raster, List[Raster]],
method: Literal['spline', 'linear', 'nearest'] = 'spline',
nprocs: Optional[int] = None,
info_out_path: Union[pathlib.Path, str, None] = None,
filter_by_shape: bool = False
) -> None:
"""Interplate values from raster inputs to the mesh nodes.
Parameters
----------
raster : Raster or list of Raster
A single or a list of rasters from which values are
interpolated onto the mesh
method : {'spline', 'linear', 'nearest'}, default='spline'
Method of interpolation.
nprocs : int or None, default=None
Number of workers to use when interpolating data.
info_out_path : pathlike or str or None
Path for the output node interpolation information file
filter_by_shape : bool
Flag for node filtering based on raster bbox or shape
Returns
-------
None
"""
if isinstance(raster, Raster):
raster = [raster]
nprocs = -1 if nprocs is None else nprocs
nprocs = cpu_count() if nprocs == -1 else nprocs
# Fix an issue on Jupyter notebook where having pool execute
# interpolation even in case of nprocs == 1 would results in
# application getting stuck
if nprocs > 1:
with Pool(processes=nprocs) as pool:
res = pool.starmap(
_mesh_interpolate_worker,
[(self.vert2['coord'], self.crs,
_raster.tmpfile, _raster.chunk_size,
method, filter_by_shape)
for _raster in raster]
)
pool.join()
else:
res = [_mesh_interpolate_worker(
self.vert2['coord'], self.crs,
_raster.tmpfile, _raster.chunk_size,
method, filter_by_shape)
for _raster in raster]
values = self.msh_t.value.flatten()
interp_info_map = {}
for (mask, _values), rast in zip(res, raster):
values[mask] = _values
if info_out_path is not None:
vert_cs = None
rast_crs = rast.crs
if rast_crs.is_vertical:
if rast_crs.sub_crs_list is not None:
for sub_crs in rast_crs.sub_crs_list:
if sub_crs.is_vertical:
# TODO: What if sub CRS is compound, etc.?
vert_cs = sub_crs
elif rast_crs.source_crs is not None:
if rast_crs.source_crs.is_vertical:
# TODO: What if source CRS is compound, etc.?
vert_cs = rast_crs.source_crs
                vert_cs_name = vert_cs.name if vert_cs is not None else None
idxs = np.argwhere(mask).ravel()
interp_info_map.update({
idx: (rast.path, vert_cs_name)
for idx in idxs})
if info_out_path is not None:
coords = self.msh_t.vert2['coord'].copy()
geo_coords = coords.copy()
if not self.crs.is_geographic:
transformer = Transformer.from_crs(
self.crs, CRS.from_epsg(4326), always_xy=True)
# pylint: disable=E0633
geo_coords[:, 0], geo_coords[:, 1] = transformer.transform(
coords[:, 0], coords[:, 1])
vd_idxs=np.array(list(interp_info_map.keys()))
df_interp_info = pd.DataFrame(
index=vd_idxs,
data={
'x': coords[vd_idxs, 0],
'y': coords[vd_idxs, 1],
                    'lat': geo_coords[vd_idxs, 1],
                    'lon': geo_coords[vd_idxs, 0],
'elev': values[vd_idxs],
'crs': [i[1] for i in interp_info_map.values()],
'source': [i[0] for i in interp_info_map.values()]
}
)
df_interp_info.sort_index().to_csv(
info_out_path, header=False, index=True)
self.msh_t.value = np.array(values.reshape((values.shape[0], 1)),
dtype=jigsaw_msh_t.REALS_t)
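        # Usage sketch (placeholder paths): when a list of rasters is passed,
        # they are interpolated in order, so later rasters overwrite earlier
        # values on overlapping nodes:
        #   mesh.interpolate([Raster('coarse.tif'), Raster('fine.tif')], nprocs=4)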
def get_contour(self, level: float) -> LineString:
"""Extract contour lines at the specified `level` from mesh values
Parameters
----------
level : float
The level at which contour lines must be extracted.
Returns
-------
LineString
Extracted and merged contour lines.
Raises
------
ValueError
If mesh has nodes that have null value `np.nan`.
"""
# ONLY SUPPORTS TRIANGLES
for attr in ['quad4', 'hexa8']:
if len(getattr(self.msh_t, attr)) > 0:
warnings.warn(
'Mesh contour extraction only supports triangles')
coords = self.msh_t.vert2['coord']
values = self.msh_t.value
trias = self.msh_t.tria3['index']
if np.any(np.isnan(values)):
raise ValueError(
"Mesh contains invalid values. Raster values must"
"be interpolated to the mesh before generating "
"boundaries.")
x, y = coords[:, 0], coords[:, 1]
features = []
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
_logger.debug('Computing contours...')
fig, ax = plt.subplots()
ax.tricontour(
x, y, trias, values.ravel(), levels=[level])
plt.close(fig)
for path_collection in ax.collections:
for path in path_collection.get_paths():
try:
features.append(LineString(path.vertices))
except ValueError:
# LineStrings must have at least 2 coordinate tuples
pass
return linemerge(features)
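        # Usage sketch: mesh.get_contour(level=0.0) returns the merged
        # zero-elevation contour; interpolate() must have populated the
        # node values first, otherwise the ValueError above is raised.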
def get_multipolygon(
self,
zmin: Optional[float] = None,
zmax: Optional[float] = None
) -> MultiPolygon:
"""Calculate multipolygon covering mesh elements (hull)
Parameters
----------
zmin : float or None
Minimum elevation to consider for multipolygon extraction
zmax : float or None
Maximum elevation to consider for multipolygon extraction
Returns
-------
MultiPolygon
Calculated multipolygon shape
"""
values = self.msh_t.value
mask = np.ones(values.shape)
if zmin is not None:
mask = np.logical_and(mask, values > zmin)
if zmax is not None:
mask = np.logical_and(mask, values < zmax)
# Assuming value is of shape (N, 1)
# ravel to make sure it's 1D
verts_in = np.argwhere(mask).ravel()
clipped_mesh = utils.clip_mesh_by_vertex(
self.msh_t, verts_in,
can_use_other_verts=True)
boundary_edges = utils.get_boundary_edges(clipped_mesh)
coords = clipped_mesh.vert2['coord']
coo_to_idx = {
tuple(coo): idx
for idx, coo in enumerate(coords)}
poly_gen = polygonize(coords[boundary_edges])
polys = list(poly_gen)
polys = sorted(polys, key=lambda p: p.area, reverse=True)
rings = [p.exterior for p in polys]
n_parents = np.zeros((len(rings),))
represent = np.array([r.coords[0] for r in rings])
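        # Count, for each ring, how many of the other rings contain its
        # first vertex; the rings are sorted by area, so only earlier
        # (larger) rings can enclose later ones.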
for e, ring in enumerate(rings[:-1]):
path = Path(ring.coords, closed=True)
n_parents = n_parents + np.pad(
np.array([
path.contains_point(pt) for pt in represent[e+1:]]),
(e+1, 0), 'constant', constant_values=0)
        # Keep rings contained by an even number of other rings; an odd
        # count means the ring bounds a hole, not an actual polygon.
polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2]
return MultiPolygon(polys)
@property
def vert2(self):
"""Reference to underlying mesh 2D vertices structure"""
return self.msh_t.vert2
@property
def value(self):
"""Reference to underlying mesh values"""
return self.msh_t.value
@property
def bbox(self):
"""Calculates and returns bounding box of the mesh hull.
See Also
--------
get_bbox
"""
return self.get_bbox()
MeshType = Union[EuclideanMesh2D]
class Mesh(BaseMesh):
"""Mesh object factory
Factory class that creates and returns concrete mesh object
based on the input types.
Methods
-------
open(path, crs=None)
Read mesh data from a file on disk.
"""
def __new__(cls, mesh: jigsaw_msh_t) -> MeshType:
"""Construct a concrete mesh object.
Parameters
----------
mesh : jigsaw_msh_t
Input jigsaw mesh object
Returns
-------
MeshType
Mesh object created from the input
Raises
------
TypeError
Input `mesh` is not a `jigsaw_msh_t` object.
NotImplementedError
Input `mesh` object cannot be used to create a EuclideanMesh2D
"""
if not isinstance(mesh, jigsaw_msh_t):
raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, '
f'not type {type(mesh)}.')
if mesh.mshID == 'euclidean-mesh':
if mesh.ndims == 2:
return EuclideanMesh2D(mesh)
raise NotImplementedError(
f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not '
'handled.')
raise NotImplementedError(f'mshID={mesh.mshID} not handled.')
@staticmethod
def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType:
"""Read mesh from a file on disk
Parameters
----------
path : path-like
            Path to the file containing the mesh.
crs : CRS or None, default=None
CRS of the mesh in the path. Overwrites any info read
from file, no transformation is done.
Returns
-------
MeshType
Mesh object created by reading the file.
Raises
------
TypeError
If cannot determine the input mesh type.
Notes
-----
        Currently only SMS-2DM, GRD, and jigsaw msh formats are supported for
reading.
"""
try:
msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs))
msh_t.value = np.negative(msh_t.value)
return Mesh(msh_t)
except Exception as e: #pylint: disable=W0703
if 'not a valid grd file' in str(e):
pass
else:
raise e
try:
return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs)))
except ValueError:
pass
try:
msh_t = jigsaw_msh_t()
loadmsh(msh_t, path)
msh_t.crs = crs
return Mesh(msh_t)
except Exception as e: #pylint: disable=W0703
pass
raise TypeError(
f'Unable to automatically determine file type for {str(path)}.')
class Rings:
"""Helper class for handling mesh rings.
This is a helper class to manage the calculation of internal
and external rings of the mesh polygon or hull.
Attributes
----------
Methods
-------
__call__()
Returns all rings of the mesh hull
interior()
Return the interior rings of the mesh hull
exterior()
Return the exterior rings of the mesh hull
"""
def __init__(self, mesh: EuclideanMesh) -> None:
"""Initializes the ring calculator object for the input `mesh`
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object calculates rings.
"""
self.mesh = mesh
@lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"""Calcluates all the polygons of the mesh and extracts its rings.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing all rings of the mesh hull polygon.
The rings are in the form of `shapely.geometry.LinearRing`.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
        is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
polys = utils.get_mesh_polygons(self.mesh.msh_t)
data = []
bnd_id = 0
for poly in polys:
data.append({
"geometry": poly.exterior,
"bnd_id": bnd_id,
"type": 'exterior'
})
for interior in poly.interiors:
data.append({
"geometry": interior,
"bnd_id": bnd_id,
"type": 'interior'
})
bnd_id = bnd_id + 1
return gpd.GeoDataFrame(data, crs=self.mesh.crs)
def exterior(self) -> gpd.GeoDataFrame:
"""Extracts the exterior ring from the results of `__call__`.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing exterior ring of the mesh hull polygon.
"""
return self().loc[self()['type'] == 'exterior']
def interior(self) -> gpd.GeoDataFrame:
"""Extracts the interior rings from the results of `__call__`.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing interior rings of the mesh hull polygon.
"""
return self().loc[self()['type'] == 'interior']
class Edges:
"""Helper class for handling mesh boundary edges.
Attributes
----------
Methods
-------
__call__()
Return all boundary edges of the mesh hull
interior()
Return the interior boundary edges of the mesh hull
exterior()
Return the exterior boundary edges of the mesh hull
"""
def __init__(self, mesh: EuclideanMesh) -> None:
"""Initializes the edge calculator object for the input `mesh`
Parameters
----------
mesh : EuclideanMesh
Input mesh for which boundary edges are calculated.
"""
self.mesh = mesh
@lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"""Calculates all boundary edges for the mesh.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing all boundary edges of the mesh in
the form of `shapely.geometry.LineString` for each
coordinate couple.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
        is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
data = []
for ring in self.mesh.hull.rings().itertuples():
coords = ring.geometry.coords
for i in range(1, len(coords)):
data.append({
"geometry": LineString([coords[i-1], coords[i]]),
"bnd_id": ring.bnd_id,
"type": ring.type})
return gpd.GeoDataFrame(data, crs=self.mesh.crs)
def exterior(self) -> gpd.GeoDataFrame:
"""Retruns exterior boundary edges from the results of `__call__`
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing exterior boundary edges of the mesh in
the form of line string couples.
"""
return self().loc[self()['type'] == 'exterior']
def interior(self) -> gpd.GeoDataFrame:
"""Retruns interior boundary edges from the results of `__call__`
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing interior boundary edges of the mesh in
the form of line string couples.
"""
return self().loc[self()['type'] == 'interior']
class Hull:
"""Helper class for handling mesh hull calculations.
This class wraps the functionality of ring and edge classes and
adds additional methods to calculate or extract the polygon or
triangulation of the mesh
Attributes
----------
Methods
-------
__call__()
Calculates all the polys from all mesh rings
exterior()
Calculates the exterior rings of the mesh hull.
interior()
Calculates the interior rings of the mesh hull.
implode()
Calculates all the polygons (including isolated domain
islands) in the mesh and returns a table of polygons.
multipolygon()
Calculates all the polygons (including isolated domain
islands) in the mesh and returns a multipolygon.
triangulation()
        Calculates a triangulation from the triangles and quads of
the mesh.
"""
def __init__(self, mesh: EuclideanMesh) -> None:
"""Initialize helper class for handling mesh hull calculations
Parameters
----------
mesh : EuclideanMesh
Input mesh for which hull calculations are done.
Notes
-----
This object holds onto the ring and edge calculator objects
as well as a reference to the input mesh.
"""
self.mesh = mesh
self.rings = Rings(mesh)
self.edges = Edges(mesh)
@lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"""Calculates all polygons of the mesh including domain islands
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing all polygons of the mesh.
See Also
--------
implode()
Dataframe with a single combined multipolygon.
multipolygon()
`shapely` multipolygon shape of combined mesh polygons.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
        is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
data = []
for bnd_id in np.unique(self.rings()['bnd_id'].tolist()):
exterior = self.rings().loc[
(self.rings()['bnd_id'] == bnd_id) &
(self.rings()['type'] == 'exterior')]
interiors = self.rings().loc[
(self.rings()['bnd_id'] == bnd_id) &
(self.rings()['type'] == 'interior')]
data.append({
"geometry": Polygon(
exterior.iloc[0].geometry.coords,
[row.geometry.coords for _, row
in interiors.iterrows()]),
"bnd_id": bnd_id
})
return gpd.GeoDataFrame(data, crs=self.mesh.crs)
def exterior(self) -> gpd.GeoDataFrame:
"""Creates polygons from exterior rings of the mesh hull
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Polygons created from exterior rings of the mesh hull
"""
data = []
for exterior in self.rings().loc[
self.rings()['type'] == 'exterior'].itertuples():
data.append({"geometry": Polygon(exterior.geometry.coords)})
return gpd.GeoDataFrame(data, crs=self.mesh.crs)
def interior(self) -> gpd.GeoDataFrame:
"""Creates polygons from interior rings of the mesh hull
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Polygons created from interior rings of the mesh hull
"""
data = []
for interior in self.rings().loc[
self.rings()['type'] == 'interior'].itertuples():
data.append({"geometry": Polygon(interior.geometry.coords)})
return gpd.GeoDataFrame(data, crs=self.mesh.crs)
def implode(self) -> gpd.GeoDataFrame:
"""Creates a dataframe from mesh polygons.
Parameters
----------
Returns
------
gpd.GeoDataFrame
Dataframe containing polygons of the mesh.
See Also
--------
__call__()
Dataframe with multiple polygon and boundary ID entries
of the mesh polygons.
multipolygon()
`shapely` multipolygon shape of combined mesh polygons.
Notes
-----
The difference of the return value of this method and
`__call__` is that the `implode` returns a dataframe with
        a single `MultiPolygon` whereas `__call__` returns a
dataframe with multiple `Polygon` entries with associated
`bnd_id`.
"""
return gpd.GeoDataFrame(
{"geometry": MultiPolygon([polygon.geometry for polygon
in self().itertuples()])},
crs=self.mesh.crs)
def multipolygon(self) -> MultiPolygon:
"""Returns mesh multi-polygons.
Parameters
----------
Returns
------
MultiPolygon
Combined shape of polygons of the mesh.
See Also
--------
__call__()
Dataframe with multiple polygon and boundary ID entries
of the mesh polygons.
implode()
Dataframe with a single combined multipolygon of the mesh
polygons.
Notes
-----
The difference of the return value of this method and `implode`
        is that `multipolygon` returns a `MultiPolygon` object, whereas
        `implode` returns a dataframe wrapping the multipolygon
object.
"""
mp = self.implode().iloc[0].geometry
if isinstance(mp, Polygon):
mp = MultiPolygon([mp])
return mp
def triangulation(self) -> Triangulation:
"""Create triangulation object from all the mesh elements.
Parameters
----------
Returns
-------
Triangulation
            The `matplotlib` triangulation object created from all
the elements of the parent mesh.
Notes
-----
Currently only tria3 and quad4 elements are considered.
"""
triangles = self.mesh.msh_t.tria3['index'].tolist()
for quad in self.mesh.msh_t.quad4['index']:
triangles.extend([
[quad[0], quad[1], quad[3]],
[quad[1], quad[2], quad[3]]
])
return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles)
class Nodes:
"""Helper class for handling mesh nodes.
Attributes
----------
id_to_index : dict
Mapping to convert node IDs to node indexes.
index_to_id : dict
Mapping to convert node indexes to node IDs.
Methods
-------
__call__()
Creates a mapping between node IDs (index + 1) and node
coordinates
id()
Returns list of node IDs.
index()
Return array of node indices.
coords()
Return mesh coordinates.
values()
Return values stored for mesh nodes.
get_index_by_id(node_id)
Get the node index based on node ID.
get_id_by_index(index)
Get the node ID based on the node index.
"""
def __init__(self, mesh: EuclideanMesh) -> None:
"""Initializes node handler helper object.
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object handles nodes info.
"""
self.mesh = mesh
self._id_to_index = None
self._index_to_id = None
@lru_cache(maxsize=1)
def __call__(self) -> Dict[int, int]:
"""Creates a mapping between node IDs and indexes.
Parameters
----------
Returns
-------
dict
Mapping between node IDs and indexes.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
        is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
return {i+1: coord for i, coord in enumerate(self.coords())}
def id(self) -> List[int]:
"""Retrives a list of element IDs.
Parameters
----------
Returns
-------
list of int
List of node IDs as created by `__call__`
"""
return list(self().keys())
def index(self) -> npt.NDArray[int]:
"""Retrives an array of element indexes.
Parameters
----------
Returns
-------
array-like
Array of node indexes.
"""
return np.arange(len(self()))
def coords(self) -> npt.NDArray[np.float32]:
"""Retrieve the coordinates of mesh nodes
Parameters
----------
Returns
-------
array-like
Coordinates of the mesh nodes as returned by `BaseMesh.coord`
"""
return self.mesh.coord
def values(self):
"""Retrieve the values stored for mesh nodes
Parameters
----------
Returns
-------
array-like
Values on the mesh nodes as returned by `BaseMesh.values`
"""
return self.mesh.values
def get_index_by_id(self, node_id):
"""Converts mesh ID to mesh index.
Parameters
----------
node_id : int
ID of the node of interest
Returns
-------
int
Index of the node of interest
"""
return self.id_to_index[node_id]
def get_id_by_index(self, index: int):
"""Converts mesh index to mesh ID.
Parameters
----------
index : int
Index of the node of interest.
Returns
-------
int
ID of the node of interest
"""
return self.index_to_id[index]
@property
def id_to_index(self) -> Dict[int, int]:
"""Read-only property returning the mapping of ID to index
Notes
-----
Although the property is read-only, the return value object
is a cached mutable dictionary object. Modifying the mesh
without clearing the cache properly or mutating the
returned object could result in undefined behavior
"""
if self._id_to_index is None:
self._id_to_index = {node_id: index for index, node_id
in enumerate(self().keys())}
return self._id_to_index
@property
def index_to_id(self) -> Dict[int, int]:
"""Read-only property returning the mapping of index to ID
Notes
-----
Although the property is read-only, the return value object
is a cached mutable dictionary object. Modifying the mesh
without clearing the cache properly or mutating the
returned object could result in undefined behavior
"""
if self._index_to_id is None:
self._index_to_id = dict(enumerate(self().keys()))
return self._index_to_id
# def get_indexes_around_index(self, index):
# indexes_around_index = self.__dict__.get('indexes_around_index')
# if indexes_around_index is None:
# def append(geom):
# for simplex in geom:
# for i, j in permutations(simplex, 2):
# indexes_around_index[i].add(j)
# indexes_around_index = defaultdict(set)
# append(self.gr3.elements.triangles())
# append(self.gr3.elements.quads())
# self.__dict__['indexes_around_index'] = indexes_around_index
# return list(indexes_around_index[index])
class Elements:
"""Helper class for handling mesh elements.
Attributes
----------
Methods
--------
__call__()
Creates a mapping between element IDs and associated node IDs.
id()
Returns a list of element IDs.
index()
Returns an array of element indexes.
array()
Creates and returns a masked array of element node indices.
triangles()
Creates and returns a 2D array of triangular element node indices.
quads()
Creates and returns a 2D array of quadrangular element node indices.
triangulation()
        Calculates a triangulation from the triangles and quads of
the mesh.
geodataframe()
        Creates and returns a dataframe with polygon entries for
each element.
"""
def __init__(self, mesh: EuclideanMesh) -> None:
"""Initialize the element handler helper object.
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object handles elements info.
"""
self.mesh = mesh
@lru_cache(maxsize=1)
def __call__(self) -> Dict[int, npt.NDArray[int]]:
"""Creates a mapping between element IDs and associated node IDs.
Parameters
----------
Returns
-------
dict
            Mapping between element IDs and associated node IDs
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
        is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
elements = {i+1: index+1 for i, index
in enumerate(self.mesh.msh_t.tria3['index'])}
elements.update({i+len(elements)+1: index+1 for i, index
in enumerate(self.mesh.msh_t.quad4['index'])})
return elements
@lru_cache(maxsize=1)
def id(self) -> List[int]:
"""Retrieves the list of element IDs as returned by `__call__`
Parameters
----------
Returns
-------
list of int
List of element IDs.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
        is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
return list(self().keys())
@lru_cache(maxsize=1)
def index(self) -> npt.NDArray[int]:
"""Retrieves an array of element indices
Parameters
----------
Returns
-------
npt.NDArray
1D array of element indices.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
        is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
return np.arange(len(self()))
def array(self) -> npt.NDArray[int]:
"""Retrieves a masked array of element node IDs.
The return value is ``n x m`` where ``n`` is the number of
elements and ``m`` is the maximum number of element nodes, e.g.
if there are only trias, then it's 3, for trias and quads it
is 4.
Parameters
----------
Returns
-------
npt.NDArray
Masked array where elements with fewer associated nodes
have trailing masked node columns in the array.
"""
rank = int(max(map(len, self().values())))
array = np.full((len(self()), rank), -1)
for i, elem_nd_ids in enumerate(self().values()):
row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids)))
array[i, :len(row)] = row
return np.ma.masked_equal(array, -1)
@lru_cache(maxsize=1)
def triangles(self) -> npt.NDArray[int]:
"""Retrieves an array of tria element node indices
Parameters
----------
Returns
-------
npt.NDArray
2D array of element nodes for triangle nodes
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
        is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
return np.array(
[list(map(self.mesh.nodes.get_index_by_id, element))
for element in self().values()
if len(element) == 3])
@lru_cache(maxsize=1)
def quads(self):
"""Retrieves an array of quad element node indices
Parameters
----------
Returns
-------
npt.NDArray
2D array of element nodes for quadrangle nodes
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
        is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
return np.array(
[list(map(self.mesh.nodes.get_index_by_id, element))
for element in self().values()
if len(element) == 4])
def triangulation(self) -> Triangulation:
"""Create triangulation object from all the mesh elements.
Parameters
----------
Returns
-------
Triangulation
            The `matplotlib` triangulation object created from all
the elements of the parent mesh.
Notes
-----
Currently only tria3 and quad4 elements are considered.
"""
triangles = self.triangles().tolist()
for quad in self.quads():
# TODO: Not tested.
triangles.append([quad[0], quad[1], quad[3]])
triangles.append([quad[1], quad[2], quad[3]])
return Triangulation(
self.mesh.coord[:, 0],
self.mesh.coord[:, 1],
triangles)
def geodataframe(self) -> gpd.GeoDataFrame:
"""Create polygons for each element and return in dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe created from entries of `Polygon` type for
each element.
"""
data = []
for elem_id, elem_nd_ids in self().items():
data.append({
'geometry': Polygon(
self.mesh.coord[list(
map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]),
'id': elem_id})
return gpd.GeoDataFrame(data, crs=self.mesh.crs)
class Boundaries:
"""Helper class for mesh boundary condition calculation
Attributes
----------
data : dict
Mapping for boundary information
Methods
-------
__call__()
Retrieves a dataframe for all boundary shapes and type info.
__len__()
Gets the number of calculated boundary segments.
ocean()
Retrieves a dataframe containing shapes and type info of ocean
boundaries
land()
Retrieves a dataframe containing shapes and type info of land
boundaries
interior()
Retrieves a dataframe containing shapes and type info of island
boundaries
auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1)
Automatically generate boundary information based on the
input land indicator `threshold`
"""
def __init__(self, mesh: EuclideanMesh) -> None:
"""Initialize boundary helper object
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object calculates boundaries.
"""
# TODO: Add a way to manually initialize
self.mesh = mesh
self._ocean = gpd.GeoDataFrame()
self._land = gpd.GeoDataFrame()
self._interior = gpd.GeoDataFrame()
self._data = defaultdict(defaultdict)
@lru_cache(maxsize=1)
def _init_dataframes(self) -> None:
"""Internal: Creates boundary dataframes based on boundary data
Parameters
----------
Returns
-------
None
Notes
-----
This method doesn't have any return value, but it is cached
so that on re-execution it doesn't recalculate.
"""
boundaries = self._data
ocean_boundaries = []
land_boundaries = []
interior_boundaries = []
if boundaries is not None:
for ibtype, bnds in boundaries.items():
if ibtype is None:
for bnd_id, data in bnds.items():
indexes = list(map(self.mesh.nodes.get_index_by_id,
data['indexes']))
ocean_boundaries.append({
'id': bnd_id,
"index_id": data['indexes'],
"indexes": indexes,
'geometry': LineString(self.mesh.coord[indexes])
})
elif str(ibtype).endswith('1'):
for bnd_id, data in bnds.items():
indexes = list(map(self.mesh.nodes.get_index_by_id,
data['indexes']))
interior_boundaries.append({
'id': bnd_id,
'ibtype': ibtype,
"index_id": data['indexes'],
"indexes": indexes,
'geometry': LineString(self.mesh.coord[indexes])
})
else:
for bnd_id, data in bnds.items():
_indexes = np.array(data['indexes'])
if _indexes.ndim > 1:
# ndim > 1 implies we're dealing with an ADCIRC
                            # mesh that includes boundary pairs, such as weir boundaries
new_indexes = []
for i, line in enumerate(_indexes.T):
if i % 2 != 0:
new_indexes.extend(np.flip(line))
else:
new_indexes.extend(line)
_indexes = np.array(new_indexes).flatten()
else:
_indexes = _indexes.flatten()
indexes = list(map(self.mesh.nodes.get_index_by_id,
_indexes))
land_boundaries.append({
'id': bnd_id,
'ibtype': ibtype,
"index_id": data['indexes'],
"indexes": indexes,
'geometry': LineString(self.mesh.coord[indexes])
})
self._ocean = gpd.GeoDataFrame(ocean_boundaries)
self._land = gpd.GeoDataFrame(land_boundaries)
self._interior = gpd.GeoDataFrame(interior_boundaries)
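    # Hedged sketch of the ``self._data`` layout consumed above (toy ids):
    #     {None: {0: {'indexes': [...], 'properties': {}}},   # ocean (ibtype is None)
    #      1:    {0: {'indexes': [...], 'properties': {}}},   # island (ibtype ends in '1')
    #      0:    {0: {'indexes': [...], 'properties': {}}}}   # land (any other ibtype)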
def ocean(self) -> gpd.GeoDataFrame:
"""Retrieve the ocean boundary information dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing the geometry and information of
ocean open boundary.
"""
self._init_dataframes()
return self._ocean
def land(self):
"""Retrieve the land boundary information dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing the geometry and information of
land boundary.
"""
self._init_dataframes()
return self._land
def interior(self):
"""Retrieve the island boundary information dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing the geometry and information of
island boundary.
"""
self._init_dataframes()
return self._interior
@property
def data(self) -> Dict[Optional[int], Any]:
"""Read-only property referencing the boundary data dictionary"""
return self._data
@lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"""Retrieve the dataframe for all boundaries information
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing information for all boundaries shape
and type.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, calls
to this method can result in invalid return values.
"""
self._init_dataframes()
data = []
for bnd in self.ocean().itertuples():
data.append({
'id': bnd.id,
'ibtype': None,
"index_id": bnd.index_id,
"indexes": bnd.indexes,
'geometry': bnd.geometry})
for bnd in self.land().itertuples():
data.append({
'id': bnd.id,
'ibtype': bnd.ibtype,
"index_id": bnd.index_id,
"indexes": bnd.indexes,
'geometry': bnd.geometry})
for bnd in self.interior().itertuples():
data.append({
'id': bnd.id,
'ibtype': bnd.ibtype,
"index_id": bnd.index_id,
"indexes": bnd.indexes,
'geometry': bnd.geometry})
return gpd.GeoDataFrame(data, crs=self.mesh.crs)
def __len__(self) -> int:
"""Returns the number of boundary segments"""
return len(self())
def auto_generate(
self,
threshold: float = 0.,
land_ibtype: int = 0,
interior_ibtype: int = 1,
):
"""Automatically detect boundaries based on elevation data.
Parameters
----------
threshold : float, default=0
Threshold above which nodes are considered dry nodes
for ocean vs land boundary detection
land_ibtype : int, default=0
Value to assign to land boundary type
interior_ibtype : int, default=1
Value to assign to island boundary type
Returns
-------
None
Raises
------
ValueError
If any of the values assigned to a mesh node is `np.nan`.
Notes
-----
An edge is considered dry if any of its attached nodes is
dry (i.e. the node elevation is larger than or equal to the `threshold`).
"""
values = self.mesh.value
if np.any(np.isnan(values)):
raise ValueError(
"Mesh contains invalid values. Raster values must"
"be interpolated to the mesh before generating "
"boundaries.")
coords = self.mesh.msh_t.vert2['coord']
coo_to_idx = {
tuple(coo): idx
for idx, coo in enumerate(coords)}
polys = utils.get_mesh_polygons(self.mesh.msh_t)
# TODO: Split using shapely to get bdry segments
boundaries = defaultdict(defaultdict)
bdry_type = dict
get_id = self.mesh.nodes.get_id_by_index
# generate exterior boundaries
for poly in polys:
ext_ring_coo = poly.exterior.coords
ext_ring = np.array([
(coo_to_idx[ext_ring_coo[e]],
coo_to_idx[ext_ring_coo[e + 1]])
for e, coo in enumerate(ext_ring_coo[:-1])])
# find boundary edges
edge_tag = np.full(ext_ring.shape, 0)
edge_tag[
np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1
edge_tag[
np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1
edge_tag[
np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1
edge_tag[
np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1
# sort boundary edges
ocean_boundary = []
land_boundary = []
for i, (e0, e1) in enumerate(edge_tag):
if np.any(np.asarray((e0, e1)) == 1):
land_boundary.append(tuple(ext_ring[i, :]))
elif np.any(np.asarray((e0, e1)) == -1):
ocean_boundary.append(tuple(ext_ring[i, :]))
# ocean_boundaries = utils.sort_edges(ocean_boundary)
# land_boundaries = utils.sort_edges(land_boundary)
ocean_boundaries = []
if len(ocean_boundary) != 0:
#pylint: disable=not-an-iterable
ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist())
ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs
ocean_boundaries = [
[(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]])
for e, coo in enumerate(seg.coords[:-1])]
for seg in ocean_segs]
land_boundaries = []
if len(land_boundary) != 0:
#pylint: disable=not-an-iterable
land_segs = linemerge(coords[np.array(land_boundary)].tolist())
land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs
land_boundaries = [
[(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]])
for e, coo in enumerate(seg.coords[:-1])]
for seg in land_segs]
_bnd_id = len(boundaries[None])
for bnd in ocean_boundaries:
e0, e1 = [list(t) for t in zip(*bnd)]
e0 = [get_id(vert) for vert in e0]
data = e0 + [get_id(e1[-1])]
boundaries[None][_bnd_id] = bdry_type(
indexes=data, properties={})
_bnd_id += 1
# add land boundaries
_bnd_id = len(boundaries[land_ibtype])
for bnd in land_boundaries:
e0, e1 = [list(t) for t in zip(*bnd)]
e0 = [get_id(vert) for vert in e0]
data = e0 + [get_id(e1[-1])]
boundaries[land_ibtype][_bnd_id] = bdry_type(
indexes=data, properties={})
_bnd_id += 1
# generate interior boundaries
_bnd_id = 0
interior_boundaries = defaultdict()
for poly in polys:
interiors = poly.interiors
for interior in interiors:
int_ring_coo = interior.coords
int_ring = [
(coo_to_idx[int_ring_coo[e]],
coo_to_idx[int_ring_coo[e + 1]])
for e, coo in enumerate(int_ring_coo[:-1])]
# TODO: Do we still need these?
e0, e1 = [list(t) for t in zip(*int_ring)]
if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0:
e0 = e0[::-1]
e1 = e1[::-1]
e0 = [get_id(vert) for vert in e0]
e0.append(e0[0])
interior_boundaries[_bnd_id] = e0
_bnd_id += 1
for bnd_id, data in interior_boundaries.items():
boundaries[interior_ibtype][bnd_id] = bdry_type(
indexes=data, properties={})
self._data = boundaries
self._init_dataframes.cache_clear()
self.__call__.cache_clear()
self._init_dataframes()
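# Hedged usage sketch, not part of the original module: how the Boundaries
# helper is typically driven. 'hgrid.gr3' and 'dem.tif' are placeholder
# inputs; any file accepted by Mesh.open() and Raster() would do.
def _example_auto_boundaries(mesh_path='hgrid.gr3', dem_path='dem.tif'):
    mesh = Mesh.open(mesh_path, crs=CRS.from_epsg(4326))
    mesh.interpolate(Raster(dem_path))            # values required before detection
    mesh.boundaries.auto_generate(threshold=0.)   # split exterior into ocean/land
    return mesh.boundaries()                      # GeoDataFrame of all segments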
SortedRingType = Dict[int,
Dict[Literal['exterior', 'interiors'],
Union[npt.NDArray, List[npt.NDArray]]]
]
def sort_rings(
index_rings: List[List[Tuple[int, int]]],
vertices: npt.NDArray[np.float32]) -> SortedRingType:
"""Sorts a list of index-rings.
Takes a list of unsorted index rings and sorts them into
"exterior" and "interior" components. Any doubly-nested rings
are considered exterior rings.
Parameters
----------
index_rings : List[List[Tuple[int, int]]]
Unsorted list of lists of mesh edges, each edge specified by
the indexes of its end nodes.
vertices : npt.NDArray[np.float32]
2D ``n x 2`` array of node coordinate couples.
Returns
-------
SortedRingType
Dictionary of information about polygon boundaries extracted
based on the input
Notes
-----
The return value is a mapping of ring index to a dictionary
containing exterior and interior linear ring information as
numpy arrays.
This function is not currently used, instead a different faster
approach is used for boundary and polygon calculation from
elements.
"""
# TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can
# probably be optimized using shapely.
# sort index_rings into corresponding "polygons"
areas = []
for index_ring in index_rings:
e0, e1 = [list(t) for t in zip(*index_ring)]
areas.append(float(Polygon(vertices[e0, :]).area))
# maximum area must be main mesh
idx = areas.index(np.max(areas))
exterior = index_rings.pop(idx)
areas.pop(idx)
_id = 0
_index_rings = {}
_index_rings[_id] = {
'exterior': np.asarray(exterior),
'interiors': []
}
e0, e1 = [list(t) for t in zip(*exterior)]
path = Path(vertices[e0 + [e0[0]], :], closed=True)
while len(index_rings) > 0:
# find all internal rings
potential_interiors = []
for i, index_ring in enumerate(index_rings):
e0, e1 = [list(t) for t in zip(*index_ring)]
if path.contains_point(vertices[e0[0], :]):
potential_interiors.append(i)
# filter out nested rings
real_interiors = []
for i, p_interior in reversed(
list(enumerate(potential_interiors))):
_p_interior = index_rings[p_interior]
check = [index_rings[k]
for j, k in
reversed(list(enumerate(potential_interiors)))
if i != j]
has_parent = False
for _path in check:
e0, e1 = [list(t) for t in zip(*_path)]
_path = Path(vertices[e0 + [e0[0]], :], closed=True)
if _path.contains_point(vertices[_p_interior[0][0], :]):
has_parent = True
if not has_parent:
real_interiors.append(p_interior)
# pop real rings from collection
for i in reversed(sorted(real_interiors)):
_index_rings[_id]['interiors'].append(
np.asarray(index_rings.pop(i)))
areas.pop(i)
# if no internal rings found, initialize next polygon
if len(index_rings) > 0:
idx = areas.index(np.max(areas))
exterior = index_rings.pop(idx)
areas.pop(idx)
_id += 1
_index_rings[_id] = {
'exterior': np.asarray(exterior),
'interiors': []
}
e0, e1 = [list(t) for t in zip(*exterior)]
path = Path(vertices[e0 + [e0[0]], :], closed=True)
return _index_rings
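# Hedged illustration of sort_rings I/O (toy numbers, not from the module):
#     index_rings = [[(0, 1), (1, 2), (2, 0)],    # large outer ring
#                    [(3, 4), (4, 5), (5, 3)]]    # ring nested inside it
#     sort_rings(index_rings, vertices)
#     -> {0: {'exterior': array([[0, 1], [1, 2], [2, 0]]),
#             'interiors': [array([[3, 4], [4, 5], [5, 3]])]}}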
def _mesh_interpolate_worker(
coords: npt.NDArray[np.float32],
coords_crs: CRS,
raster_path: Union[str, Path],
chunk_size: Optional[int],
method: Literal['spline', 'linear', 'nearest'] = "spline",
filter_by_shape: bool = False):
"""Interpolator worker function to be used in parallel calls
Parameters
----------
coords : npt.NDArray[np.float32]
Mesh node coordinates.
coords_crs : CRS
Coordinate reference system of the input mesh coordinates.
raster_path : str or Path
Path to the raster temporary working file.
chunk_size : int or None
Chunk size for windowing over the raster.
method : {'spline', 'linear', 'nearest'}, default='spline'
Method of interpolation.
filter_by_shape : bool
Flag for node filtering based on raster bbox or shape
Returns
-------
idxs : npt.NDArray[bool]
Mask of the nodes whose values are updated by current
interpolation
values : npt.NDArray[np.float32]
Interpolated values.
Raises
------
ValueError
If specified interpolation `method` is not supported.
"""
coords = np.array(coords)
raster = Raster(raster_path)
idxs = []
values = []
for window in raster.iter_windows(chunk_size=chunk_size, overlap=2):
if not raster.crs.equals(coords_crs):
transformer = Transformer.from_crs(
coords_crs, raster.crs, always_xy=True)
# pylint: disable=E0633
coords[:, 0], coords[:, 1] = transformer.transform(
coords[:, 0], coords[:, 1])
xi = raster.get_x(window)
yi = raster.get_y(window)
# Use masked array to ignore missing values from DEM
zi = raster.get_values(window=window, masked=True)
if not filter_by_shape:
_idxs = np.logical_and(
np.logical_and(
np.min(xi) <= coords[:, 0],
np.max(xi) >= coords[:, 0]),
np.logical_and(
np.min(yi) <= coords[:, 1],
np.max(yi) >= coords[:, 1]))
else:
shape = raster.get_multipolygon()
gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1])
_idxs = gs_pt.intersects(shape)
interp_mask = None
if method == 'spline':
f = RectBivariateSpline(
xi,
np.flip(yi),
np.flipud(zi).T,
kx=3, ky=3, s=0,
# bbox=[min(x), max(x), min(y), max(y)] # ??
)
_values = f.ev(coords[_idxs, 0], coords[_idxs, 1])
elif method in ['nearest', 'linear']:
# Inspired by StackOverflow 35807321
if np.any(zi.mask):
m_interp = RegularGridInterpolator(
(xi, np.flip(yi)),
np.flipud(zi.mask).T.astype(bool),
method=method
)
# Pick nodes NOT "contaminated" by masked values
interp_mask = m_interp(coords[_idxs]) > 0
f = RegularGridInterpolator(
(xi, np.flip(yi)),
np.flipud(zi).T,
method=method
)
_values = f(coords[_idxs])
else:
raise ValueError(
f"Invalid value method specified <{method}>!")
if interp_mask is not None:
# pylint: disable=invalid-unary-operand-type
helper = np.ones_like(_values).astype(bool)
helper[interp_mask] = False
            # drop nodes whose interpolation touched masked (nodata) cells
_idxs[_idxs] = helper
_values = _values[~interp_mask]
idxs.append(_idxs)
values.append(_values)
return (np.hstack(idxs), np.hstack(values))
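# Hedged sketch of driving the worker directly; it is normally dispatched by
# EuclideanMesh2D.interpolate through multiprocessing.Pool, and 'dem.tif' is
# a placeholder path.
def _example_interpolate_worker(mesh, raster_path='dem.tif'):
    idxs, vals = _mesh_interpolate_worker(
        mesh.vert2['coord'], mesh.crs, raster_path,
        chunk_size=None, method='linear', filter_by_shape=False)
    values = mesh.msh_t.value.flatten()
    values[idxs] = vals   # update only the nodes covered by this raster
    return values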
avg_line_length: 31.437795 | max_line_length: 95 | alphanum_fraction: 0.548111
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7905163a068e8bfb14f01c5b7b0743e77515b108 | size: 2221 | ext: py | lang: Python
max_stars_repo_path: envs/babyai/oracle/landmark_correction.py | max_stars_repo_name: AliengirlLiv/babyai | max_stars_repo_head_hexsha: 51421ee11538bf110c5b2d0c84a15f783d854e7d | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | stars_event: 2022-02-24T08:47:48.000Z .. 2022-03-23T09:44:22.000Z
max_issues_repo_path: envs/babyai/oracle/landmark_correction.py | max_issues_repo_name: AliengirlLiv/babyai | max_issues_repo_head_hexsha: 51421ee11538bf110c5b2d0c84a15f783d854e7d | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_event: null .. null
max_forks_repo_path: envs/babyai/oracle/landmark_correction.py | max_forks_repo_name: AliengirlLiv/babyai | max_forks_repo_head_hexsha: 51421ee11538bf110c5b2d0c84a15f783d854e7d | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | forks_event: 2021-12-27T19:03:38.000Z .. 2021-12-27T19:03:38.000Z
import numpy as np
from envs.babyai.oracle.teacher import Teacher
class LandmarkCorrection(Teacher):
def empty_feedback(self):
"""
Return a tensor corresponding to no feedback.
"""
return np.array([-1, -1])
def random_feedback(self):
"""
        Return a tensor corresponding to random feedback.
"""
raise NotImplementedError('random feedback not implemented')
def compute_feedback(self):
"""
        Return a landmark (color, type) that guides the agent toward the goal.
"""
        # TODO: Unhardcode this
# Hardcoded 1 time-step away
# Iterate through the objects and order them by their distance from the current object
# Pick the first one that is closer to the goal than the current object. If none, then return the goal
dist_pos = np.array(self.env.dist_pos)
# Distance agent to objects
agentobj_distances = np.sum(np.abs(dist_pos - self.env.agent_pos), axis=1)
# Distance agent to goal
curr_dist = np.sum(np.abs(self.env.obj_pos - self.env.agent_pos))
# Distance object to goal
goalobj_distances = np.sum(np.abs(dist_pos - self.env.obj_pos), axis=1)
idx_closer = np.where(goalobj_distances < curr_dist)
if len(idx_closer[0]) == 0:
return np.array([self.env.obj_color, self.env.obj_type])
else:
idx_agentobj = range(len(agentobj_distances))
idx_agentobj = [x for _,x in sorted(zip(agentobj_distances, idx_agentobj))]
for idx in idx_agentobj:
if idx in idx_closer[0]:
break
return np.array([self.env.dist_colors[idx], self.env.dist_types[idx]])
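    # Hedged worked example (toy numbers, not from the env): with
    # agentobj_distances = [2, 5], curr_dist = 4 and goalobj_distances = [7, 1],
    # only object 1 is closer to the goal than the agent currently is, so the
    # feedback names object 1 even though object 0 is nearer to the agent.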
def feedback_condition(self):
"""
Returns true when we should give feedback.
Currently returns true when the agent's past action did not match the oracle's action.
"""
        # For now, we're being lazy and correcting the agent any time it strays from the oracle's optimal set of actions.
# This is kind of sketchy since multiple paths can be optimal.
return len(self.agent_actions) > 0 and (not self.agent_actions[-1] == self.oracle_actions[-1])
avg_line_length: 39.660714 | max_line_length: 120 | alphanum_fraction: 0.638001
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7905175cb5313f14dba18b2ab642aa17679b28db | size: 4260 | ext: py | lang: Python
max_stars_repo_path: install/app_store/tk-framework-adminui/v0.1.6/python/setup_project/project_delegate.py | max_stars_repo_name: JoanAzpeitia/lp_sg | max_stars_repo_head_hexsha: e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | stars_event: null .. null
max_issues_repo_path: install/app_store/tk-framework-adminui/v0.1.6/python/setup_project/project_delegate.py | max_issues_repo_name: JoanAzpeitia/lp_sg | max_issues_repo_head_hexsha: e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_event: null .. null
max_forks_repo_path: install/app_store/tk-framework-adminui/v0.1.6/python/setup_project/project_delegate.py | max_forks_repo_name: JoanAzpeitia/lp_sg | max_forks_repo_head_hexsha: e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | forks_event: 2020-02-15T10:42:56.000Z .. 2020-02-15T10:42:56.000Z
# Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sgtk
from tank.platform.qt import QtGui
from tank.platform.qt import QtCore
from . import project_model
views = sgtk.platform.import_framework("tk-framework-qtwidgets", "views")
class ProjectWidget(QtGui.QFrame):
""" Simple widget that shows a project's thumbnail and name. """
MARGIN = 5
ICON_SIZE = QtCore.QSize(32, 32)
def __init__(self, parent=None):
QtGui.QFrame.__init__(self, parent)
# initialize the UI
# simple frame with a thumbnail and a label
self.setObjectName("frame")
self.setFrameStyle(self.NoFrame)
self.setContentsMargins(self.MARGIN, self.MARGIN, self.MARGIN, self.MARGIN)
self.label = QtGui.QLabel(self)
self.label.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignLeft)
self.label.setWordWrap(True)
self.thumbnail = QtGui.QLabel(self)
self.thumbnail.setScaledContents(True)
self.layout = QtGui.QHBoxLayout(self)
self.layout.addWidget(self.thumbnail)
self.layout.addWidget(self.label)
self.layout.setStretchFactor(self.label, 1)
self.layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.layout)
self.setVisible(False)
self.set_selected(False)
def set_thumbnail(self, pixmap):
scaled = pixmap.scaled(self.ICON_SIZE, QtCore.Qt.KeepAspectRatio)
self.thumbnail.setPixmap(scaled)
def set_text(self, label):
metrics = QtGui.QFontMetrics(self.label.font())
elided = metrics.elidedText(label, QtCore.Qt.ElideMiddle, self.label.width())
self.label.setText(elided)
self.setToolTip(label)
def set_selected(self, selected):
""" Update the styling to reflect if the widget is selected or not """
if selected:
p = QtGui.QPalette()
highlight_col = p.color(QtGui.QPalette.Active, QtGui.QPalette.Highlight)
transp_highlight_str = "rgba(%s, %s, %s, 25%%)" % \
(highlight_col.red(), highlight_col.green(), highlight_col.blue())
highlight_str = "rgb(%s, %s, %s)" % \
(highlight_col.red(), highlight_col.green(), highlight_col.blue())
# make a border around the cell
self.setStyleSheet(
"""#frame {
border-width: 2px;
border-color: %s;
border-style: solid;
background-color: %s;
}
""" % (highlight_str, transp_highlight_str))
else:
self.setStyleSheet(
"""#frame {
border-width: 2px;
border-color: transparent;
border-style: solid;
}""")
class ProjectDelegate(views.EditSelectedWidgetDelegate):
""" Wrapper around the ProjectWidget for delegate use """
def __init__(self, view):
views.EditSelectedWidgetDelegate.__init__(self, view)
def _create_widget(self, parent):
return ProjectWidget(parent)
def _on_before_paint(self, widget, model_index, style_options):
if (style_options.state & QtGui.QStyle.State_Selected):
widget.set_selected(True)
else:
widget.set_selected(False)
icon = model_index.data(QtCore.Qt.DecorationRole)
if icon is not None:
thumb = icon.pixmap(30)
widget.set_thumbnail(thumb)
widget.set_text(model_index.data(project_model.ProjectModel.DISPLAY_NAME_ROLE))
def _on_before_selection(self, widget, model_index, style_options):
self._on_before_paint(widget, model_index, style_options)
def sizeHint(self, style_options, model_index):
return QtCore.QSize(175, 2*ProjectWidget.MARGIN + ProjectWidget.ICON_SIZE.height())
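# Hedged usage sketch (assumes a QListView ``view`` whose model provides
# DecorationRole icons and ProjectModel.DISPLAY_NAME_ROLE strings):
#     view.setItemDelegate(ProjectDelegate(view))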
avg_line_length: 36.724138 | max_line_length: 91 | alphanum_fraction: 0.64507
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790517c0469db48faa9be4fb0d00dca9f2d6b7cc | size: 1262 | ext: py | lang: Python
max_stars_repo_path: tests/refresh_token/test_mutations.py | max_stars_repo_name: bndr/django-graphql-jwt | max_stars_repo_head_hexsha: 0b9d7e07ce6d9e7835b1047d54690fd434a2649b | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | stars_event: 2019-06-19T12:05:08.000Z .. 2019-06-19T12:05:08.000Z
max_issues_repo_path: tests/refresh_token/test_mutations.py | max_issues_repo_name: CZZLEGEND/django-graphql-jwt | max_issues_repo_head_hexsha: 6e816445b72e7582d0595fda9e7e5d0486026045 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_event: null .. null
max_forks_repo_path: tests/refresh_token/test_mutations.py | max_forks_repo_name: CZZLEGEND/django-graphql-jwt | max_forks_repo_head_hexsha: 6e816445b72e7582d0595fda9e7e5d0486026045 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | forks_event: null .. null
import graphene
import graphql_jwt
from graphql_jwt.refresh_token.mixins import RefreshTokenMixin
from ..testcases import SchemaTestCase
from . import mixins
class TokenAuthTests(mixins.TokenAuthMixin, SchemaTestCase):
query = '''
mutation TokenAuth($username: String!, $password: String!) {
tokenAuth(username: $username, password: $password) {
token
refreshToken
}
}'''
refresh_token_mutations = {
'token_auth': graphql_jwt.ObtainJSONWebToken,
}
class Refresh(RefreshTokenMixin, graphql_jwt.Refresh):
class Arguments(RefreshTokenMixin.Fields):
"""Refresh Arguments"""
class RefreshTests(mixins.RefreshMixin, SchemaTestCase):
query = '''
mutation RefreshToken($refreshToken: String!) {
refreshToken(refreshToken: $refreshToken) {
token
refreshToken
payload
}
}'''
refresh_token_mutations = {
'refresh_token': Refresh,
}
class RevokeTests(mixins.RevokeMixin, SchemaTestCase):
query = '''
mutation RevokeToken($refreshToken: String!) {
revokeToken(refreshToken: $refreshToken) {
revoked
}
}'''
class Mutation(graphene.ObjectType):
revoke_token = graphql_jwt.Revoke.Field()
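# Hedged sketch of the schema these mutation tests exercise; the field names
# are assumptions, the actual wiring lives in the shared test base classes.
#     class Mutations(graphene.ObjectType):
#         token_auth = graphql_jwt.ObtainJSONWebToken.Field()
#         refresh_token = Refresh.Field()
#         revoke_token = graphql_jwt.Revoke.Field()
#     schema = graphene.Schema(mutation=Mutations)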
avg_line_length: 22.945455 | max_line_length: 64 | alphanum_fraction: 0.674326
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790517e926b78651705bea9ca3366024ff06fabe | size: 1415 | ext: py | lang: Python
max_stars_repo_path: libs/program_options/test/program_options_size_test.py | max_stars_repo_name: mike-code/boost_1_38_0 | max_stars_repo_head_hexsha: 7ff8b2069344ea6b0b757aa1f0778dfb8526df3c | max_stars_repo_licenses: ["BSL-1.0"] | max_stars_count: 4 | stars_event: 2021-07-31T13:56:01.000Z .. 2021-11-13T02:55:10.000Z
max_issues_repo_path: libs/program_options/test/program_options_size_test.py | max_issues_repo_name: boost-cmake/vintage | max_issues_repo_head_hexsha: dcfb7da3177134eddaee6789d6f582259cb0d6ee | max_issues_repo_licenses: ["BSL-1.0"] | max_issues_count: 1 | issues_event: 2018-01-17T10:11:43.000Z .. 2018-01-17T10:11:43.000Z
max_forks_repo_path: libs/program_options/test/program_options_size_test.py | max_forks_repo_name: boost-cmake/vintage | max_forks_repo_head_hexsha: dcfb7da3177134eddaee6789d6f582259cb0d6ee | max_forks_repo_licenses: ["BSL-1.0"] | max_forks_count: 7 | forks_event: 2021-08-31T14:34:23.000Z .. 2022-01-19T08:25:58.000Z
#!/usr/bin/python
import os
import string
call = " hook(10);\n";
call = " hook(10); hook2(10);hook3(0);hook4(0);\n";
def run_test(num_calls, compiler_command):
f = open("program_options_test.cpp", "w")
f.write("""#include <boost/program_options.hpp>
using namespace boost::program_options;
void do_it()
{
boost::program_options::options_description desc;
desc.add_options()
""")
for i in range(0, num_calls):
f.write("(\"opt%d\", value<int>())\n")
f.write(";\n}\n")
f.close()
os.system(compiler_command + " -c -save-temps -I /home/ghost/Work/boost-rc program_options_test.cpp")
nm = os.popen("nm -S program_options_test.o")
for l in nm:
if string.find(l, "Z5do_itv") != -1:
break
size = int(string.split(l)[1], 16)
return size
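# Hedged illustration of the `nm -S` line parsed above (exact format varies
# across binutils versions; the second column is the symbol size in hex):
#     0000000000000000 00000000000004d2 T _Z5do_itv
#     -> int('00000000000004d2', 16) == 1234 bytes for do_it()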
def run_tests(range, compiler_command):
last_size = None
first_size = None
for num in range:
size = run_test(num, compiler_command)
if last_size:
print "%2d calls: %5d bytes (+ %d)" % (num, size, size-last_size)
else:
print "%2d calls: %5d bytes" % (num, size)
first_size = size
last_size = size
print "Avarage: ", (last_size-first_size)/(range[-1]-range[0])
if __name__ == '__main__':
for compiler in [ "g++-3.3 -Os", "g++-3.3 -O3", "g++-3.4 -Os", "g++-3.4 -O3"]:
print "****", compiler, "****"
run_tests(range(1, 20), compiler)
avg_line_length: 26.203704 | max_line_length: 104 | alphanum_fraction: 0.601413
is_comment_constant_removed: false | is_sharp_comment_removed: true
hexsha: 7905186c64c946965ba1e31fd8e313ed7c6f8dba | size: 11407 | ext: py | lang: Python
max_stars_repo_path: primitives/image_classification/utils/imagenet.py | max_stars_repo_name: Yonder-OSS/D3M-Primitives | max_stars_repo_head_hexsha: b5f2c14d2afdadc6e97316aae5dd33fe4b874b09 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | stars_event: null .. null
max_issues_repo_path: primitives/image_classification/utils/imagenet.py | max_issues_repo_name: Yonder-OSS/D3M-Primitives | max_issues_repo_head_hexsha: b5f2c14d2afdadc6e97316aae5dd33fe4b874b09 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 2 | issues_event: 2020-03-25T15:36:39.000Z .. 2020-03-25T16:32:26.000Z
max_forks_repo_path: primitives/image_classification/utils/imagenet.py | max_forks_repo_name: Yonder-OSS/D3M-Primitives | max_forks_repo_head_hexsha: b5f2c14d2afdadc6e97316aae5dd33fe4b874b09 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | forks_event: null .. null
'''
Bootstrapped from https://github.com/NewKnowledge/imagenet and refined for D3M purposes
Original implementation from Craig Corcoran
'''
import os
import math
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import inception_v3, mobilenet_v2, xception
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, GlobalMaxPooling2D
from tensorflow.keras.utils import to_categorical, Sequence
import logging
logger = logging.getLogger(__name__)
#logger.setLevel(logging.INFO)
class ImagenetModel:
''' A class for featurizing images using pre-trained neural nets on ImageNet
and finetuning those nets for downstream classification
'''
def __init__(self,
model='inception_v3',
weights = 'imagenet',
include_top = False,
pooling=None,
n_channels=None,
clf_head_dense_dim = 1024,
):
''' Creates ImageNet base model for featurization or classification and corresponding image
preprocessing function
:param model: options are xception, inception_v3, and mobilenet_v2
:param weights: 'imagenet' or filepath
:param include_top: whether to include original ImageNet classification head with 1000 classes
:param pooling: 'avg', 'max', or None
:param n_channels: number of channels to keep if performing featurization
:param clf_head_dense_dim: dimension of dense layer before softmax classification (only applies
if `include_top` is false)
'''
self.include_top = include_top # determines if used for classification or featurization
self.n_channels = n_channels
self.pooling = pooling
self.clf_head_dense_dim = clf_head_dense_dim
if model == 'xception':
self.model = xception.Xception(weights=weights, include_top=include_top, pooling=pooling)
self.preprocess = xception.preprocess_input
self.target_size = (299, 299)
if include_top:
self.decode = xception.decode_predictions
else:
self.output_dim = (n_channels if n_channels else 2048) * (1 if pooling else 10**2)
elif model == 'inception_v3':
self.model = inception_v3.InceptionV3(weights=weights, include_top=include_top, pooling=pooling)
self.preprocess = inception_v3.preprocess_input
self.target_size = (299, 299)
if include_top:
self.decode = inception_v3.decode_predictions
else:
self.output_dim = (n_channels if n_channels else 2048) * (1 if pooling else 8**2)
elif model == 'mobilenet_v2':
            self.model = mobilenet_v2.MobileNetV2(weights=weights, include_top=include_top, pooling=pooling)
            self.preprocess = mobilenet_v2.preprocess_input
            self.target_size = (224, 224)
            if include_top:
                self.decode = mobilenet_v2.decode_predictions
else:
self.output_dim = (n_channels if n_channels else 1280) * (1 if pooling else 7**2)
else:
raise Exception('model option not implemented')
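        # Illustrative constructor calls (editor's sketch; the variable names are hypothetical):
        #   featurizer = ImagenetModel(model='xception', pooling='avg')    # output_dim == 2048
        #   classifier = ImagenetModel(model='inception_v3', include_top=True)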
def _load_finetune_model(
self,
nclasses = 2,
weights_path = None,
):
''' Constructs finetuning model architecture and optionally loads weights
        :param nclasses: number of classes to softmax over
:param weights_path: optional filepath from which to try to load weights
'''
out = self.model.output
if self.pooling is None:
out = GlobalAveragePooling2D()(out)# if self.pooling == 'avg' else GlobalMaxPooling2D()(out)
dense = Dense(self.clf_head_dense_dim, activation='relu')(out)
preds = Dense(nclasses, activation='softmax')(dense)
finetune_model = Model(inputs = self.model.input, outputs = preds)
# try to load weights
if weights_path is not None:
if os.path.isfile(weights_path):
finetune_model.load_weights(weights_path)
return finetune_model
def get_features(self, images_array):
''' takes a batch of images as a 4-d array and returns the (flattened) imagenet features for those images as a 2-d array '''
if self.include_top:
raise Exception('getting features from a classification model with include_top=True is currently not supported')
if images_array.ndim != 4:
raise Exception('invalid input shape for images_array, expects a 4d array')
# preprocess and compute image features
logger.debug(f'preprocessing {images_array.shape[0]} images')
images_array = self.preprocess(images_array)
logger.debug(f'computing image features')
image_features = self.model.predict(images_array)
# if n_channels is specified, only keep that number of channels
if self.n_channels:
logger.debug(f'truncating to first {self.n_channels} channels')
image_features = image_features.T[: self.n_channels].T
# reshape output array by flattening each image into a vector of features
shape = image_features.shape
return image_features.reshape(shape[0], np.prod(shape[1:]))
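        # Shape example (illustrative): inception_v3 with pooling=None and
        # n_channels=None maps a (32, 299, 299, 3) batch to (32, 8, 8, 2048)
        # features, flattened to (32, 131072), matching output_dim = 2048 * 8**2.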
def predict(self, images_array):
''' alias for get_features to more closely match scikit-learn interface '''
return self.get_features(images_array)
def finetune(self,
train_dataset,
val_dataset = None,
nclasses = 2,
top_layer_epochs = 1,
unfreeze_proportions = [0.5],
all_layer_epochs = 5,
class_weight = None,
optimizer_top = 'rmsprop',
optimizer_full = 'sgd',
callbacks = None,
num_workers = 8,
load_weights_path = None,
save_weights_path = None,
):
''' Finetunes the Imagenet model iteratively on a smaller set of images with (potentially) a smaller set of classes.
        First finetunes the last layer, then freezes the bottom N layers and retrains the rest
:param train_dataset: (X, y) pair of tf.constant tensors for training
:param val_dataset: (X, y) pair of tf.constant tensors for validation, optional
:param nclasses: number of classes
:param top_layer_epochs: how many epochs for which to finetune classification head (happens first)
:param unfreeze_proportions: list of proportions representing how much of the base ImageNet model one wants to
unfreeze (later layers unfrozen) for another round of finetuning
:param all_layer_epochs: how many epochs for which to finetune entire model (happens second)
:param class_weight: class weights (used for both training steps)
:param optimizer_top: optimizer to use for training of classification head
:param optimizer_full: optimizer to use for training full classification model
* suggest to use lower learning rate / more conservative optimizer for this step to
prevent catastrophic forgetting
:param callbacks: optional list of callbacks to use for each round of finetuning
:param num_workers: number of workers to use for multiprocess data loading
:param load_weights_path: optional filepath from which to try to load weights
:param save_weights_path: optional filepath to which to store weights
'''
finetune_model = self._load_finetune_model(
nclasses = nclasses,
weights_path=load_weights_path
)
fitting_histories = []
# freeze all convolutional InceptionV3 layers, retrain top layer
for layer in self.model.layers:
layer.trainable = False
finetune_model.compile(
optimizer=optimizer_top,
loss='categorical_crossentropy')
fitting_histories.append(
finetune_model.fit(
train_dataset,
validation_data = val_dataset,
epochs = top_layer_epochs,
class_weight = class_weight,
shuffle = True,
use_multiprocessing = True,
workers = num_workers,
callbacks = callbacks
)
)
# iteratively unfreeze specified proportion of later ImageNet base layers and finetune
finetune_model.compile(
# SGD(lr=0.0001, momentum=0.9)
optimizer=optimizer_full,
loss='categorical_crossentropy')
for p in unfreeze_proportions:
freeze_count = int(len(self.model.layers) * p)
for layer in finetune_model.layers[:freeze_count]:
layer.trainable = False
for layer in finetune_model.layers[freeze_count:]:
layer.trainable = True
fitting_histories.append(
finetune_model.fit(
train_dataset,
validation_data = val_dataset,
epochs = all_layer_epochs,
class_weight = class_weight,
shuffle = True,
use_multiprocessing = True,
workers = num_workers,
callbacks = callbacks
)
)
# save weights
if save_weights_path is not None:
finetune_model.save_weights(save_weights_path)
return fitting_histories
def finetune_classify(self,
test_dataset,
nclasses = 2,
num_workers = 8,
load_weights_path = None,
):
''' Uses the finetuned model to predict on a test dataset.
:param test_dataset: X, tf.constant tensor for inference
:param nclasses: number of classes
:param num_workers: number of workers to use for multiprocess data loading
:return: array of softmaxed prediction probabilities
:param load_weights_path: optional filepath from which to try to load weights
'''
finetune_model = self._load_finetune_model(
nclasses = nclasses,
weights_path = load_weights_path
)
return finetune_model.predict_generator(test_dataset,
use_multiprocessing = True,
workers = num_workers
)
class ImageNetGen(Sequence):
""" Tf.Keras Sequence for ImageNet input data """
def __init__(self, X, y = None, batch_size = 32):
self.X = X
self.y = y
self.batch_size = batch_size
def __len__(self):
return math.ceil(self.X.shape[0] / self.batch_size)
def __getitem__(self, idx):
batch_x = self.X[idx * self.batch_size:(idx + 1) * self.batch_size]
if self.y is None:
return tf.constant(batch_x)
else:
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
return tf.constant(batch_x), tf.constant(batch_y)
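# A minimal end-to-end sketch (editor's addition, not part of the original
# module). It runs on synthetic data, though constructing ImagenetModel will
# download ImageNet weights on first use; the hyperparameter values below are
# illustrative, not recommendations from this codebase.
if __name__ == '__main__':
    # 8 synthetic RGB images at the InceptionV3 input size
    images = np.random.randint(0, 255, size=(8, 299, 299, 3)).astype(np.float32)
    labels = to_categorical(np.random.randint(0, 2, size=8), num_classes=2)

    # featurization: pooled InceptionV3 features of shape (8, 2048)
    featurizer = ImagenetModel(model='inception_v3', pooling='avg')
    features = featurizer.get_features(images)

    # finetuning: batch the data with ImageNetGen and train briefly
    train_gen = ImageNetGen(images, y=labels, batch_size=4)
    histories = featurizer.finetune(
        train_gen, nclasses=2, top_layer_epochs=1, all_layer_epochs=1)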
| 43.208333
| 132
| 0.62111
|
import os
import math
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import inception_v3, mobilenet_v2, xception
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, GlobalMaxPooling2D
from tensorflow.keras.utils import to_categorical, Sequence
import logging
logger = logging.getLogger(__name__)
class ImagenetModel:
def __init__(self,
model='inception_v3',
weights = 'imagenet',
include_top = False,
pooling=None,
n_channels=None,
clf_head_dense_dim = 1024,
):
self.include_top = include_top
self.n_channels = n_channels
self.pooling = pooling
self.clf_head_dense_dim = clf_head_dense_dim
if model == 'xception':
self.model = xception.Xception(weights=weights, include_top=include_top, pooling=pooling)
self.preprocess = xception.preprocess_input
self.target_size = (299, 299)
if include_top:
self.decode = xception.decode_predictions
else:
self.output_dim = (n_channels if n_channels else 2048) * (1 if pooling else 10**2)
elif model == 'inception_v3':
self.model = inception_v3.InceptionV3(weights=weights, include_top=include_top, pooling=pooling)
self.preprocess = inception_v3.preprocess_input
self.target_size = (299, 299)
if include_top:
self.decode = inception_v3.decode_predictions
else:
self.output_dim = (n_channels if n_channels else 2048) * (1 if pooling else 8**2)
elif model == 'mobilenet_v2':
            self.model = mobilenet_v2.MobileNetV2(weights=weights, include_top=include_top, pooling=pooling)
            self.preprocess = mobilenet_v2.preprocess_input
            self.target_size = (224, 224)
            if include_top:
                self.decode = mobilenet_v2.decode_predictions
else:
self.output_dim = (n_channels if n_channels else 1280) * (1 if pooling else 7**2)
else:
raise Exception('model option not implemented')
def _load_finetune_model(
self,
nclasses = 2,
weights_path = None,
):
out = self.model.output
if self.pooling is None:
out = GlobalAveragePooling2D()(out)
dense = Dense(self.clf_head_dense_dim, activation='relu')(out)
preds = Dense(nclasses, activation='softmax')(dense)
finetune_model = Model(inputs = self.model.input, outputs = preds)
if weights_path is not None:
if os.path.isfile(weights_path):
finetune_model.load_weights(weights_path)
return finetune_model
def get_features(self, images_array):
if self.include_top:
raise Exception('getting features from a classification model with include_top=True is currently not supported')
if images_array.ndim != 4:
raise Exception('invalid input shape for images_array, expects a 4d array')
logger.debug(f'preprocessing {images_array.shape[0]} images')
images_array = self.preprocess(images_array)
logger.debug(f'computing image features')
image_features = self.model.predict(images_array)
if self.n_channels:
logger.debug(f'truncating to first {self.n_channels} channels')
image_features = image_features.T[: self.n_channels].T
shape = image_features.shape
return image_features.reshape(shape[0], np.prod(shape[1:]))
def predict(self, images_array):
return self.get_features(images_array)
def finetune(self,
train_dataset,
val_dataset = None,
nclasses = 2,
top_layer_epochs = 1,
unfreeze_proportions = [0.5],
all_layer_epochs = 5,
class_weight = None,
optimizer_top = 'rmsprop',
optimizer_full = 'sgd',
callbacks = None,
num_workers = 8,
load_weights_path = None,
save_weights_path = None,
):
finetune_model = self._load_finetune_model(
nclasses = nclasses,
weights_path=load_weights_path
)
fitting_histories = []
for layer in self.model.layers:
layer.trainable = False
finetune_model.compile(
optimizer=optimizer_top,
loss='categorical_crossentropy')
fitting_histories.append(
finetune_model.fit(
train_dataset,
validation_data = val_dataset,
epochs = top_layer_epochs,
class_weight = class_weight,
shuffle = True,
use_multiprocessing = True,
workers = num_workers,
callbacks = callbacks
)
)
finetune_model.compile(
optimizer=optimizer_full,
loss='categorical_crossentropy')
for p in unfreeze_proportions:
freeze_count = int(len(self.model.layers) * p)
for layer in finetune_model.layers[:freeze_count]:
layer.trainable = False
for layer in finetune_model.layers[freeze_count:]:
layer.trainable = True
fitting_histories.append(
finetune_model.fit(
train_dataset,
validation_data = val_dataset,
epochs = all_layer_epochs,
class_weight = class_weight,
shuffle = True,
use_multiprocessing = True,
workers = num_workers,
callbacks = callbacks
)
)
if save_weights_path is not None:
finetune_model.save_weights(save_weights_path)
return fitting_histories
def finetune_classify(self,
test_dataset,
nclasses = 2,
num_workers = 8,
load_weights_path = None,
):
finetune_model = self._load_finetune_model(
nclasses = nclasses,
weights_path = load_weights_path
)
return finetune_model.predict_generator(test_dataset,
use_multiprocessing = True,
workers = num_workers
)
class ImageNetGen(Sequence):
def __init__(self, X, y = None, batch_size = 32):
self.X = X
self.y = y
self.batch_size = batch_size
def __len__(self):
return math.ceil(self.X.shape[0] / self.batch_size)
def __getitem__(self, idx):
batch_x = self.X[idx * self.batch_size:(idx + 1) * self.batch_size]
if self.y is None:
return tf.constant(batch_x)
else:
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
return tf.constant(batch_x), tf.constant(batch_y)
| true
| true
|
79051875326eec5af6b3da7e8f20a14dc6f5417e
| 49,430
|
py
|
Python
|
src/kbnet.py
|
alexklwong/calibrated-backprojection-network
|
57dbec03c6da94ee0cd020b6de5f02e7e8ee726e
|
[
"Intel"
] | 38
|
2021-08-28T06:01:25.000Z
|
2022-03-03T03:23:23.000Z
|
src/kbnet.py
|
alexklwong/calibrated-backprojection-network
|
57dbec03c6da94ee0cd020b6de5f02e7e8ee726e
|
[
"Intel"
] | 14
|
2021-11-15T12:30:34.000Z
|
2022-03-30T14:03:16.000Z
|
src/kbnet.py
|
alexklwong/calibrated-backprojection-network
|
57dbec03c6da94ee0cd020b6de5f02e7e8ee726e
|
[
"Intel"
] | 9
|
2021-10-19T23:45:07.000Z
|
2021-12-20T07:45:37.000Z
|
'''
Author: Alex Wong <alexw@cs.ucla.edu>
If you use this code, please cite the following paper:
A. Wong, and S. Soatto. Unsupervised Depth Completion with Calibrated Backprojection Layers.
https://arxiv.org/pdf/2108.10531.pdf
@inproceedings{wong2021unsupervised,
title={Unsupervised Depth Completion with Calibrated Backprojection Layers},
author={Wong, Alex and Soatto, Stefano},
booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
pages={12747--12756},
year={2021}
}
'''
import os, time
import numpy as np
import torch
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
import datasets, data_utils, eval_utils
from log_utils import log
from kbnet_model import KBNetModel
from posenet_model import PoseNetModel
import global_constants as settings
from transforms import Transforms
from net_utils import OutlierRemoval
def train(train_image_path,
train_sparse_depth_path,
train_intrinsics_path,
val_image_path,
val_sparse_depth_path,
val_intrinsics_path,
val_ground_truth_path,
# Batch settings
n_batch=settings.N_BATCH,
n_height=settings.N_HEIGHT,
n_width=settings.N_WIDTH,
# Input settings
input_channels_image=settings.INPUT_CHANNELS_IMAGE,
input_channels_depth=settings.INPUT_CHANNELS_DEPTH,
normalized_image_range=settings.NORMALIZED_IMAGE_RANGE,
outlier_removal_kernel_size=settings.OUTLIER_REMOVAL_KERNEL_SIZE,
outlier_removal_threshold=settings.OUTLIER_REMOVAL_THRESHOLD,
# Sparse to dense pool settings
min_pool_sizes_sparse_to_dense_pool=settings.MIN_POOL_SIZES_SPARSE_TO_DENSE_POOL,
max_pool_sizes_sparse_to_dense_pool=settings.MAX_POOL_SIZES_SPARSE_TO_DENSE_POOL,
n_convolution_sparse_to_dense_pool=settings.N_CONVOLUTION_SPARSE_TO_DENSE_POOL,
n_filter_sparse_to_dense_pool=settings.N_FILTER_SPARSE_TO_DENSE_POOL,
# Depth network settings
n_filters_encoder_image=settings.N_FILTERS_ENCODER_IMAGE,
n_filters_encoder_depth=settings.N_FILTERS_ENCODER_DEPTH,
resolutions_backprojection=settings.RESOLUTIONS_BACKPROJECTION,
n_filters_decoder=settings.N_FILTERS_DECODER,
deconv_type=settings.DECONV_TYPE,
min_predict_depth=settings.MIN_PREDICT_DEPTH,
max_predict_depth=settings.MAX_PREDICT_DEPTH,
# Weight settings
weight_initializer=settings.WEIGHT_INITIALIZER,
activation_func=settings.ACTIVATION_FUNC,
# Training settings
learning_rates=settings.LEARNING_RATES,
learning_schedule=settings.LEARNING_SCHEDULE,
augmentation_probabilities=settings.AUGMENTATION_PROBABILITIES,
augmentation_schedule=settings.AUGMENTATION_SCHEDULE,
augmentation_random_crop_type=settings.AUGMENTATION_RANDOM_CROP_TYPE,
augmentation_random_flip_type=settings.AUGMENTATION_RANDOM_FLIP_TYPE,
augmentation_random_remove_points=settings.AUGMENTATION_RANDOM_REMOVE_POINTS,
augmentation_random_noise_type=settings.AUGMENTATION_RANDOM_NOISE_TYPE,
augmentation_random_noise_spread=settings.AUGMENTATION_RANDOM_NOISE_SPREAD,
# Loss function settings
w_color=settings.W_COLOR,
w_structure=settings.W_STRUCTURE,
w_sparse_depth=settings.W_SPARSE_DEPTH,
w_smoothness=settings.W_SMOOTHNESS,
w_weight_decay_depth=settings.W_WEIGHT_DECAY_DEPTH,
w_weight_decay_pose=settings.W_WEIGHT_DECAY_POSE,
# Evaluation settings
min_evaluate_depth=settings.MIN_EVALUATE_DEPTH,
max_evaluate_depth=settings.MAX_EVALUATE_DEPTH,
# Checkpoint settings
checkpoint_path=settings.CHECKPOINT_PATH,
n_checkpoint=settings.N_CHECKPOINT,
n_summary=settings.N_SUMMARY,
n_summary_display=settings.N_SUMMARY_DISPLAY,
validation_start_step=settings.VALIDATION_START_STEP,
depth_model_restore_path=settings.RESTORE_PATH,
pose_model_restore_path=settings.RESTORE_PATH,
# Hardware settings
device=settings.DEVICE,
n_thread=settings.N_THREAD):
if device == settings.CUDA or device == settings.GPU:
device = torch.device(settings.CUDA)
else:
device = torch.device(settings.CPU)
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
# Set up checkpoint and event paths
depth_model_checkpoint_path = os.path.join(checkpoint_path, 'depth_model-{}.pth')
pose_model_checkpoint_path = os.path.join(checkpoint_path, 'pose_model-{}.pth')
log_path = os.path.join(checkpoint_path, 'results.txt')
event_path = os.path.join(checkpoint_path, 'events')
best_results = {
'step': -1,
'mae': np.infty,
'rmse': np.infty,
'imae': np.infty,
'irmse': np.infty
}
'''
Load input paths and set up dataloaders
'''
# Read paths for training
train_image_paths = data_utils.read_paths(train_image_path)
train_sparse_depth_paths = data_utils.read_paths(train_sparse_depth_path)
train_intrinsics_paths = data_utils.read_paths(train_intrinsics_path)
n_train_sample = len(train_image_paths)
assert len(train_sparse_depth_paths) == n_train_sample
assert len(train_intrinsics_paths) == n_train_sample
n_train_step = \
learning_schedule[-1] * np.ceil(n_train_sample / n_batch).astype(np.int32)
train_dataloader = torch.utils.data.DataLoader(
datasets.KBNetTrainingDataset(
image_paths=train_image_paths,
sparse_depth_paths=train_sparse_depth_paths,
intrinsics_paths=train_intrinsics_paths,
shape=(n_height, n_width),
random_crop_type=augmentation_random_crop_type),
batch_size=n_batch,
shuffle=True,
num_workers=n_thread,
drop_last=False)
train_transforms = Transforms(
normalized_image_range=normalized_image_range,
random_flip_type=augmentation_random_flip_type,
random_remove_points=augmentation_random_remove_points,
random_noise_type=augmentation_random_noise_type,
random_noise_spread=augmentation_random_noise_spread)
# Load validation data if it is available
validation_available = val_image_path is not None and \
val_sparse_depth_path is not None and \
val_intrinsics_path is not None and \
val_ground_truth_path is not None
if validation_available:
val_image_paths = data_utils.read_paths(val_image_path)
val_sparse_depth_paths = data_utils.read_paths(val_sparse_depth_path)
val_intrinsics_paths = data_utils.read_paths(val_intrinsics_path)
val_ground_truth_paths = data_utils.read_paths(val_ground_truth_path)
n_val_sample = len(val_image_paths)
assert len(val_sparse_depth_paths) == n_val_sample
assert len(val_intrinsics_paths) == n_val_sample
assert len(val_ground_truth_paths) == n_val_sample
ground_truths = []
for path in val_ground_truth_paths:
ground_truth, validity_map = data_utils.load_depth_with_validity_map(path)
ground_truths.append(np.stack([ground_truth, validity_map], axis=-1))
val_dataloader = torch.utils.data.DataLoader(
datasets.KBNetInferenceDataset(
image_paths=val_image_paths,
sparse_depth_paths=val_sparse_depth_paths,
intrinsics_paths=val_intrinsics_paths),
batch_size=1,
shuffle=False,
num_workers=1,
drop_last=False)
val_transforms = Transforms(
normalized_image_range=normalized_image_range)
# Initialize outlier removal for sparse depth
outlier_removal = OutlierRemoval(
kernel_size=outlier_removal_kernel_size,
threshold=outlier_removal_threshold)
'''
Set up the model
'''
# Build KBNet (depth) network
depth_model = KBNetModel(
input_channels_image=input_channels_image,
input_channels_depth=input_channels_depth,
min_pool_sizes_sparse_to_dense_pool=min_pool_sizes_sparse_to_dense_pool,
max_pool_sizes_sparse_to_dense_pool=max_pool_sizes_sparse_to_dense_pool,
n_convolution_sparse_to_dense_pool=n_convolution_sparse_to_dense_pool,
n_filter_sparse_to_dense_pool=n_filter_sparse_to_dense_pool,
n_filters_encoder_image=n_filters_encoder_image,
n_filters_encoder_depth=n_filters_encoder_depth,
resolutions_backprojection=resolutions_backprojection,
n_filters_decoder=n_filters_decoder,
deconv_type=deconv_type,
weight_initializer=weight_initializer,
activation_func=activation_func,
min_predict_depth=min_predict_depth,
max_predict_depth=max_predict_depth,
device=device)
parameters_depth_model = depth_model.parameters()
depth_model.train()
    # Build PoseNet network (only needed for training)
pose_model = PoseNetModel(
encoder_type='resnet18',
rotation_parameterization='axis',
weight_initializer=weight_initializer,
activation_func='relu',
device=device)
parameters_pose_model = pose_model.parameters()
pose_model.train()
if depth_model_restore_path is not None and depth_model_restore_path != '':
depth_model.restore_model(depth_model_restore_path)
if pose_model_restore_path is not None and pose_model_restore_path != '':
pose_model.restore_model(pose_model_restore_path)
# Set up tensorboard summary writers
train_summary_writer = SummaryWriter(event_path + '-train')
val_summary_writer = SummaryWriter(event_path + '-val')
'''
Log input paths
'''
log('Training input paths:', log_path)
train_input_paths = [
train_image_path,
train_sparse_depth_path,
train_intrinsics_path
]
for path in train_input_paths:
log(path, log_path)
log('', log_path)
log('Validation input paths:', log_path)
val_input_paths = [
val_image_path,
val_sparse_depth_path,
val_intrinsics_path,
val_ground_truth_path
]
for path in val_input_paths:
log(path, log_path)
log('', log_path)
'''
Log all settings
'''
log_input_settings(
log_path,
# Batch settings
n_batch=n_batch,
n_height=n_height,
n_width=n_width,
# Input settings
input_channels_image=input_channels_image,
input_channels_depth=input_channels_depth,
normalized_image_range=normalized_image_range,
outlier_removal_kernel_size=outlier_removal_kernel_size,
outlier_removal_threshold=outlier_removal_threshold)
log_network_settings(
log_path,
# Sparse to dense pool settings
min_pool_sizes_sparse_to_dense_pool=min_pool_sizes_sparse_to_dense_pool,
max_pool_sizes_sparse_to_dense_pool=max_pool_sizes_sparse_to_dense_pool,
n_convolution_sparse_to_dense_pool=n_convolution_sparse_to_dense_pool,
n_filter_sparse_to_dense_pool=n_filter_sparse_to_dense_pool,
# Depth network settings
n_filters_encoder_image=n_filters_encoder_image,
n_filters_encoder_depth=n_filters_encoder_depth,
resolutions_backprojection=resolutions_backprojection,
n_filters_decoder=n_filters_decoder,
deconv_type=deconv_type,
min_predict_depth=min_predict_depth,
max_predict_depth=max_predict_depth,
# Weight settings
weight_initializer=weight_initializer,
activation_func=activation_func,
parameters_depth_model=parameters_depth_model,
parameters_pose_model=parameters_pose_model)
log_training_settings(
log_path,
# Training settings
n_batch=n_batch,
n_train_sample=n_train_sample,
n_train_step=n_train_step,
learning_rates=learning_rates,
learning_schedule=learning_schedule,
# Augmentation settings
augmentation_probabilities=augmentation_probabilities,
augmentation_schedule=augmentation_schedule,
augmentation_random_crop_type=augmentation_random_crop_type,
augmentation_random_flip_type=augmentation_random_flip_type,
augmentation_random_remove_points=augmentation_random_remove_points,
augmentation_random_noise_type=augmentation_random_noise_type,
augmentation_random_noise_spread=augmentation_random_noise_spread)
log_loss_func_settings(
log_path,
# Loss function settings
w_color=w_color,
w_structure=w_structure,
w_sparse_depth=w_sparse_depth,
w_smoothness=w_smoothness,
w_weight_decay_depth=w_weight_decay_depth,
w_weight_decay_pose=w_weight_decay_pose)
log_evaluation_settings(
log_path,
min_evaluate_depth=min_evaluate_depth,
max_evaluate_depth=max_evaluate_depth)
log_system_settings(
log_path,
# Checkpoint settings
checkpoint_path=checkpoint_path,
n_checkpoint=n_checkpoint,
summary_event_path=event_path,
n_summary=n_summary,
n_summary_display=n_summary_display,
validation_start_step=validation_start_step,
depth_model_restore_path=depth_model_restore_path,
pose_model_restore_path=pose_model_restore_path,
# Hardware settings
device=device,
n_thread=n_thread)
'''
Train model
'''
# Initialize optimizer with starting learning rate
learning_schedule_pos = 0
learning_rate = learning_rates[0]
augmentation_schedule_pos = 0
augmentation_probability = augmentation_probabilities[0]
optimizer = torch.optim.Adam([
{
'params' : parameters_depth_model,
'weight_decay' : w_weight_decay_depth
},
{
'params' : parameters_pose_model,
'weight_decay' : w_weight_decay_pose
}],
lr=learning_rate)
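    # Note: each parameter group carries its own weight decay, so the depth and
    # pose networks are regularized independently by the same Adam optimizer.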
# Start training
train_step = 0
time_start = time.time()
log('Begin training...', log_path)
for epoch in range(1, learning_schedule[-1] + 1):
# Set learning rate schedule
if epoch > learning_schedule[learning_schedule_pos]:
learning_schedule_pos = learning_schedule_pos + 1
learning_rate = learning_rates[learning_schedule_pos]
# Update optimizer learning rates
for g in optimizer.param_groups:
g['lr'] = learning_rate
# Set augmentation schedule
if -1 not in augmentation_schedule and epoch > augmentation_schedule[augmentation_schedule_pos]:
augmentation_schedule_pos = augmentation_schedule_pos + 1
augmentation_probability = augmentation_probabilities[augmentation_schedule_pos]
for inputs in train_dataloader:
train_step = train_step + 1
# Fetch data
inputs = [
in_.to(device) for in_ in inputs
]
image0, image1, image2, sparse_depth0, intrinsics = inputs
# Validity map is where sparse depth is available
validity_map_depth0 = torch.where(
sparse_depth0 > 0,
torch.ones_like(sparse_depth0),
sparse_depth0)
# Remove outlier points and update sparse depth and validity map
filtered_sparse_depth0, \
filtered_validity_map_depth0 = outlier_removal.remove_outliers(
sparse_depth=sparse_depth0,
validity_map=validity_map_depth0)
# Do data augmentation
[image0, image1, image2], \
[sparse_depth0], \
[filtered_sparse_depth0, filtered_validity_map_depth0] = train_transforms.transform(
images_arr=[image0, image1, image2],
range_maps_arr=[sparse_depth0],
validity_maps_arr=[filtered_sparse_depth0, filtered_validity_map_depth0],
random_transform_probability=augmentation_probability)
# Forward through the network
output_depth0 = depth_model.forward(
image=image0,
sparse_depth=sparse_depth0,
validity_map_depth=filtered_validity_map_depth0,
intrinsics=intrinsics)
pose01 = pose_model.forward(image0, image1)
pose02 = pose_model.forward(image0, image2)
# Compute loss function
loss, loss_info = depth_model.compute_loss(
image0=image0,
image1=image1,
image2=image2,
output_depth0=output_depth0,
sparse_depth0=filtered_sparse_depth0,
validity_map_depth0=filtered_validity_map_depth0,
intrinsics=intrinsics,
pose01=pose01,
pose02=pose02,
w_color=w_color,
w_structure=w_structure,
w_sparse_depth=w_sparse_depth,
w_smoothness=w_smoothness)
# Compute gradient and backpropagate
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (train_step % n_summary) == 0:
image01 = loss_info.pop('image01')
image02 = loss_info.pop('image02')
depth_model.log_summary(
summary_writer=train_summary_writer,
tag='train',
step=train_step,
image0=image0,
image01=image01,
image02=image02,
output_depth0=output_depth0,
sparse_depth0=filtered_sparse_depth0,
validity_map0=filtered_validity_map_depth0,
pose01=pose01,
pose02=pose02,
scalars=loss_info,
n_display=min(n_batch, n_summary_display))
# Log results and save checkpoints
if (train_step % n_checkpoint) == 0:
time_elapse = (time.time() - time_start) / 3600
time_remain = (n_train_step - train_step) * time_elapse / train_step
log('Step={:6}/{} Loss={:.5f} Time Elapsed={:.2f}h Time Remaining={:.2f}h'.format(
train_step, n_train_step, loss.item(), time_elapse, time_remain),
log_path)
if train_step >= validation_start_step and validation_available:
# Switch to validation mode
depth_model.eval()
with torch.no_grad():
best_results = validate(
depth_model=depth_model,
dataloader=val_dataloader,
transforms=val_transforms,
outlier_removal=outlier_removal,
ground_truths=ground_truths,
step=train_step,
best_results=best_results,
min_evaluate_depth=min_evaluate_depth,
max_evaluate_depth=max_evaluate_depth,
device=device,
summary_writer=val_summary_writer,
n_summary_display=n_summary_display,
log_path=log_path)
# Switch back to training
depth_model.train()
# Save checkpoints
depth_model.save_model(
depth_model_checkpoint_path.format(train_step), train_step, optimizer)
pose_model.save_model(
pose_model_checkpoint_path.format(train_step), train_step, optimizer)
# Save checkpoints
depth_model.save_model(
depth_model_checkpoint_path.format(train_step), train_step, optimizer)
pose_model.save_model(
pose_model_checkpoint_path.format(train_step), train_step, optimizer)
def validate(depth_model,
dataloader,
transforms,
outlier_removal,
ground_truths,
step,
best_results,
min_evaluate_depth,
max_evaluate_depth,
device,
summary_writer,
n_summary_display=4,
n_summary_display_interval=250,
log_path=None):
n_sample = len(dataloader)
mae = np.zeros(n_sample)
rmse = np.zeros(n_sample)
imae = np.zeros(n_sample)
irmse = np.zeros(n_sample)
image_summary = []
output_depth_summary = []
sparse_depth_summary = []
validity_map_summary = []
ground_truth_summary = []
for idx, (inputs, ground_truth) in enumerate(zip(dataloader, ground_truths)):
# Move inputs to device
inputs = [
in_.to(device) for in_ in inputs
]
image, sparse_depth, intrinsics = inputs
ground_truth = np.expand_dims(ground_truth, axis=0)
ground_truth = np.transpose(ground_truth, (0, 3, 1, 2))
ground_truth = torch.from_numpy(ground_truth).to(device)
# Validity map is where sparse depth is available
validity_map_depth = torch.where(
sparse_depth > 0,
torch.ones_like(sparse_depth),
sparse_depth)
# Remove outlier points and update sparse depth and validity map
filtered_sparse_depth, \
filtered_validity_map_depth = outlier_removal.remove_outliers(
sparse_depth=sparse_depth,
validity_map=validity_map_depth)
[image], \
[sparse_depth], \
[filtered_sparse_depth, filtered_validity_map_depth] = transforms.transform(
images_arr=[image],
range_maps_arr=[sparse_depth],
validity_maps_arr=[filtered_sparse_depth, filtered_validity_map_depth],
random_transform_probability=0.0)
# Forward through network
output_depth = depth_model.forward(
image=image,
sparse_depth=sparse_depth,
validity_map_depth=filtered_validity_map_depth,
intrinsics=intrinsics)
if (idx % n_summary_display_interval) == 0 and summary_writer is not None:
image_summary.append(image)
output_depth_summary.append(output_depth)
sparse_depth_summary.append(filtered_sparse_depth)
validity_map_summary.append(filtered_validity_map_depth)
ground_truth_summary.append(ground_truth)
# Convert to numpy to validate
output_depth = np.squeeze(output_depth.cpu().numpy())
ground_truth = np.squeeze(ground_truth.cpu().numpy())
validity_map = ground_truth[1, :, :]
ground_truth = ground_truth[0, :, :]
# Select valid regions to evaluate
validity_mask = np.where(validity_map > 0, 1, 0)
min_max_mask = np.logical_and(
ground_truth > min_evaluate_depth,
ground_truth < max_evaluate_depth)
mask = np.where(np.logical_and(validity_mask, min_max_mask) > 0)
output_depth = output_depth[mask]
ground_truth = ground_truth[mask]
# Compute validation metrics
mae[idx] = eval_utils.mean_abs_err(1000.0 * output_depth, 1000.0 * ground_truth)
rmse[idx] = eval_utils.root_mean_sq_err(1000.0 * output_depth, 1000.0 * ground_truth)
imae[idx] = eval_utils.inv_mean_abs_err(0.001 * output_depth, 0.001 * ground_truth)
irmse[idx] = eval_utils.inv_root_mean_sq_err(0.001 * output_depth, 0.001 * ground_truth)
# Compute mean metrics
mae = np.mean(mae)
rmse = np.mean(rmse)
imae = np.mean(imae)
irmse = np.mean(irmse)
# Log to tensorboard
if summary_writer is not None:
depth_model.log_summary(
summary_writer=summary_writer,
tag='eval',
step=step,
image0=torch.cat(image_summary, dim=0),
output_depth0=torch.cat(output_depth_summary, dim=0),
sparse_depth0=torch.cat(sparse_depth_summary, dim=0),
validity_map0=torch.cat(validity_map_summary, dim=0),
ground_truth0=torch.cat(ground_truth_summary, dim=0),
scalars={'mae' : mae, 'rmse' : rmse, 'imae' : imae, 'irmse': irmse},
n_display=n_summary_display)
# Print validation results to console
log('Validation results:', log_path)
log('{:>8} {:>8} {:>8} {:>8} {:>8}'.format(
'Step', 'MAE', 'RMSE', 'iMAE', 'iRMSE'),
log_path)
log('{:8} {:8.3f} {:8.3f} {:8.3f} {:8.3f}'.format(
step, mae, rmse, imae, irmse),
log_path)
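    # A checkpoint replaces the current best only when it matches or improves
    # at least 3 of the 4 metrics (MAE, RMSE, iMAE, iRMSE), i.e. a majority vote.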
n_improve = 0
if np.round(mae, 2) <= np.round(best_results['mae'], 2):
n_improve = n_improve + 1
if np.round(rmse, 2) <= np.round(best_results['rmse'], 2):
n_improve = n_improve + 1
if np.round(imae, 2) <= np.round(best_results['imae'], 2):
n_improve = n_improve + 1
if np.round(irmse, 2) <= np.round(best_results['irmse'], 2):
n_improve = n_improve + 1
if n_improve > 2:
best_results['step'] = step
best_results['mae'] = mae
best_results['rmse'] = rmse
best_results['imae'] = imae
best_results['irmse'] = irmse
log('Best results:', log_path)
log('{:>8} {:>8} {:>8} {:>8} {:>8}'.format(
'Step', 'MAE', 'RMSE', 'iMAE', 'iRMSE'),
log_path)
log('{:8} {:8.3f} {:8.3f} {:8.3f} {:8.3f}'.format(
best_results['step'],
best_results['mae'],
best_results['rmse'],
best_results['imae'],
best_results['irmse']), log_path)
return best_results
def run(image_path,
sparse_depth_path,
intrinsics_path,
ground_truth_path=None,
# Input settings
input_channels_image=settings.INPUT_CHANNELS_IMAGE,
input_channels_depth=settings.INPUT_CHANNELS_DEPTH,
normalized_image_range=settings.NORMALIZED_IMAGE_RANGE,
outlier_removal_kernel_size=settings.OUTLIER_REMOVAL_KERNEL_SIZE,
outlier_removal_threshold=settings.OUTLIER_REMOVAL_THRESHOLD,
# Sparse to dense pool settings
min_pool_sizes_sparse_to_dense_pool=settings.MIN_POOL_SIZES_SPARSE_TO_DENSE_POOL,
max_pool_sizes_sparse_to_dense_pool=settings.MAX_POOL_SIZES_SPARSE_TO_DENSE_POOL,
n_convolution_sparse_to_dense_pool=settings.N_CONVOLUTION_SPARSE_TO_DENSE_POOL,
n_filter_sparse_to_dense_pool=settings.N_FILTER_SPARSE_TO_DENSE_POOL,
# Depth network settings
n_filters_encoder_image=settings.N_FILTERS_ENCODER_IMAGE,
n_filters_encoder_depth=settings.N_FILTERS_ENCODER_DEPTH,
resolutions_backprojection=settings.RESOLUTIONS_BACKPROJECTION,
n_filters_decoder=settings.N_FILTERS_DECODER,
deconv_type=settings.DECONV_TYPE,
min_predict_depth=settings.MIN_PREDICT_DEPTH,
max_predict_depth=settings.MAX_PREDICT_DEPTH,
# Weight settings
weight_initializer=settings.WEIGHT_INITIALIZER,
activation_func=settings.ACTIVATION_FUNC,
# Evaluation settings
min_evaluate_depth=settings.MIN_EVALUATE_DEPTH,
max_evaluate_depth=settings.MAX_EVALUATE_DEPTH,
# Checkpoint settings
checkpoint_path=settings.CHECKPOINT_PATH,
depth_model_restore_path=settings.RESTORE_PATH,
# Output settings
save_outputs=False,
keep_input_filenames=False,
# Hardware settings
device=settings.DEVICE):
# Set up output path
if device == settings.CUDA or device == settings.GPU:
device = torch.device(settings.CUDA)
else:
device = torch.device(settings.CPU)
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
# Set up checkpoint and output paths
log_path = os.path.join(checkpoint_path, 'results.txt')
output_path = os.path.join(checkpoint_path, 'outputs')
'''
Load input paths and set up dataloader
'''
image_paths = data_utils.read_paths(image_path)
sparse_depth_paths = data_utils.read_paths(sparse_depth_path)
intrinsics_paths = data_utils.read_paths(intrinsics_path)
ground_truth_available = False
    if ground_truth_path is not None and ground_truth_path != '':
ground_truth_available = True
ground_truth_paths = data_utils.read_paths(ground_truth_path)
n_sample = len(image_paths)
input_paths = [
image_paths,
sparse_depth_paths,
intrinsics_paths
]
if ground_truth_available:
input_paths.append(ground_truth_paths)
for paths in input_paths:
assert n_sample == len(paths)
if ground_truth_available:
ground_truths = []
for path in ground_truth_paths:
ground_truth, validity_map = data_utils.load_depth_with_validity_map(path)
ground_truths.append(np.stack([ground_truth, validity_map], axis=-1))
else:
ground_truths = [None] * n_sample
# Set up dataloader
dataloader = torch.utils.data.DataLoader(
datasets.KBNetInferenceDataset(
image_paths=image_paths,
sparse_depth_paths=sparse_depth_paths,
intrinsics_paths=intrinsics_paths),
batch_size=1,
shuffle=False,
num_workers=1,
drop_last=False)
# Initialize transforms to normalize image and outlier removal for sparse depth
transforms = Transforms(
normalized_image_range=normalized_image_range)
outlier_removal = OutlierRemoval(
kernel_size=outlier_removal_kernel_size,
threshold=outlier_removal_threshold)
'''
Set up the model
'''
depth_model = KBNetModel(
input_channels_image=input_channels_image,
input_channels_depth=input_channels_depth,
min_pool_sizes_sparse_to_dense_pool=min_pool_sizes_sparse_to_dense_pool,
max_pool_sizes_sparse_to_dense_pool=max_pool_sizes_sparse_to_dense_pool,
n_convolution_sparse_to_dense_pool=n_convolution_sparse_to_dense_pool,
n_filter_sparse_to_dense_pool=n_filter_sparse_to_dense_pool,
n_filters_encoder_image=n_filters_encoder_image,
n_filters_encoder_depth=n_filters_encoder_depth,
resolutions_backprojection=resolutions_backprojection,
n_filters_decoder=n_filters_decoder,
deconv_type=deconv_type,
weight_initializer=weight_initializer,
activation_func=activation_func,
min_predict_depth=min_predict_depth,
max_predict_depth=max_predict_depth,
device=device)
# Restore model and set to evaluation mode
depth_model.restore_model(depth_model_restore_path)
depth_model.eval()
parameters_depth_model = depth_model.parameters()
'''
Log input paths
'''
log('Input paths:', log_path)
input_paths = [
image_path,
sparse_depth_path,
intrinsics_path,
]
if ground_truth_available:
input_paths.append(ground_truth_path)
for path in input_paths:
log(path, log_path)
log('', log_path)
'''
Log all settings
'''
log_input_settings(
log_path,
# Input settings
input_channels_image=input_channels_image,
input_channels_depth=input_channels_depth,
normalized_image_range=normalized_image_range,
outlier_removal_kernel_size=outlier_removal_kernel_size,
outlier_removal_threshold=outlier_removal_threshold)
log_network_settings(
log_path,
# Sparse to dense pool settings
min_pool_sizes_sparse_to_dense_pool=min_pool_sizes_sparse_to_dense_pool,
max_pool_sizes_sparse_to_dense_pool=max_pool_sizes_sparse_to_dense_pool,
n_convolution_sparse_to_dense_pool=n_convolution_sparse_to_dense_pool,
n_filter_sparse_to_dense_pool=n_filter_sparse_to_dense_pool,
# Depth network settings
n_filters_encoder_image=n_filters_encoder_image,
n_filters_encoder_depth=n_filters_encoder_depth,
resolutions_backprojection=resolutions_backprojection,
n_filters_decoder=n_filters_decoder,
deconv_type=deconv_type,
min_predict_depth=min_predict_depth,
max_predict_depth=max_predict_depth,
# Weight settings
weight_initializer=weight_initializer,
activation_func=activation_func,
parameters_depth_model=parameters_depth_model)
log_evaluation_settings(
log_path,
min_evaluate_depth=min_evaluate_depth,
max_evaluate_depth=max_evaluate_depth)
log_system_settings(
log_path,
# Checkpoint settings
checkpoint_path=checkpoint_path,
depth_model_restore_path=depth_model_restore_path,
# Hardware settings
device=device,
n_thread=1)
'''
Run model
'''
# Set up metrics in case groundtruth is available
mae = np.zeros(n_sample)
rmse = np.zeros(n_sample)
imae = np.zeros(n_sample)
irmse = np.zeros(n_sample)
images = []
output_depths = []
sparse_depths = []
time_elapse = 0.0
for idx, (inputs, ground_truth) in enumerate(zip(dataloader, ground_truths)):
# Move inputs to device
inputs = [
in_.to(device) for in_ in inputs
]
image, sparse_depth, intrinsics = inputs
time_start = time.time()
# Validity map is where sparse depth is available
validity_map_depth = torch.where(
sparse_depth > 0,
torch.ones_like(sparse_depth),
sparse_depth)
# Remove outlier points and update sparse depth and validity map
filtered_sparse_depth, \
filtered_validity_map_depth = outlier_removal.remove_outliers(
sparse_depth=sparse_depth,
validity_map=validity_map_depth)
[image] = transforms.transform(
images_arr=[image],
random_transform_probability=0.0)
# Forward through network
output_depth = depth_model.forward(
image=image,
sparse_depth=sparse_depth,
validity_map_depth=filtered_validity_map_depth,
intrinsics=intrinsics)
time_elapse = time_elapse + (time.time() - time_start)
# Convert to numpy
output_depth = np.squeeze(output_depth.detach().cpu().numpy())
# Save to output
if save_outputs:
images.append(np.transpose(np.squeeze(image.cpu().numpy()), (1, 2, 0)))
sparse_depths.append(np.squeeze(filtered_sparse_depth.cpu().numpy()))
output_depths.append(output_depth)
if ground_truth_available:
ground_truth = np.squeeze(ground_truth)
validity_map = ground_truth[:, :, 1]
ground_truth = ground_truth[:, :, 0]
validity_mask = np.where(validity_map > 0, 1, 0)
min_max_mask = np.logical_and(
ground_truth > min_evaluate_depth,
ground_truth < max_evaluate_depth)
mask = np.where(np.logical_and(validity_mask, min_max_mask) > 0)
output_depth = output_depth[mask]
ground_truth = ground_truth[mask]
mae[idx] = eval_utils.mean_abs_err(1000.0 * output_depth, 1000.0 * ground_truth)
rmse[idx] = eval_utils.root_mean_sq_err(1000.0 * output_depth, 1000.0 * ground_truth)
imae[idx] = eval_utils.inv_mean_abs_err(0.001 * output_depth, 0.001 * ground_truth)
irmse[idx] = eval_utils.inv_root_mean_sq_err(0.001 * output_depth, 0.001 * ground_truth)
# Compute total time elapse in ms
time_elapse = time_elapse * 1000.0
if ground_truth_available:
mae_mean = np.mean(mae)
rmse_mean = np.mean(rmse)
imae_mean = np.mean(imae)
irmse_mean = np.mean(irmse)
mae_std = np.std(mae)
rmse_std = np.std(rmse)
imae_std = np.std(imae)
irmse_std = np.std(irmse)
# Print evaluation results to console and file
log('Evaluation results:', log_path)
log('{:>8} {:>8} {:>8} {:>8}'.format(
'MAE', 'RMSE', 'iMAE', 'iRMSE'),
log_path)
log('{:8.3f} {:8.3f} {:8.3f} {:8.3f}'.format(
mae_mean, rmse_mean, imae_mean, irmse_mean),
log_path)
log('{:>8} {:>8} {:>8} {:>8}'.format(
'+/-', '+/-', '+/-', '+/-'),
log_path)
log('{:8.3f} {:8.3f} {:8.3f} {:8.3f}'.format(
mae_std, rmse_std, imae_std, irmse_std),
log_path)
# Log run time
    log('Total time: {:.2f} ms Average time per sample: {:.2f} ms'.format(
        time_elapse, time_elapse / float(n_sample)),
        log_path)
if save_outputs:
log('Saving outputs to {}'.format(output_path), log_path)
outputs = zip(images, output_depths, sparse_depths, ground_truths)
image_dirpath = os.path.join(output_path, 'image')
output_depth_dirpath = os.path.join(output_path, 'output_depth')
sparse_depth_dirpath = os.path.join(output_path, 'sparse_depth')
ground_truth_dirpath = os.path.join(output_path, 'ground_truth')
dirpaths = [
image_dirpath,
output_depth_dirpath,
sparse_depth_dirpath,
ground_truth_dirpath
]
for dirpath in dirpaths:
if not os.path.exists(dirpath):
os.makedirs(dirpath)
for idx, (image, output_depth, sparse_depth, ground_truth) in enumerate(outputs):
if keep_input_filenames:
filename = os.path.basename(image_paths[idx])
else:
filename = '{:010d}.png'.format(idx)
image_path = os.path.join(image_dirpath, filename)
image = (255 * image).astype(np.uint8)
Image.fromarray(image).save(image_path)
output_depth_path = os.path.join(output_depth_dirpath, filename)
data_utils.save_depth(output_depth, output_depth_path)
sparse_depth_path = os.path.join(sparse_depth_dirpath, filename)
data_utils.save_depth(sparse_depth, sparse_depth_path)
if ground_truth_available:
ground_truth_path = os.path.join(ground_truth_dirpath, filename)
data_utils.save_depth(ground_truth[..., 0], ground_truth_path)
'''
Helper functions for logging
'''
def log_input_settings(log_path,
n_batch=None,
n_height=None,
n_width=None,
input_channels_image=settings.INPUT_CHANNELS_IMAGE,
input_channels_depth=settings.INPUT_CHANNELS_DEPTH,
normalized_image_range=settings.NORMALIZED_IMAGE_RANGE,
outlier_removal_kernel_size=settings.OUTLIER_REMOVAL_KERNEL_SIZE,
outlier_removal_threshold=settings.OUTLIER_REMOVAL_THRESHOLD):
batch_settings_text = ''
batch_settings_vars = []
if n_batch is not None:
batch_settings_text = batch_settings_text + 'n_batch={}'
batch_settings_vars.append(n_batch)
batch_settings_text = \
batch_settings_text + ' ' if len(batch_settings_text) > 0 else batch_settings_text
if n_height is not None:
batch_settings_text = batch_settings_text + 'n_height={}'
batch_settings_vars.append(n_height)
batch_settings_text = \
batch_settings_text + ' ' if len(batch_settings_text) > 0 else batch_settings_text
if n_width is not None:
batch_settings_text = batch_settings_text + 'n_width={}'
batch_settings_vars.append(n_width)
log('Input settings:', log_path)
if len(batch_settings_vars) > 0:
log(batch_settings_text.format(*batch_settings_vars),
log_path)
log('input_channels_image={} input_channels_depth={}'.format(
input_channels_image, input_channels_depth),
log_path)
log('normalized_image_range={}'.format(normalized_image_range),
log_path)
log('outlier_removal_kernel_size={} outlier_removal_threshold={:.2f}'.format(
outlier_removal_kernel_size, outlier_removal_threshold),
log_path)
log('', log_path)
def log_network_settings(log_path,
# Sparse to dense pool settings
min_pool_sizes_sparse_to_dense_pool,
max_pool_sizes_sparse_to_dense_pool,
n_convolution_sparse_to_dense_pool,
n_filter_sparse_to_dense_pool,
# Depth network settings
n_filters_encoder_image,
n_filters_encoder_depth,
resolutions_backprojection,
n_filters_decoder,
deconv_type,
min_predict_depth,
max_predict_depth,
# Weight settings
weight_initializer,
activation_func,
parameters_depth_model=[],
parameters_pose_model=[]):
    # Compute number of parameters
n_parameter_depth = sum(p.numel() for p in parameters_depth_model)
n_parameter_pose = sum(p.numel() for p in parameters_pose_model)
n_parameter = n_parameter_depth + n_parameter_pose
    n_parameter_text = 'n_parameter={}'.format(n_parameter)
    n_parameter_vars = []
    if n_parameter_depth > 0:
        n_parameter_text = n_parameter_text + ' n_parameter_depth={}'
        n_parameter_vars.append(n_parameter_depth)
    if n_parameter_pose > 0:
        n_parameter_text = n_parameter_text + ' n_parameter_pose={}'
        n_parameter_vars.append(n_parameter_pose)
log('Sparse to dense pooling settings:', log_path)
log('min_pool_sizes_sparse_to_dense_pool={}'.format(min_pool_sizes_sparse_to_dense_pool),
log_path)
log('max_pool_sizes_sparse_to_dense_pool={}'.format(max_pool_sizes_sparse_to_dense_pool),
log_path)
log('n_convolution_sparse_to_dense_pool={}'.format(n_convolution_sparse_to_dense_pool),
log_path)
log('n_filter_sparse_to_dense_pool={}'.format(n_filter_sparse_to_dense_pool),
log_path)
log('', log_path)
log('Depth network settings:', log_path)
log('n_filters_encoder_image={}'.format(n_filters_encoder_image),
log_path)
log('n_filters_encoder_depth={}'.format(n_filters_encoder_depth),
log_path)
log('resolutions_backprojection={}'.format(resolutions_backprojection),
log_path)
log('n_filters_decoder={}'.format(n_filters_decoder),
log_path)
log('deconv_type={}'.format(deconv_type),
log_path)
log('min_predict_depth={:.2f} max_predict_depth={:.2f}'.format(
min_predict_depth, max_predict_depth),
log_path)
log('', log_path)
log('Weight settings:', log_path)
log('n_parameter={} n_parameter_depth={} n_parameter_pose={}'.format(
n_parameter, n_parameter_depth, n_parameter_pose),
log_path)
log('weight_initializer={} activation_func={}'.format(
weight_initializer, activation_func),
log_path)
log('', log_path)
def log_training_settings(log_path,
# Training settings
n_batch,
n_train_sample,
n_train_step,
learning_rates,
learning_schedule,
# Augmentation settings
augmentation_probabilities,
augmentation_schedule,
augmentation_random_crop_type,
augmentation_random_flip_type,
augmentation_random_remove_points,
augmentation_random_noise_type,
augmentation_random_noise_spread):
log('Training settings:', log_path)
log('n_sample={} n_epoch={} n_step={}'.format(
n_train_sample, learning_schedule[-1], n_train_step),
log_path)
log('learning_schedule=[%s]' %
', '.join('{}-{} : {}'.format(
ls * (n_train_sample // n_batch), le * (n_train_sample // n_batch), v)
for ls, le, v in zip([0] + learning_schedule[:-1], learning_schedule, learning_rates)),
log_path)
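    # The schedule above is logged in steps: each epoch boundary is converted
    # from epochs by multiplying by the steps per epoch (n_train_sample // n_batch).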
log('', log_path)
log('Augmentation settings:', log_path)
log('augmentation_schedule=[%s]' %
', '.join('{}-{} : {}'.format(
ls * (n_train_sample // n_batch), le * (n_train_sample // n_batch), v)
for ls, le, v in zip([0] + augmentation_schedule[:-1], augmentation_schedule, augmentation_probabilities)),
log_path)
log('augmentation_random_crop_type={}'.format(augmentation_random_crop_type),
log_path)
log('augmentation_random_flip_type={}'.format(augmentation_random_flip_type),
log_path)
log('augmentation_random_remove_points={}'.format(augmentation_random_remove_points),
log_path)
log('augmentation_random_noise_type={} augmentation_random_noise_spread={}'.format(
augmentation_random_noise_type, augmentation_random_noise_spread),
log_path)
log('', log_path)
def log_loss_func_settings(log_path,
# Loss function settings
w_color,
w_structure,
w_sparse_depth,
w_smoothness,
w_weight_decay_depth,
w_weight_decay_pose):
log('Loss function settings:', log_path)
log('w_color={:.1e} w_structure={:.1e} w_sparse_depth={:.1e}'.format(
w_color, w_structure, w_sparse_depth),
log_path)
log('w_smoothness={:.1e}'.format(w_smoothness),
log_path)
log('w_weight_decay_depth={:.1e} w_weight_decay_pose={:.1e}'.format(
w_weight_decay_depth, w_weight_decay_pose),
log_path)
log('', log_path)
def log_evaluation_settings(log_path,
min_evaluate_depth,
max_evaluate_depth):
log('Evaluation settings:', log_path)
log('min_evaluate_depth={:.2f} max_evaluate_depth={:.2f}'.format(
min_evaluate_depth, max_evaluate_depth),
log_path)
log('', log_path)
def log_system_settings(log_path,
# Checkpoint settings
checkpoint_path,
n_checkpoint=None,
summary_event_path=None,
n_summary=None,
n_summary_display=None,
validation_start_step=None,
depth_model_restore_path=None,
pose_model_restore_path=None,
# Hardware settings
device=torch.device('cuda'),
n_thread=8):
log('Checkpoint settings:', log_path)
if checkpoint_path is not None:
log('checkpoint_path={}'.format(checkpoint_path), log_path)
if n_checkpoint is not None:
log('checkpoint_save_frequency={}'.format(n_checkpoint), log_path)
if validation_start_step is not None:
log('validation_start_step={}'.format(validation_start_step), log_path)
log('', log_path)
summary_settings_text = ''
summary_settings_vars = []
if summary_event_path is not None:
log('Tensorboard settings:', log_path)
log('event_path={}'.format(summary_event_path), log_path)
if n_summary is not None:
summary_settings_text = summary_settings_text + 'log_summary_frequency={}'
summary_settings_vars.append(n_summary)
summary_settings_text = \
summary_settings_text + ' ' if len(summary_settings_text) > 0 else summary_settings_text
if n_summary_display is not None:
summary_settings_text = summary_settings_text + 'n_summary_display={}'
summary_settings_vars.append(n_summary_display)
summary_settings_text = \
summary_settings_text + ' ' if len(summary_settings_text) > 0 else summary_settings_text
if len(summary_settings_text) > 0:
log(summary_settings_text.format(*summary_settings_vars), log_path)
if depth_model_restore_path is not None and depth_model_restore_path != '':
log('depth_model_restore_path={}'.format(depth_model_restore_path),
log_path)
if pose_model_restore_path is not None and pose_model_restore_path != '':
log('pose_model_restore_path={}'.format(pose_model_restore_path),
log_path)
log('', log_path)
log('Hardware settings:', log_path)
log('device={}'.format(device.type), log_path)
log('n_thread={}'.format(n_thread), log_path)
log('', log_path)
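# Usage sketch (editor's addition; every path below is hypothetical):
#
#   run(image_path='testing/image_paths.txt',
#       sparse_depth_path='testing/sparse_depth_paths.txt',
#       intrinsics_path='testing/intrinsics_paths.txt',
#       ground_truth_path='',          # empty string skips evaluation
#       depth_model_restore_path='trained_models/depth_model.pth',
#       checkpoint_path='evaluation_results',
#       save_outputs=True)
#
# train(...) takes the analogous *_path arguments for the training and
# validation splits, with hyperparameters defaulted from global_constants.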
| 38.111025
| 119
| 0.654056
|
import os, time
import numpy as np
import torch
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
import datasets, data_utils, eval_utils
from log_utils import log
from kbnet_model import KBNetModel
from posenet_model import PoseNetModel
import global_constants as settings
from transforms import Transforms
from net_utils import OutlierRemoval
def train(train_image_path,
train_sparse_depth_path,
train_intrinsics_path,
val_image_path,
val_sparse_depth_path,
val_intrinsics_path,
val_ground_truth_path,
n_batch=settings.N_BATCH,
n_height=settings.N_HEIGHT,
n_width=settings.N_WIDTH,
input_channels_image=settings.INPUT_CHANNELS_IMAGE,
input_channels_depth=settings.INPUT_CHANNELS_DEPTH,
normalized_image_range=settings.NORMALIZED_IMAGE_RANGE,
outlier_removal_kernel_size=settings.OUTLIER_REMOVAL_KERNEL_SIZE,
outlier_removal_threshold=settings.OUTLIER_REMOVAL_THRESHOLD,
min_pool_sizes_sparse_to_dense_pool=settings.MIN_POOL_SIZES_SPARSE_TO_DENSE_POOL,
max_pool_sizes_sparse_to_dense_pool=settings.MAX_POOL_SIZES_SPARSE_TO_DENSE_POOL,
n_convolution_sparse_to_dense_pool=settings.N_CONVOLUTION_SPARSE_TO_DENSE_POOL,
n_filter_sparse_to_dense_pool=settings.N_FILTER_SPARSE_TO_DENSE_POOL,
n_filters_encoder_image=settings.N_FILTERS_ENCODER_IMAGE,
n_filters_encoder_depth=settings.N_FILTERS_ENCODER_DEPTH,
resolutions_backprojection=settings.RESOLUTIONS_BACKPROJECTION,
n_filters_decoder=settings.N_FILTERS_DECODER,
deconv_type=settings.DECONV_TYPE,
min_predict_depth=settings.MIN_PREDICT_DEPTH,
max_predict_depth=settings.MAX_PREDICT_DEPTH,
weight_initializer=settings.WEIGHT_INITIALIZER,
activation_func=settings.ACTIVATION_FUNC,
learning_rates=settings.LEARNING_RATES,
learning_schedule=settings.LEARNING_SCHEDULE,
augmentation_probabilities=settings.AUGMENTATION_PROBABILITIES,
augmentation_schedule=settings.AUGMENTATION_SCHEDULE,
augmentation_random_crop_type=settings.AUGMENTATION_RANDOM_CROP_TYPE,
augmentation_random_flip_type=settings.AUGMENTATION_RANDOM_FLIP_TYPE,
augmentation_random_remove_points=settings.AUGMENTATION_RANDOM_REMOVE_POINTS,
augmentation_random_noise_type=settings.AUGMENTATION_RANDOM_NOISE_TYPE,
augmentation_random_noise_spread=settings.AUGMENTATION_RANDOM_NOISE_SPREAD,
w_color=settings.W_COLOR,
w_structure=settings.W_STRUCTURE,
w_sparse_depth=settings.W_SPARSE_DEPTH,
w_smoothness=settings.W_SMOOTHNESS,
w_weight_decay_depth=settings.W_WEIGHT_DECAY_DEPTH,
w_weight_decay_pose=settings.W_WEIGHT_DECAY_POSE,
min_evaluate_depth=settings.MIN_EVALUATE_DEPTH,
max_evaluate_depth=settings.MAX_EVALUATE_DEPTH,
checkpoint_path=settings.CHECKPOINT_PATH,
n_checkpoint=settings.N_CHECKPOINT,
n_summary=settings.N_SUMMARY,
n_summary_display=settings.N_SUMMARY_DISPLAY,
validation_start_step=settings.VALIDATION_START_STEP,
depth_model_restore_path=settings.RESTORE_PATH,
pose_model_restore_path=settings.RESTORE_PATH,
device=settings.DEVICE,
n_thread=settings.N_THREAD):
if device == settings.CUDA or device == settings.GPU:
device = torch.device(settings.CUDA)
else:
device = torch.device(settings.CPU)
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
depth_model_checkpoint_path = os.path.join(checkpoint_path, 'depth_model-{}.pth')
pose_model_checkpoint_path = os.path.join(checkpoint_path, 'pose_model-{}.pth')
log_path = os.path.join(checkpoint_path, 'results.txt')
event_path = os.path.join(checkpoint_path, 'events')
best_results = {
'step': -1,
        'mae': np.inf,
        'rmse': np.inf,
        'imae': np.inf,
        'irmse': np.inf
}
train_image_paths = data_utils.read_paths(train_image_path)
train_sparse_depth_paths = data_utils.read_paths(train_sparse_depth_path)
train_intrinsics_paths = data_utils.read_paths(train_intrinsics_path)
n_train_sample = len(train_image_paths)
assert len(train_sparse_depth_paths) == n_train_sample
assert len(train_intrinsics_paths) == n_train_sample
n_train_step = \
learning_schedule[-1] * np.ceil(n_train_sample / n_batch).astype(np.int32)
train_dataloader = torch.utils.data.DataLoader(
datasets.KBNetTrainingDataset(
image_paths=train_image_paths,
sparse_depth_paths=train_sparse_depth_paths,
intrinsics_paths=train_intrinsics_paths,
shape=(n_height, n_width),
random_crop_type=augmentation_random_crop_type),
batch_size=n_batch,
shuffle=True,
num_workers=n_thread,
drop_last=False)
train_transforms = Transforms(
normalized_image_range=normalized_image_range,
random_flip_type=augmentation_random_flip_type,
random_remove_points=augmentation_random_remove_points,
random_noise_type=augmentation_random_noise_type,
random_noise_spread=augmentation_random_noise_spread)
validation_available = val_image_path is not None and \
val_sparse_depth_path is not None and \
val_intrinsics_path is not None and \
val_ground_truth_path is not None
if validation_available:
val_image_paths = data_utils.read_paths(val_image_path)
val_sparse_depth_paths = data_utils.read_paths(val_sparse_depth_path)
val_intrinsics_paths = data_utils.read_paths(val_intrinsics_path)
val_ground_truth_paths = data_utils.read_paths(val_ground_truth_path)
n_val_sample = len(val_image_paths)
assert len(val_sparse_depth_paths) == n_val_sample
assert len(val_intrinsics_paths) == n_val_sample
assert len(val_ground_truth_paths) == n_val_sample
ground_truths = []
for path in val_ground_truth_paths:
ground_truth, validity_map = data_utils.load_depth_with_validity_map(path)
ground_truths.append(np.stack([ground_truth, validity_map], axis=-1))
val_dataloader = torch.utils.data.DataLoader(
datasets.KBNetInferenceDataset(
image_paths=val_image_paths,
sparse_depth_paths=val_sparse_depth_paths,
intrinsics_paths=val_intrinsics_paths),
batch_size=1,
shuffle=False,
num_workers=1,
drop_last=False)
val_transforms = Transforms(
normalized_image_range=normalized_image_range)
outlier_removal = OutlierRemoval(
kernel_size=outlier_removal_kernel_size,
threshold=outlier_removal_threshold)
depth_model = KBNetModel(
input_channels_image=input_channels_image,
input_channels_depth=input_channels_depth,
min_pool_sizes_sparse_to_dense_pool=min_pool_sizes_sparse_to_dense_pool,
max_pool_sizes_sparse_to_dense_pool=max_pool_sizes_sparse_to_dense_pool,
n_convolution_sparse_to_dense_pool=n_convolution_sparse_to_dense_pool,
n_filter_sparse_to_dense_pool=n_filter_sparse_to_dense_pool,
n_filters_encoder_image=n_filters_encoder_image,
n_filters_encoder_depth=n_filters_encoder_depth,
resolutions_backprojection=resolutions_backprojection,
n_filters_decoder=n_filters_decoder,
deconv_type=deconv_type,
weight_initializer=weight_initializer,
activation_func=activation_func,
min_predict_depth=min_predict_depth,
max_predict_depth=max_predict_depth,
device=device)
parameters_depth_model = depth_model.parameters()
depth_model.train()
pose_model = PoseNetModel(
encoder_type='resnet18',
rotation_parameterization='axis',
weight_initializer=weight_initializer,
activation_func='relu',
device=device)
parameters_pose_model = pose_model.parameters()
pose_model.train()
if depth_model_restore_path is not None and depth_model_restore_path != '':
depth_model.restore_model(depth_model_restore_path)
if pose_model_restore_path is not None and pose_model_restore_path != '':
pose_model.restore_model(pose_model_restore_path)
train_summary_writer = SummaryWriter(event_path + '-train')
val_summary_writer = SummaryWriter(event_path + '-val')
log('Training input paths:', log_path)
train_input_paths = [
train_image_path,
train_sparse_depth_path,
train_intrinsics_path
]
for path in train_input_paths:
log(path, log_path)
log('', log_path)
log('Validation input paths:', log_path)
val_input_paths = [
val_image_path,
val_sparse_depth_path,
val_intrinsics_path,
val_ground_truth_path
]
for path in val_input_paths:
log(path, log_path)
log('', log_path)
log_input_settings(
log_path,
n_batch=n_batch,
n_height=n_height,
n_width=n_width,
input_channels_image=input_channels_image,
input_channels_depth=input_channels_depth,
normalized_image_range=normalized_image_range,
outlier_removal_kernel_size=outlier_removal_kernel_size,
outlier_removal_threshold=outlier_removal_threshold)
log_network_settings(
log_path,
min_pool_sizes_sparse_to_dense_pool=min_pool_sizes_sparse_to_dense_pool,
max_pool_sizes_sparse_to_dense_pool=max_pool_sizes_sparse_to_dense_pool,
n_convolution_sparse_to_dense_pool=n_convolution_sparse_to_dense_pool,
n_filter_sparse_to_dense_pool=n_filter_sparse_to_dense_pool,
n_filters_encoder_image=n_filters_encoder_image,
n_filters_encoder_depth=n_filters_encoder_depth,
resolutions_backprojection=resolutions_backprojection,
n_filters_decoder=n_filters_decoder,
deconv_type=deconv_type,
min_predict_depth=min_predict_depth,
max_predict_depth=max_predict_depth,
weight_initializer=weight_initializer,
activation_func=activation_func,
parameters_depth_model=parameters_depth_model,
parameters_pose_model=parameters_pose_model)
log_training_settings(
log_path,
n_batch=n_batch,
n_train_sample=n_train_sample,
n_train_step=n_train_step,
learning_rates=learning_rates,
learning_schedule=learning_schedule,
augmentation_probabilities=augmentation_probabilities,
augmentation_schedule=augmentation_schedule,
augmentation_random_crop_type=augmentation_random_crop_type,
augmentation_random_flip_type=augmentation_random_flip_type,
augmentation_random_remove_points=augmentation_random_remove_points,
augmentation_random_noise_type=augmentation_random_noise_type,
augmentation_random_noise_spread=augmentation_random_noise_spread)
log_loss_func_settings(
log_path,
w_color=w_color,
w_structure=w_structure,
w_sparse_depth=w_sparse_depth,
w_smoothness=w_smoothness,
w_weight_decay_depth=w_weight_decay_depth,
w_weight_decay_pose=w_weight_decay_pose)
log_evaluation_settings(
log_path,
min_evaluate_depth=min_evaluate_depth,
max_evaluate_depth=max_evaluate_depth)
log_system_settings(
log_path,
checkpoint_path=checkpoint_path,
n_checkpoint=n_checkpoint,
summary_event_path=event_path,
n_summary=n_summary,
n_summary_display=n_summary_display,
validation_start_step=validation_start_step,
depth_model_restore_path=depth_model_restore_path,
pose_model_restore_path=pose_model_restore_path,
device=device,
n_thread=n_thread)
learning_schedule_pos = 0
learning_rate = learning_rates[0]
augmentation_schedule_pos = 0
augmentation_probability = augmentation_probabilities[0]
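    # One Adam optimizer over both networks, with separate weight decay for the depth and pose parameter groups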
optimizer = torch.optim.Adam([
{
'params' : parameters_depth_model,
'weight_decay' : w_weight_decay_depth
},
{
'params' : parameters_pose_model,
'weight_decay' : w_weight_decay_pose
}],
lr=learning_rate)
train_step = 0
time_start = time.time()
log('Begin training...', log_path)
for epoch in range(1, learning_schedule[-1] + 1):
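        # Piecewise-constant schedules: step the learning rate (and augmentation probability) once the epoch passes a boundary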
if epoch > learning_schedule[learning_schedule_pos]:
learning_schedule_pos = learning_schedule_pos + 1
learning_rate = learning_rates[learning_schedule_pos]
for g in optimizer.param_groups:
g['lr'] = learning_rate
if -1 not in augmentation_schedule and epoch > augmentation_schedule[augmentation_schedule_pos]:
augmentation_schedule_pos = augmentation_schedule_pos + 1
augmentation_probability = augmentation_probabilities[augmentation_schedule_pos]
for inputs in train_dataloader:
train_step = train_step + 1
inputs = [
in_.to(device) for in_ in inputs
]
image0, image1, image2, sparse_depth0, intrinsics = inputs
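            # Binary validity map: 1 where a sparse depth measurement exists, 0 elsewhere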
validity_map_depth0 = torch.where(
sparse_depth0 > 0,
torch.ones_like(sparse_depth0),
sparse_depth0)
filtered_sparse_depth0, \
filtered_validity_map_depth0 = outlier_removal.remove_outliers(
sparse_depth=sparse_depth0,
validity_map=validity_map_depth0)
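            # Apply the same random augmentations jointly to images, range maps and validity maps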
[image0, image1, image2], \
[sparse_depth0], \
[filtered_sparse_depth0, filtered_validity_map_depth0] = train_transforms.transform(
images_arr=[image0, image1, image2],
range_maps_arr=[sparse_depth0],
validity_maps_arr=[filtered_sparse_depth0, filtered_validity_map_depth0],
random_transform_probability=augmentation_probability)
output_depth0 = depth_model.forward(
image=image0,
sparse_depth=sparse_depth0,
validity_map_depth=filtered_validity_map_depth0,
intrinsics=intrinsics)
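            # Relative camera poses from frame 0 to its temporal neighbors 1 and 2, consumed by the reconstruction loss below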
pose01 = pose_model.forward(image0, image1)
pose02 = pose_model.forward(image0, image2)
loss, loss_info = depth_model.compute_loss(
image0=image0,
image1=image1,
image2=image2,
output_depth0=output_depth0,
sparse_depth0=filtered_sparse_depth0,
validity_map_depth0=filtered_validity_map_depth0,
intrinsics=intrinsics,
pose01=pose01,
pose02=pose02,
w_color=w_color,
w_structure=w_structure,
w_sparse_depth=w_sparse_depth,
w_smoothness=w_smoothness)
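            # Standard update: clear stale gradients, backpropagate, and step both models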
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (train_step % n_summary) == 0:
image01 = loss_info.pop('image01')
image02 = loss_info.pop('image02')
depth_model.log_summary(
summary_writer=train_summary_writer,
tag='train',
step=train_step,
image0=image0,
image01=image01,
image02=image02,
output_depth0=output_depth0,
sparse_depth0=filtered_sparse_depth0,
validity_map0=filtered_validity_map_depth0,
pose01=pose01,
pose02=pose02,
scalars=loss_info,
n_display=min(n_batch, n_summary_display))
if (train_step % n_checkpoint) == 0:
time_elapse = (time.time() - time_start) / 3600
time_remain = (n_train_step - train_step) * time_elapse / train_step
log('Step={:6}/{} Loss={:.5f} Time Elapsed={:.2f}h Time Remaining={:.2f}h'.format(
train_step, n_train_step, loss.item(), time_elapse, time_remain),
log_path)
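                # Once past validation_start_step, evaluate on the validation split without gradients and track the best checkpoint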
if train_step >= validation_start_step and validation_available:
depth_model.eval()
with torch.no_grad():
best_results = validate(
depth_model=depth_model,
dataloader=val_dataloader,
transforms=val_transforms,
outlier_removal=outlier_removal,
ground_truths=ground_truths,
step=train_step,
best_results=best_results,
min_evaluate_depth=min_evaluate_depth,
max_evaluate_depth=max_evaluate_depth,
device=device,
summary_writer=val_summary_writer,
n_summary_display=n_summary_display,
log_path=log_path)
depth_model.train()
depth_model.save_model(
depth_model_checkpoint_path.format(train_step), train_step, optimizer)
pose_model.save_model(
pose_model_checkpoint_path.format(train_step), train_step, optimizer)
depth_model.save_model(
depth_model_checkpoint_path.format(train_step), train_step, optimizer)
pose_model.save_model(
pose_model_checkpoint_path.format(train_step), train_step, optimizer)
def validate(depth_model,
dataloader,
transforms,
outlier_removal,
ground_truths,
step,
best_results,
min_evaluate_depth,
max_evaluate_depth,
device,
summary_writer,
n_summary_display=4,
n_summary_display_interval=250,
log_path=None):
n_sample = len(dataloader)
mae = np.zeros(n_sample)
rmse = np.zeros(n_sample)
imae = np.zeros(n_sample)
irmse = np.zeros(n_sample)
image_summary = []
output_depth_summary = []
sparse_depth_summary = []
validity_map_summary = []
ground_truth_summary = []
for idx, (inputs, ground_truth) in enumerate(zip(dataloader, ground_truths)):
inputs = [
in_.to(device) for in_ in inputs
]
image, sparse_depth, intrinsics = inputs
ground_truth = np.expand_dims(ground_truth, axis=0)
ground_truth = np.transpose(ground_truth, (0, 3, 1, 2))
ground_truth = torch.from_numpy(ground_truth).to(device)
validity_map_depth = torch.where(
sparse_depth > 0,
torch.ones_like(sparse_depth),
sparse_depth)
filtered_sparse_depth, \
filtered_validity_map_depth = outlier_removal.remove_outliers(
sparse_depth=sparse_depth,
validity_map=validity_map_depth)
[image], \
[sparse_depth], \
[filtered_sparse_depth, filtered_validity_map_depth] = transforms.transform(
images_arr=[image],
range_maps_arr=[sparse_depth],
validity_maps_arr=[filtered_sparse_depth, filtered_validity_map_depth],
random_transform_probability=0.0)
output_depth = depth_model.forward(
image=image,
sparse_depth=sparse_depth,
validity_map_depth=filtered_validity_map_depth,
intrinsics=intrinsics)
if (idx % n_summary_display_interval) == 0 and summary_writer is not None:
image_summary.append(image)
output_depth_summary.append(output_depth)
sparse_depth_summary.append(filtered_sparse_depth)
validity_map_summary.append(filtered_validity_map_depth)
ground_truth_summary.append(ground_truth)
output_depth = np.squeeze(output_depth.cpu().numpy())
ground_truth = np.squeeze(ground_truth.cpu().numpy())
validity_map = ground_truth[1, :, :]
ground_truth = ground_truth[0, :, :]
validity_mask = np.where(validity_map > 0, 1, 0)
min_max_mask = np.logical_and(
ground_truth > min_evaluate_depth,
ground_truth < max_evaluate_depth)
mask = np.where(np.logical_and(validity_mask, min_max_mask) > 0)
output_depth = output_depth[mask]
ground_truth = ground_truth[mask]
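        # MAE/RMSE are reported in millimeters; iMAE/iRMSE on inverse depth in 1/km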
mae[idx] = eval_utils.mean_abs_err(1000.0 * output_depth, 1000.0 * ground_truth)
rmse[idx] = eval_utils.root_mean_sq_err(1000.0 * output_depth, 1000.0 * ground_truth)
imae[idx] = eval_utils.inv_mean_abs_err(0.001 * output_depth, 0.001 * ground_truth)
irmse[idx] = eval_utils.inv_root_mean_sq_err(0.001 * output_depth, 0.001 * ground_truth)
mae = np.mean(mae)
rmse = np.mean(rmse)
imae = np.mean(imae)
irmse = np.mean(irmse)
if summary_writer is not None:
depth_model.log_summary(
summary_writer=summary_writer,
tag='eval',
step=step,
image0=torch.cat(image_summary, dim=0),
output_depth0=torch.cat(output_depth_summary, dim=0),
sparse_depth0=torch.cat(sparse_depth_summary, dim=0),
validity_map0=torch.cat(validity_map_summary, dim=0),
ground_truth0=torch.cat(ground_truth_summary, dim=0),
scalars={'mae' : mae, 'rmse' : rmse, 'imae' : imae, 'irmse': irmse},
n_display=n_summary_display)
log('Validation results:', log_path)
log('{:>8} {:>8} {:>8} {:>8} {:>8}'.format(
'Step', 'MAE', 'RMSE', 'iMAE', 'iRMSE'),
log_path)
log('{:8} {:8.3f} {:8.3f} {:8.3f} {:8.3f}'.format(
step, mae, rmse, imae, irmse),
log_path)
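    # Treat this checkpoint as a new best only if at least 3 of the 4 metrics improve (after rounding)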
n_improve = 0
if np.round(mae, 2) <= np.round(best_results['mae'], 2):
n_improve = n_improve + 1
if np.round(rmse, 2) <= np.round(best_results['rmse'], 2):
n_improve = n_improve + 1
if np.round(imae, 2) <= np.round(best_results['imae'], 2):
n_improve = n_improve + 1
if np.round(irmse, 2) <= np.round(best_results['irmse'], 2):
n_improve = n_improve + 1
if n_improve > 2:
best_results['step'] = step
best_results['mae'] = mae
best_results['rmse'] = rmse
best_results['imae'] = imae
best_results['irmse'] = irmse
log('Best results:', log_path)
log('{:>8} {:>8} {:>8} {:>8} {:>8}'.format(
'Step', 'MAE', 'RMSE', 'iMAE', 'iRMSE'),
log_path)
log('{:8} {:8.3f} {:8.3f} {:8.3f} {:8.3f}'.format(
best_results['step'],
best_results['mae'],
best_results['rmse'],
best_results['imae'],
best_results['irmse']), log_path)
return best_results
def run(image_path,
sparse_depth_path,
intrinsics_path,
ground_truth_path=None,
input_channels_image=settings.INPUT_CHANNELS_IMAGE,
input_channels_depth=settings.INPUT_CHANNELS_DEPTH,
normalized_image_range=settings.NORMALIZED_IMAGE_RANGE,
outlier_removal_kernel_size=settings.OUTLIER_REMOVAL_KERNEL_SIZE,
outlier_removal_threshold=settings.OUTLIER_REMOVAL_THRESHOLD,
min_pool_sizes_sparse_to_dense_pool=settings.MIN_POOL_SIZES_SPARSE_TO_DENSE_POOL,
max_pool_sizes_sparse_to_dense_pool=settings.MAX_POOL_SIZES_SPARSE_TO_DENSE_POOL,
n_convolution_sparse_to_dense_pool=settings.N_CONVOLUTION_SPARSE_TO_DENSE_POOL,
n_filter_sparse_to_dense_pool=settings.N_FILTER_SPARSE_TO_DENSE_POOL,
n_filters_encoder_image=settings.N_FILTERS_ENCODER_IMAGE,
n_filters_encoder_depth=settings.N_FILTERS_ENCODER_DEPTH,
resolutions_backprojection=settings.RESOLUTIONS_BACKPROJECTION,
n_filters_decoder=settings.N_FILTERS_DECODER,
deconv_type=settings.DECONV_TYPE,
min_predict_depth=settings.MIN_PREDICT_DEPTH,
max_predict_depth=settings.MAX_PREDICT_DEPTH,
weight_initializer=settings.WEIGHT_INITIALIZER,
activation_func=settings.ACTIVATION_FUNC,
min_evaluate_depth=settings.MIN_EVALUATE_DEPTH,
max_evaluate_depth=settings.MAX_EVALUATE_DEPTH,
checkpoint_path=settings.CHECKPOINT_PATH,
depth_model_restore_path=settings.RESTORE_PATH,
save_outputs=False,
keep_input_filenames=False,
device=settings.DEVICE):
if device == settings.CUDA or device == settings.GPU:
device = torch.device(settings.CUDA)
else:
device = torch.device(settings.CPU)
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
log_path = os.path.join(checkpoint_path, 'results.txt')
output_path = os.path.join(checkpoint_path, 'outputs')
image_paths = data_utils.read_paths(image_path)
sparse_depth_paths = data_utils.read_paths(sparse_depth_path)
intrinsics_paths = data_utils.read_paths(intrinsics_path)
ground_truth_available = False
    if ground_truth_path is not None and ground_truth_path != '':
ground_truth_available = True
ground_truth_paths = data_utils.read_paths(ground_truth_path)
n_sample = len(image_paths)
input_paths = [
image_paths,
sparse_depth_paths,
intrinsics_paths
]
if ground_truth_available:
input_paths.append(ground_truth_paths)
for paths in input_paths:
assert n_sample == len(paths)
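    # Load each ground truth with its validity map when provided; otherwise pair samples with None placeholders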
if ground_truth_available:
ground_truths = []
for path in ground_truth_paths:
ground_truth, validity_map = data_utils.load_depth_with_validity_map(path)
ground_truths.append(np.stack([ground_truth, validity_map], axis=-1))
else:
ground_truths = [None] * n_sample
dataloader = torch.utils.data.DataLoader(
datasets.KBNetInferenceDataset(
image_paths=image_paths,
sparse_depth_paths=sparse_depth_paths,
intrinsics_paths=intrinsics_paths),
batch_size=1,
shuffle=False,
num_workers=1,
drop_last=False)
transforms = Transforms(
normalized_image_range=normalized_image_range)
outlier_removal = OutlierRemoval(
kernel_size=outlier_removal_kernel_size,
threshold=outlier_removal_threshold)
depth_model = KBNetModel(
input_channels_image=input_channels_image,
input_channels_depth=input_channels_depth,
min_pool_sizes_sparse_to_dense_pool=min_pool_sizes_sparse_to_dense_pool,
max_pool_sizes_sparse_to_dense_pool=max_pool_sizes_sparse_to_dense_pool,
n_convolution_sparse_to_dense_pool=n_convolution_sparse_to_dense_pool,
n_filter_sparse_to_dense_pool=n_filter_sparse_to_dense_pool,
n_filters_encoder_image=n_filters_encoder_image,
n_filters_encoder_depth=n_filters_encoder_depth,
resolutions_backprojection=resolutions_backprojection,
n_filters_decoder=n_filters_decoder,
deconv_type=deconv_type,
weight_initializer=weight_initializer,
activation_func=activation_func,
min_predict_depth=min_predict_depth,
max_predict_depth=max_predict_depth,
device=device)
depth_model.restore_model(depth_model_restore_path)
depth_model.eval()
parameters_depth_model = depth_model.parameters()
log('Input paths:', log_path)
input_paths = [
image_path,
sparse_depth_path,
intrinsics_path,
]
if ground_truth_available:
input_paths.append(ground_truth_path)
for path in input_paths:
log(path, log_path)
log('', log_path)
log_input_settings(
log_path,
input_channels_image=input_channels_image,
input_channels_depth=input_channels_depth,
normalized_image_range=normalized_image_range,
outlier_removal_kernel_size=outlier_removal_kernel_size,
outlier_removal_threshold=outlier_removal_threshold)
log_network_settings(
log_path,
min_pool_sizes_sparse_to_dense_pool=min_pool_sizes_sparse_to_dense_pool,
max_pool_sizes_sparse_to_dense_pool=max_pool_sizes_sparse_to_dense_pool,
n_convolution_sparse_to_dense_pool=n_convolution_sparse_to_dense_pool,
n_filter_sparse_to_dense_pool=n_filter_sparse_to_dense_pool,
n_filters_encoder_image=n_filters_encoder_image,
n_filters_encoder_depth=n_filters_encoder_depth,
resolutions_backprojection=resolutions_backprojection,
n_filters_decoder=n_filters_decoder,
deconv_type=deconv_type,
min_predict_depth=min_predict_depth,
max_predict_depth=max_predict_depth,
weight_initializer=weight_initializer,
activation_func=activation_func,
parameters_depth_model=parameters_depth_model)
log_evaluation_settings(
log_path,
min_evaluate_depth=min_evaluate_depth,
max_evaluate_depth=max_evaluate_depth)
log_system_settings(
log_path,
checkpoint_path=checkpoint_path,
depth_model_restore_path=depth_model_restore_path,
device=device,
n_thread=1)
mae = np.zeros(n_sample)
rmse = np.zeros(n_sample)
imae = np.zeros(n_sample)
irmse = np.zeros(n_sample)
images = []
output_depths = []
sparse_depths = []
time_elapse = 0.0
for idx, (inputs, ground_truth) in enumerate(zip(dataloader, ground_truths)):
inputs = [
in_.to(device) for in_ in inputs
]
image, sparse_depth, intrinsics = inputs
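        # Time outlier filtering, normalization and the network forward pass for this sample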
time_start = time.time()
validity_map_depth = torch.where(
sparse_depth > 0,
torch.ones_like(sparse_depth),
sparse_depth)
filtered_sparse_depth, \
filtered_validity_map_depth = outlier_removal.remove_outliers(
sparse_depth=sparse_depth,
validity_map=validity_map_depth)
[image] = transforms.transform(
images_arr=[image],
random_transform_probability=0.0)
output_depth = depth_model.forward(
image=image,
sparse_depth=sparse_depth,
validity_map_depth=filtered_validity_map_depth,
intrinsics=intrinsics)
time_elapse = time_elapse + (time.time() - time_start)
output_depth = np.squeeze(output_depth.detach().cpu().numpy())
if save_outputs:
images.append(np.transpose(np.squeeze(image.cpu().numpy()), (1, 2, 0)))
sparse_depths.append(np.squeeze(filtered_sparse_depth.cpu().numpy()))
output_depths.append(output_depth)
if ground_truth_available:
ground_truth = np.squeeze(ground_truth)
validity_map = ground_truth[:, :, 1]
ground_truth = ground_truth[:, :, 0]
validity_mask = np.where(validity_map > 0, 1, 0)
min_max_mask = np.logical_and(
ground_truth > min_evaluate_depth,
ground_truth < max_evaluate_depth)
mask = np.where(np.logical_and(validity_mask, min_max_mask) > 0)
output_depth = output_depth[mask]
ground_truth = ground_truth[mask]
mae[idx] = eval_utils.mean_abs_err(1000.0 * output_depth, 1000.0 * ground_truth)
rmse[idx] = eval_utils.root_mean_sq_err(1000.0 * output_depth, 1000.0 * ground_truth)
imae[idx] = eval_utils.inv_mean_abs_err(0.001 * output_depth, 0.001 * ground_truth)
irmse[idx] = eval_utils.inv_root_mean_sq_err(0.001 * output_depth, 0.001 * ground_truth)
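    # Convert the accumulated runtime from seconds to milliseconds for reporting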
time_elapse = time_elapse * 1000.0
if ground_truth_available:
mae_mean = np.mean(mae)
rmse_mean = np.mean(rmse)
imae_mean = np.mean(imae)
irmse_mean = np.mean(irmse)
mae_std = np.std(mae)
rmse_std = np.std(rmse)
imae_std = np.std(imae)
irmse_std = np.std(irmse)
log('Evaluation results:', log_path)
log('{:>8} {:>8} {:>8} {:>8}'.format(
'MAE', 'RMSE', 'iMAE', 'iRMSE'),
log_path)
log('{:8.3f} {:8.3f} {:8.3f} {:8.3f}'.format(
mae_mean, rmse_mean, imae_mean, irmse_mean),
log_path)
log('{:>8} {:>8} {:>8} {:>8}'.format(
'+/-', '+/-', '+/-', '+/-'),
log_path)
log('{:8.3f} {:8.3f} {:8.3f} {:8.3f}'.format(
mae_std, rmse_std, imae_std, irmse_std),
log_path)
    log('Total time: {:.2f} ms  Average time per sample: {:.2f} ms'.format(
        time_elapse, time_elapse / float(n_sample)),
        log_path)
if save_outputs:
log('Saving outputs to {}'.format(output_path), log_path)
outputs = zip(images, output_depths, sparse_depths, ground_truths)
image_dirpath = os.path.join(output_path, 'image')
output_depth_dirpath = os.path.join(output_path, 'output_depth')
sparse_depth_dirpath = os.path.join(output_path, 'sparse_depth')
ground_truth_dirpath = os.path.join(output_path, 'ground_truth')
dirpaths = [
image_dirpath,
output_depth_dirpath,
sparse_depth_dirpath,
ground_truth_dirpath
]
for dirpath in dirpaths:
if not os.path.exists(dirpath):
os.makedirs(dirpath)
for idx, (image, output_depth, sparse_depth, ground_truth) in enumerate(outputs):
if keep_input_filenames:
filename = os.path.basename(image_paths[idx])
else:
filename = '{:010d}.png'.format(idx)
image_path = os.path.join(image_dirpath, filename)
image = (255 * image).astype(np.uint8)
Image.fromarray(image).save(image_path)
output_depth_path = os.path.join(output_depth_dirpath, filename)
data_utils.save_depth(output_depth, output_depth_path)
sparse_depth_path = os.path.join(sparse_depth_dirpath, filename)
data_utils.save_depth(sparse_depth, sparse_depth_path)
if ground_truth_available:
ground_truth_path = os.path.join(ground_truth_dirpath, filename)
data_utils.save_depth(ground_truth[..., 0], ground_truth_path)
def log_input_settings(log_path,
n_batch=None,
n_height=None,
n_width=None,
input_channels_image=settings.INPUT_CHANNELS_IMAGE,
input_channels_depth=settings.INPUT_CHANNELS_DEPTH,
normalized_image_range=settings.NORMALIZED_IMAGE_RANGE,
outlier_removal_kernel_size=settings.OUTLIER_REMOVAL_KERNEL_SIZE,
outlier_removal_threshold=settings.OUTLIER_REMOVAL_THRESHOLD):
batch_settings_text = ''
batch_settings_vars = []
if n_batch is not None:
batch_settings_text = batch_settings_text + 'n_batch={}'
batch_settings_vars.append(n_batch)
batch_settings_text = \
batch_settings_text + ' ' if len(batch_settings_text) > 0 else batch_settings_text
if n_height is not None:
batch_settings_text = batch_settings_text + 'n_height={}'
batch_settings_vars.append(n_height)
batch_settings_text = \
batch_settings_text + ' ' if len(batch_settings_text) > 0 else batch_settings_text
if n_width is not None:
batch_settings_text = batch_settings_text + 'n_width={}'
batch_settings_vars.append(n_width)
log('Input settings:', log_path)
if len(batch_settings_vars) > 0:
log(batch_settings_text.format(*batch_settings_vars),
log_path)
log('input_channels_image={} input_channels_depth={}'.format(
input_channels_image, input_channels_depth),
log_path)
log('normalized_image_range={}'.format(normalized_image_range),
log_path)
log('outlier_removal_kernel_size={} outlier_removal_threshold={:.2f}'.format(
outlier_removal_kernel_size, outlier_removal_threshold),
log_path)
log('', log_path)
def log_network_settings(log_path,
min_pool_sizes_sparse_to_dense_pool,
max_pool_sizes_sparse_to_dense_pool,
n_convolution_sparse_to_dense_pool,
n_filter_sparse_to_dense_pool,
n_filters_encoder_image,
n_filters_encoder_depth,
resolutions_backprojection,
n_filters_decoder,
deconv_type,
min_predict_depth,
max_predict_depth,
weight_initializer,
activation_func,
parameters_depth_model=[],
parameters_pose_model=[]):
n_parameter_depth = sum(p.numel() for p in parameters_depth_model)
n_parameter_pose = sum(p.numel() for p in parameters_pose_model)
n_parameter = n_parameter_depth + n_parameter_pose
    n_parameter_text = 'n_parameter={}'.format(n_parameter)
    n_parameter_vars = []
    if n_parameter_depth > 0:
        n_parameter_text = n_parameter_text + ' n_parameter_depth={}'
        n_parameter_vars.append(n_parameter_depth)
    if n_parameter_pose > 0:
        n_parameter_text = n_parameter_text + ' n_parameter_pose={}'
        n_parameter_vars.append(n_parameter_pose)
log('Sparse to dense pooling settings:', log_path)
log('min_pool_sizes_sparse_to_dense_pool={}'.format(min_pool_sizes_sparse_to_dense_pool),
log_path)
log('max_pool_sizes_sparse_to_dense_pool={}'.format(max_pool_sizes_sparse_to_dense_pool),
log_path)
log('n_convolution_sparse_to_dense_pool={}'.format(n_convolution_sparse_to_dense_pool),
log_path)
log('n_filter_sparse_to_dense_pool={}'.format(n_filter_sparse_to_dense_pool),
log_path)
log('', log_path)
log('Depth network settings:', log_path)
log('n_filters_encoder_image={}'.format(n_filters_encoder_image),
log_path)
log('n_filters_encoder_depth={}'.format(n_filters_encoder_depth),
log_path)
log('resolutions_backprojection={}'.format(resolutions_backprojection),
log_path)
log('n_filters_decoder={}'.format(n_filters_decoder),
log_path)
log('deconv_type={}'.format(deconv_type),
log_path)
log('min_predict_depth={:.2f} max_predict_depth={:.2f}'.format(
min_predict_depth, max_predict_depth),
log_path)
log('', log_path)
log('Weight settings:', log_path)
log('n_parameter={} n_parameter_depth={} n_parameter_pose={}'.format(
n_parameter, n_parameter_depth, n_parameter_pose),
log_path)
log('weight_initializer={} activation_func={}'.format(
weight_initializer, activation_func),
log_path)
log('', log_path)
def log_training_settings(log_path,
n_batch,
n_train_sample,
n_train_step,
learning_rates,
learning_schedule,
augmentation_probabilities,
augmentation_schedule,
augmentation_random_crop_type,
augmentation_random_flip_type,
augmentation_random_remove_points,
augmentation_random_noise_type,
augmentation_random_noise_spread):
log('Training settings:', log_path)
log('n_sample={} n_epoch={} n_step={}'.format(
n_train_sample, learning_schedule[-1], n_train_step),
log_path)
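    # Express epoch boundaries as training steps using n_train_sample // n_batch steps per epoch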
log('learning_schedule=[%s]' %
', '.join('{}-{} : {}'.format(
ls * (n_train_sample // n_batch), le * (n_train_sample // n_batch), v)
for ls, le, v in zip([0] + learning_schedule[:-1], learning_schedule, learning_rates)),
log_path)
log('', log_path)
log('Augmentation settings:', log_path)
log('augmentation_schedule=[%s]' %
', '.join('{}-{} : {}'.format(
ls * (n_train_sample // n_batch), le * (n_train_sample // n_batch), v)
for ls, le, v in zip([0] + augmentation_schedule[:-1], augmentation_schedule, augmentation_probabilities)),
log_path)
log('augmentation_random_crop_type={}'.format(augmentation_random_crop_type),
log_path)
log('augmentation_random_flip_type={}'.format(augmentation_random_flip_type),
log_path)
log('augmentation_random_remove_points={}'.format(augmentation_random_remove_points),
log_path)
log('augmentation_random_noise_type={} augmentation_random_noise_spread={}'.format(
augmentation_random_noise_type, augmentation_random_noise_spread),
log_path)
log('', log_path)
def log_loss_func_settings(log_path,
w_color,
w_structure,
w_sparse_depth,
w_smoothness,
w_weight_decay_depth,
w_weight_decay_pose):
log('Loss function settings:', log_path)
log('w_color={:.1e} w_structure={:.1e} w_sparse_depth={:.1e}'.format(
w_color, w_structure, w_sparse_depth),
log_path)
log('w_smoothness={:.1e}'.format(w_smoothness),
log_path)
log('w_weight_decay_depth={:.1e} w_weight_decay_pose={:.1e}'.format(
w_weight_decay_depth, w_weight_decay_pose),
log_path)
log('', log_path)
def log_evaluation_settings(log_path,
min_evaluate_depth,
max_evaluate_depth):
log('Evaluation settings:', log_path)
log('min_evaluate_depth={:.2f} max_evaluate_depth={:.2f}'.format(
min_evaluate_depth, max_evaluate_depth),
log_path)
log('', log_path)
def log_system_settings(log_path,
checkpoint_path,
n_checkpoint=None,
summary_event_path=None,
n_summary=None,
n_summary_display=None,
validation_start_step=None,
depth_model_restore_path=None,
pose_model_restore_path=None,
device=torch.device('cuda'),
n_thread=8):
log('Checkpoint settings:', log_path)
if checkpoint_path is not None:
log('checkpoint_path={}'.format(checkpoint_path), log_path)
if n_checkpoint is not None:
log('checkpoint_save_frequency={}'.format(n_checkpoint), log_path)
if validation_start_step is not None:
log('validation_start_step={}'.format(validation_start_step), log_path)
log('', log_path)
summary_settings_text = ''
summary_settings_vars = []
if summary_event_path is not None:
log('Tensorboard settings:', log_path)
log('event_path={}'.format(summary_event_path), log_path)
if n_summary is not None:
summary_settings_text = summary_settings_text + 'log_summary_frequency={}'
summary_settings_vars.append(n_summary)
summary_settings_text = \
summary_settings_text + ' ' if len(summary_settings_text) > 0 else summary_settings_text
if n_summary_display is not None:
summary_settings_text = summary_settings_text + 'n_summary_display={}'
summary_settings_vars.append(n_summary_display)
summary_settings_text = \
summary_settings_text + ' ' if len(summary_settings_text) > 0 else summary_settings_text
if len(summary_settings_text) > 0:
log(summary_settings_text.format(*summary_settings_vars), log_path)
if depth_model_restore_path is not None and depth_model_restore_path != '':
log('depth_model_restore_path={}'.format(depth_model_restore_path),
log_path)
if pose_model_restore_path is not None and pose_model_restore_path != '':
log('pose_model_restore_path={}'.format(pose_model_restore_path),
log_path)
log('', log_path)
log('Hardware settings:', log_path)
log('device={}'.format(device.type), log_path)
log('n_thread={}'.format(n_thread), log_path)
log('', log_path)
| true
| true
|
790518e519f89f3285053fb66cb9445cc0fce99a
| 508
|
py
|
Python
|
nltk/classify/svm.py
|
tyomitch/nltk
|
943b7bb3181118710ea4f22e0b63ce25adfffa08
|
[
"Apache-2.0"
] | 4
|
2016-05-05T05:39:45.000Z
|
2019-08-14T01:39:00.000Z
|
nltk/classify/svm.py
|
tyomitch/nltk
|
943b7bb3181118710ea4f22e0b63ce25adfffa08
|
[
"Apache-2.0"
] | 1
|
2015-10-07T20:45:50.000Z
|
2015-10-07T22:26:07.000Z
|
nltk/classify/svm.py
|
tyomitch/nltk
|
943b7bb3181118710ea4f22e0b63ce25adfffa08
|
[
"Apache-2.0"
] | 2
|
2019-02-20T22:37:18.000Z
|
2020-09-02T20:14:51.000Z
|
# Natural Language Toolkit: SVM-based classifier
#
# Copyright (C) 2001-2022 NLTK Project
# Author: Leon Derczynski <leon@dcs.shef.ac.uk>
#
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
nltk.classify.svm was deprecated. For classification based
on support vector machines (SVMs), use nltk.classify.scikitlearn
(or `scikit-learn <https://scikit-learn.org>`_ directly).
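
For example (a minimal sketch; ``train_set`` is a placeholder list of
(featureset, label) pairs)::

    from nltk.classify.scikitlearn import SklearnClassifier
    from sklearn.svm import SVC

    classifier = SklearnClassifier(SVC()).train(train_set)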
"""
class SvmClassifier:
def __init__(self, *args, **kwargs):
raise NotImplementedError(__doc__)
| 28.222222
| 61
| 0.73622
|
class SvmClassifier:
def __init__(self, *args, **kwargs):
raise NotImplementedError(__doc__)
| true
| true
|
7905190283df02131d78042daf455229da69c968
| 611
|
py
|
Python
|
ops/nms.py
|
LiuHaolan/models
|
1639b3039237c3997c51ff87f0b6113bb2e8d236
|
[
"Apache-2.0"
] | 43
|
2021-06-03T09:07:08.000Z
|
2022-03-31T15:21:48.000Z
|
ops/nms.py
|
LiuHaolan/models
|
1639b3039237c3997c51ff87f0b6113bb2e8d236
|
[
"Apache-2.0"
] | 64
|
2021-05-31T10:34:06.000Z
|
2022-01-17T03:44:58.000Z
|
ops/nms.py
|
LiuHaolan/models
|
1639b3039237c3997c51ff87f0b6113bb2e8d236
|
[
"Apache-2.0"
] | 37
|
2021-07-04T03:13:18.000Z
|
2022-03-25T07:30:47.000Z
|
import oneflow as flow
import oneflow as flow_exp
from oneflow import Tensor
def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor:
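    # Sort candidates by descending score so suppression always keeps the highest-scoring box in each overlap cluster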
scores_inds = flow_exp.argsort(scores, dim=0, descending=True)
boxes = flow._C.gather(boxes, scores_inds, axis=0)
_nms_op = (
flow_exp.builtin_op("nms")
.Input("in")
.Output("out")
.Attr("iou_threshold", iou_threshold)
.Attr("keep_n", -1)
.Build()
)
keep = _nms_op(boxes)[0]
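    # keep is a mask over the score-sorted boxes; argwhere + squeeze turns it into kept row indices,
    # and the final gather maps those rows back to indices into the original (unsorted) input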
index = flow_exp.squeeze(flow_exp.argwhere(keep), dim=[1])
return flow._C.gather(scores_inds, index, axis=0)
| 30.55
| 71
| 0.648118
|
import oneflow as flow
import oneflow as flow_exp
from oneflow import Tensor
def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor:
scores_inds = flow_exp.argsort(scores, dim=0, descending=True)
boxes = flow._C.gather(boxes, scores_inds, axis=0)
_nms_op = (
flow_exp.builtin_op("nms")
.Input("in")
.Output("out")
.Attr("iou_threshold", iou_threshold)
.Attr("keep_n", -1)
.Build()
)
keep = _nms_op(boxes)[0]
index = flow_exp.squeeze(flow_exp.argwhere(keep), dim=[1])
return flow._C.gather(scores_inds, index, axis=0)
| true
| true
|
790519040488b2751b8ca57d8241ea867d38f82f
| 6,597
|
py
|
Python
|
modules/youtube_music.py
|
mavroudo/jarvis-discord
|
918540a67d7ac48584e8efd6a06385ec5228f4d5
|
[
"MIT"
] | null | null | null |
modules/youtube_music.py
|
mavroudo/jarvis-discord
|
918540a67d7ac48584e8efd6a06385ec5228f4d5
|
[
"MIT"
] | null | null | null |
modules/youtube_music.py
|
mavroudo/jarvis-discord
|
918540a67d7ac48584e8efd6a06385ec5228f4d5
|
[
"MIT"
] | null | null | null |
import os
import discord
import youtube_dl as ytdl
class MusicPlayer:
'''
    This module is responsible for connecting and disconnecting the bot from a voice channel, downloading songs from
    youtube and adding them to the queue. It offers basic music functions like pause, resume, stop and play, in
    order to give users a simple music bot based on the new discord api.
'''
def __init__(self):
self.queue = []
self.voiceChannel = None
self.ydl_opts = {
'format': 'bestaudio/best',
# 'quiet' : True,
'outtmpl': 'songs/%(title)s-%(id)s.%(ext)s',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
}
async def connect(self, channel):
'''
        Connects the bot to the given voice channel, if it is not already connected.
        :param channel: The channel from which the user sent the command
'''
if self.voiceChannel is None or not self.voiceChannel.is_connected():
self.voiceChannel = await channel.connect()
async def disconnect(self):
'''
        Disconnects from the channel the bot is currently connected to. If there is no such channel,
        this function will simply do nothing
'''
if self.voiceChannel is not None and self.voiceChannel.is_connected():
await self.voiceChannel.disconnect()
def getNextSong(self):
'''
If the queue is not empty this function will remove the first song from the queue and return it
:return: the next song of the queue, or None if the queue is empty
'''
if self.queue:
return self.queue.pop(0)
else:
return None
def clear_folder(self):
'''
        Because the songs are downloaded, it is important to delete them when they are no longer needed.
This function deletes the songs that are not in the queue (not one of the upcoming songs)
'''
for song in os.listdir("songs/"):
if "songs/" + song not in self.queue:
os.remove("songs/" + song)
async def add_song(self, url, ctx):
'''
        Add a new song from youtube to the queue. It will not be downloaded if it is already in the songs folder
        :param url: The url of the youtube song
        :param ctx: The channel from which the user sent the command
'''
with ytdl.YoutubeDL(self.ydl_opts) as ydl:
info_dict = ydl.extract_info(url, download=False)
title = "songs/" + info_dict['title'] + "-" + info_dict['id'] + ".mp3"
if title not in self.queue:
await ctx.send("Your song is downloading now!")
ydl.extract_info(url, download=True)
self.queue.append(title)
if self.voiceChannel is None or not self.voiceChannel.is_connected() or not self.voiceChannel.is_playing():
await ctx.send("Your song has added to the queue, use $play to start the party!!")
else:
await ctx.send("Your song has added to the queue")
def load_next_song(self):
'''
This will create a FFMPEG object and start playing it in the voice channel
'''
if not self.voiceChannel.is_playing() and self.queue:
audio_source = discord.FFmpegPCMAudio(self.getNextSong())
            # Play the song and, once it ends, let discord.py's `after` callback queue up the next one
            self.voiceChannel.play(audio_source, after=lambda error: self.load_next_song())
async def pause_song(self, ctx):
'''
        Pauses a song that is currently playing, or sends a message if there is no such song
:param ctx: The channel from which the user gave the command.
'''
if self.voiceChannel is not None and self.voiceChannel.is_connected() and self.voiceChannel.is_playing():
self.voiceChannel.pause()
else:
await ctx.send("There is no song playing in order to pause it")
async def resume_song(self, ctx):
'''
        Resumes a song if one has been paused, or sends a message if there is no such song
:param ctx: The channel from which the user gave the command.
'''
if self.voiceChannel is not None and self.voiceChannel.is_connected() and self.voiceChannel.is_paused():
self.voiceChannel.resume()
else:
await ctx.send("There is no song paused in order to resume it")
async def stop(self, ctx):
'''
        Stops the music if any is playing, or sends a message if there is not. At the end, clears the folder of
        unnecessary songs.
:param ctx: The channel from which the user gave the command.
'''
if self.voiceChannel is not None and self.voiceChannel.is_connected() and self.voiceChannel.is_playing():
self.voiceChannel.stop()
else:
await ctx.send("There is no song playing in order to stop it")
self.clear_folder()
async def next(self, ctx):
'''
        Stops this song and starts the next one. The user will be informed with a message if there is no other song
        or if no song is playing at the moment
:param ctx: The channel from which the user gave the command.
'''
if self.voiceChannel is not None and self.voiceChannel.is_connected() and self.voiceChannel.is_playing() \
and self.queue:
await self.stop(ctx)
self.load_next_song()
elif not self.queue:
await ctx.send("There is no other song in the queue")
else:
await ctx.send("There is no song playing, maybe use $play to start playing songs from the queue")
async def play(self, ctx, channel):
'''
        Starts playing the first song in the queue. If there are no songs in the queue or there is already music
        playing at this moment, the user will be informed with messages
        :param ctx: The channel from which the user gave the command.
        :param channel: The voice channel to connect the bot to.
'''
await self.connect(channel)
if self.voiceChannel is not None and self.voiceChannel.is_connected() and not self.voiceChannel.is_playing()\
and self.queue:
self.load_next_song()
elif not self.queue:
await ctx.send("There is no song in the list")
elif self.voiceChannel.is_playing():
await ctx.send("THere is already some music playing. Increase the volume and join the party!")
| 43.117647
| 120
| 0.621949
|
import os
import discord
import youtube_dl as ytdl
class MusicPlayer:
def __init__(self):
self.queue = []
self.voiceChannel = None
self.ydl_opts = {
'format': 'bestaudio/best',
'outtmpl': 'songs/%(title)s-%(id)s.%(ext)s',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
}
async def connect(self, channel):
if self.voiceChannel is None or not self.voiceChannel.is_connected():
self.voiceChannel = await channel.connect()
async def disconnect(self):
if self.voiceChannel is not None and self.voiceChannel.is_connected():
await self.voiceChannel.disconnect()
def getNextSong(self):
if self.queue:
return self.queue.pop(0)
else:
return None
def clear_folder(self):
for song in os.listdir("songs/"):
if "songs/" + song not in self.queue:
os.remove("songs/" + song)
async def add_song(self, url, ctx):
with ytdl.YoutubeDL(self.ydl_opts) as ydl:
info_dict = ydl.extract_info(url, download=False)
title = "songs/" + info_dict['title'] + "-" + info_dict['id'] + ".mp3"
if title not in self.queue:
await ctx.send("Your song is downloading now!")
ydl.extract_info(url, download=True)
self.queue.append(title)
if self.voiceChannel is None or not self.voiceChannel.is_connected() or not self.voiceChannel.is_playing():
await ctx.send("Your song has added to the queue, use $play to start the party!!")
else:
await ctx.send("Your song has added to the queue")
def load_next_song(self):
if not self.voiceChannel.is_playing() and self.queue:
audio_source = discord.FFmpegPCMAudio(self.getNextSong())
            self.voiceChannel.play(audio_source, after=lambda error: self.load_next_song())
async def pause_song(self, ctx):
if self.voiceChannel is not None and self.voiceChannel.is_connected() and self.voiceChannel.is_playing():
self.voiceChannel.pause()
else:
await ctx.send("There is no song playing in order to pause it")
async def resume_song(self, ctx):
if self.voiceChannel is not None and self.voiceChannel.is_connected() and self.voiceChannel.is_paused():
self.voiceChannel.resume()
else:
await ctx.send("There is no song paused in order to resume it")
async def stop(self, ctx):
if self.voiceChannel is not None and self.voiceChannel.is_connected() and self.voiceChannel.is_playing():
self.voiceChannel.stop()
else:
await ctx.send("There is no song playing in order to stop it")
self.clear_folder()
async def next(self, ctx):
if self.voiceChannel is not None and self.voiceChannel.is_connected() and self.voiceChannel.is_playing() \
and self.queue:
await self.stop(ctx)
self.load_next_song()
elif not self.queue:
await ctx.send("There is no other song in the queue")
else:
await ctx.send("There is no song playing, maybe use $play to start playing songs from the queue")
async def play(self, ctx, channel):
await self.connect(channel)
if self.voiceChannel is not None and self.voiceChannel.is_connected() and not self.voiceChannel.is_playing()\
and self.queue:
self.load_next_song()
elif not self.queue:
await ctx.send("There is no song in the list")
elif self.voiceChannel.is_playing():
await ctx.send("THere is already some music playing. Increase the volume and join the party!")
| true
| true
|
7905193c012a60323d039f462f94f5022d2f795a
| 1,699
|
py
|
Python
|
app/core/migrations/0001_initial.py
|
ing-ivan-31/recipe-app
|
3006f93b7ace6cc6d092af1c18275d9b9e9d9853
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
ing-ivan-31/recipe-app
|
3006f93b7ace6cc6d092af1c18275d9b9e9d9853
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
ing-ivan-31/recipe-app
|
3006f93b7ace6cc6d092af1c18275d9b9e9d9853
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2 on 2019-05-08 20:45
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 49.970588
| 266
| 0.637434
|
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| true
| true
|
790519b7959d0e8179d7bf9c0c1de569a69fc55a
| 262
|
py
|
Python
|
HW2/majority_baseline_classifier.py
|
dompuiu/PROEA-821-005-Spring-2018
|
cde4a09a3a60b37a332895a524a7a3f63343e601
|
[
"MIT"
] | null | null | null |
HW2/majority_baseline_classifier.py
|
dompuiu/PROEA-821-005-Spring-2018
|
cde4a09a3a60b37a332895a524a7a3f63343e601
|
[
"MIT"
] | null | null | null |
HW2/majority_baseline_classifier.py
|
dompuiu/PROEA-821-005-Spring-2018
|
cde4a09a3a60b37a332895a524a7a3f63343e601
|
[
"MIT"
] | null | null | null |
from collections import Counter
class MajorityBaselineClassifier:
@staticmethod
def train(_, labels):
c = Counter(labels)
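        # Counter.most_common() sorts labels by frequency; the first entry's key is the majority label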
return c.most_common()[0][0]
@staticmethod
def predict(_, majority_label):
return majority_label
| 20.153846
| 36
| 0.675573
|
from collections import Counter
class MajorityBaselineClassifier:
@staticmethod
def train(_, labels):
c = Counter(labels)
return c.most_common()[0][0]
@staticmethod
def predict(_, majority_label):
return majority_label
| true
| true
|
790519e8f11a75329366a127f8c433f87f48c4a2
| 3,891
|
py
|
Python
|
tests/tasks/tasks/instr/test_apply_mag_field_task.py
|
jerjohste/exopy_hqc_legacy
|
c746beea6b175697ae3bfdab94309dc872d3d908
|
[
"BSD-3-Clause"
] | null | null | null |
tests/tasks/tasks/instr/test_apply_mag_field_task.py
|
jerjohste/exopy_hqc_legacy
|
c746beea6b175697ae3bfdab94309dc872d3d908
|
[
"BSD-3-Clause"
] | 1
|
2020-03-23T07:53:05.000Z
|
2020-03-23T07:53:05.000Z
|
tests/tasks/tasks/instr/test_apply_mag_field_task.py
|
jerjohste/exopy_hqc_legacy
|
c746beea6b175697ae3bfdab94309dc872d3d908
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by ExopyHqcLegacy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Tests for the ApplyMagFieldTask
"""
from multiprocessing import Event
import pytest
import enaml
from exopy.tasks.api import RootTask
from exopy.tasks.tasks.logic.loop_task import LoopTask
from exopy.testing.util import show_and_close_widget
from exopy_hqc_legacy.tasks.tasks.instr.apply_mag_field_task\
import ApplyMagFieldTask
with enaml.imports():
from exopy.tasks.tasks.logic.views.loop_view import LoopView
from exopy_hqc_legacy.tasks.tasks.instr.views.apply_mag_field_view\
import ApplyMagFieldView
from .instr_helper import (InstrHelper, InstrHelperStarter, DummyJob,
PROFILES, DRIVERS)
class TestApplyMagFieldTask(object):
def setup(self):
self.root = RootTask(should_stop=Event(), should_pause=Event())
self.task = ApplyMagFieldTask(name='Test',
parallel={'activated': False})
self.root.add_child_task(0, self.task)
self.root.run_time[DRIVERS] = {'Test': (InstrHelper,
InstrHelperStarter())}
self.root.run_time[PROFILES] =\
{'Test1':
{'connections': {'C': {'owner': [],
'output_fluctuations': 1e-6,
'heater_state': []}},
'settings': {'S': {'sweep_to_field': [DummyJob(), DummyJob(),
DummyJob()],
'sweep_to_persistent_field': [DummyJob()],
'read_persistent_field': [1],
'check_connection': [True]}}
}
}
        # This is set simply to make sure the tests of InstrTask pass.
self.task.selected_instrument = ('Test1', 'Test', 'C', 'S')
def test_check1(self):
"""Simply test that everything is ok if field can be evaluated.
"""
self.task.field = '3.0'
test, traceback = self.task.check(test_instr=True)
assert test
assert not traceback
assert self.task.get_from_database('Test_field') == 3.0
def test_check2(self):
"""Check handling a wrong field.
"""
self.task.field = '*1.0*'
test, traceback = self.task.check(test_instr=True)
assert not test
assert len(traceback) == 1
        assert 'root/Test-field' in traceback
assert self.task.get_from_database('Test_field') == 0.01
def test_perform1(self):
"""Simple test when everything is right.
"""
self.task.field = '2.0'
self.root.prepare()
self.task.perform()
assert self.root.get_from_database('Test_field') == 2.0
@pytest.mark.ui
def test_apply_mag_field_view1(exopy_qtbot, root_view, task_workbench):
"""Test ApplyMagFieldView widget outisde of a LoopTask.
"""
task = ApplyMagFieldTask(name='Test')
root_view.task.add_child_task(0, task)
show_and_close_widget(exopy_qtbot, ApplyMagFieldView(task=task, root=root_view))
@pytest.mark.ui
def test_apply_mag_field_view2(exopy_qtbot, root_view, task_workbench):
"""Test ApplyMagFieldView widget inside of a LoopTask.
"""
task = ApplyMagFieldTask(name='Test')
loop = LoopTask(name='r', task=task)
root_view.task.add_child_task(0, loop)
# XXX check for absence of target field
show_and_close_widget(exopy_qtbot, LoopView(task=loop, root=root_view))
| 34.131579
| 84
| 0.58751
|
from multiprocessing import Event
import pytest
import enaml
from exopy.tasks.api import RootTask
from exopy.tasks.tasks.logic.loop_task import LoopTask
from exopy.testing.util import show_and_close_widget
from exopy_hqc_legacy.tasks.tasks.instr.apply_mag_field_task\
import ApplyMagFieldTask
with enaml.imports():
from exopy.tasks.tasks.logic.views.loop_view import LoopView
from exopy_hqc_legacy.tasks.tasks.instr.views.apply_mag_field_view\
import ApplyMagFieldView
from .instr_helper import (InstrHelper, InstrHelperStarter, DummyJob,
PROFILES, DRIVERS)
class TestApplyMagFieldTask(object):
def setup(self):
self.root = RootTask(should_stop=Event(), should_pause=Event())
self.task = ApplyMagFieldTask(name='Test',
parallel={'activated': False})
self.root.add_child_task(0, self.task)
self.root.run_time[DRIVERS] = {'Test': (InstrHelper,
InstrHelperStarter())}
self.root.run_time[PROFILES] =\
{'Test1':
{'connections': {'C': {'owner': [],
'output_fluctuations': 1e-6,
'heater_state': []}},
'settings': {'S': {'sweep_to_field': [DummyJob(), DummyJob(),
DummyJob()],
'sweep_to_persistent_field': [DummyJob()],
'read_persistent_field': [1],
'check_connection': [True]}}
}
}
self.task.selected_instrument = ('Test1', 'Test', 'C', 'S')
def test_check1(self):
self.task.field = '3.0'
test, traceback = self.task.check(test_instr=True)
assert test
assert not traceback
assert self.task.get_from_database('Test_field') == 3.0
def test_check2(self):
self.task.field = '*1.0*'
test, traceback = self.task.check(test_instr=True)
assert not test
assert len(traceback) == 1
        assert 'root/Test-field' in traceback
assert self.task.get_from_database('Test_field') == 0.01
def test_perform1(self):
self.task.field = '2.0'
self.root.prepare()
self.task.perform()
assert self.root.get_from_database('Test_field') == 2.0
@pytest.mark.ui
def test_apply_mag_field_view1(exopy_qtbot, root_view, task_workbench):
task = ApplyMagFieldTask(name='Test')
root_view.task.add_child_task(0, task)
show_and_close_widget(exopy_qtbot, ApplyMagFieldView(task=task, root=root_view))
@pytest.mark.ui
def test_apply_mag_field_view2(exopy_qtbot, root_view, task_workbench):
task = ApplyMagFieldTask(name='Test')
loop = LoopTask(name='r', task=task)
root_view.task.add_child_task(0, loop)
show_and_close_widget(exopy_qtbot, LoopView(task=loop, root=root_view))
| true
| true
|
790519f2c4351574c9ff895f5f1bf4735b184586
| 10,882
|
py
|
Python
|
monitor2mail.py
|
bkittler/monitor2mail
|
e07474f81f954ac7ef8d47b3f4a8185ea9191318
|
[
"MIT"
] | null | null | null |
monitor2mail.py
|
bkittler/monitor2mail
|
e07474f81f954ac7ef8d47b3f4a8185ea9191318
|
[
"MIT"
] | null | null | null |
monitor2mail.py
|
bkittler/monitor2mail
|
e07474f81f954ac7ef8d47b3f4a8185ea9191318
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# coding: utf-8
import sys
from sys import exit
import os
import socket
import requests
import smtplib
import ssl
import dns.resolver
""" Python script to monitor list of url (https/http/ns/mx)
and send mail if down"""
__author__ = "Benjamin Kittler"
__copyright__ = "Copyright 2021, KITTLER"
__credits__ = ["Benjamin Kittler"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Benjamin Kittler"
__email__ = "kittler @T. gmail. com"
__status__ = "integration"
"""
############################################################
# Please complete these variables before the first launch #
############################################################
"""
# mail provider : TO BE MODIFIED
smtp_address = 'smtp.gmail.com'
smtp_port = 465
# email address and password : TO BE MODIFIED
email_address = 'EMAIL@EMAIL.COM'
email_password = 'PASSWORD'
""" Python script to monitor list of url (https/http/ns/mx)
and send mail if down"""
def check(file_to_check, testmode, debug):
"""
    Function opens the file, reads each line and fills a dictionary.
    For each entry, launches a url check (http/https) or a resolution followed by a ping for MX/NS entries.
    If one url does not respond, sends an alert email.
    Parameters
    ----------
    file_to_check : string
        This is the name of the file that contains the list of urls to be checked
        and the mail address for alerts
    testmode : string
        This value is 0 by default and set to 1 if the user launches the script in test mode:
        printing enabled and no mail sent
    debug : string
        This value is 0 by default and set to 1 if the user launches the script in debug mode:
        more printing enabled and no mail sent
Returns
-------
None.
"""
try:
file = open(file_to_check, "r")
except:
exit('open file failed')
# lines contain all line of file
lines = file.readlines()
# close the file after read all lines
file.close()
# create dict of url
url_dict = {}
# add each element on dict
    for line in lines:
        # strip the trailing newline
        line = line.replace("\n", "")
        # normalize spaces to tabs
        line = line.replace(" ", "\t")
        # collapse repeated tabs
        line = line.replace("\t\t\t", "\t")
        line = line.replace("\t\t", "\t")
        # strip any http:// or https:// scheme prefix
        line = line.replace("http://", "")
        line = line.replace("https://", "")
        element = line.split("\t")
        cle = element[0]
        data = element[1]
        url_dict[cle] = data
if debug == 1:
print("Url dict : \n", url_dict)
if testmode == 1:
print("Check :")
for url, mail in url_dict.items():
# check http or https entry
if "ns://" not in url and "mx://" not in url and "ping://" not in url:
availability = str(request_url(url))
# import pdb; pdb.set_trace()
if (availability == ("200") or (availability == "301")
or (availability == "302")):
request_url_result = "UP"
else:
request_url_result = "DOWN"
if testmode == 1:
print("url : ", url, " -> mail : ", mail,
" Result :", request_url_result)
else:
if request_url_result == "DOWN":
# print("mail :", mail)
alert_mail(mail, request_url_result, url)
# check ns entry
elif "ns://" in url:
request_url_result = ping_name(url, "NS")
if testmode == 1:
print("url : ", url, " -> mail : ", mail,
" Result NS :", request_url_result)
else:
if request_url_result == "DOWN":
# print("mail :", mail)
alert_mail(mail, request_url_result, url)
# check mx entry
elif "mx://" in url:
request_url_result = ping_name(url, "MX")
if testmode == 1:
print("url : ", url, " -> mail : ", mail,
" Result MX :", request_url_result)
else:
if request_url_result == "DOWN":
# print("mail :", mail)
alert_mail(mail, request_url_result, url)
# check ping entry
elif "ping://" in url:
url = url.replace("ping://", "")
request_url_result = ping_ip(url)
if testmode == 1:
print("url : ", url, " -> mail : ", mail,
" Result Ping :", request_url_result)
else:
if request_url_result == "DOWN":
# print("mail :", mail)
alert_mail(mail, request_url_result, url)
# ignore entry
else:
if testmode == 1:
print("url : ", url, " -> mail : ", mail, "ignored")
exit()
def request_url(url):
"""
Function to send https or http request to this url and return code result.
Parameters
----------
url : string
This variable contain url must be checked
Returns
-------
status_code : int
Code result
"""
try:
url = "https://" + format(url)
response = requests.head(url, allow_redirects=True, timeout=10)
except:
try:
url = "http://" + format(url)
response = requests.head(url, allow_redirects=True, timeout=10)
except:
return "404"
# print("Request failed")
if response.status_code:
return response.status_code
else:
return "404"
def ping_name(name, dns_type):
"""
Function to resolve name and ping this host.
print the result of ping
Parameters
----------
name : string
This variable contain the name (host) must be checked
dns_type : string
This variable contain the DNS type : A, NS, MX
Returns
-------
status : String
Status result : UP or DOWN
"""
# clean name host
name = name.replace("ns://", "")
name = name.replace("mx://", "")
# make resolution
if dns_type == "A":
try:
addr1 = socket.gethostbyname_ex(name)
print("Resolution -> {}".format(addr1[2]))
name = addr1[2]
except:
print("Resolution failed")
# make resolution
if dns_type == "MX":
try:
answers = dns.resolver.resolve(name, 'MX')
for rdata in answers:
# import pdb; pdb.set_trace()
#print('Mail exchange:',rdata.exchange)
addr1 = socket.gethostbyname_ex(str(rdata.exchange))
#print("Resolution -> {}".format(addr1[2]))
name = addr1[2]
if ping_ip(name) == "UP":
return "UP"
return ping_ip(name)
except:
print("Resolution failed")
return "DOWN"
# make resolution
if dns_type == "NS":
try:
answers = dns.resolver.resolve(name, 'NS')
for rdata in answers:
#import pdb; pdb.set_trace()
#print('Mail exchange:',rdata.exchange)
addr1 = socket.gethostbyname_ex(str(rdata.target))
#print("Resolution -> {}".format(addr1[2]))
name = addr1[2]
for srv in name:
if ping_ip(srv) == "UP":
return "UP"
return ping_ip(name)
except:
print("Resolution failed")
return "DOWN"
def ping_ip(name):
"""
Function to ping name.
return the result of ping
Parameters
----------
name : string
This variable is IP address
Returns
-------
status : String
Status result : UP or DOWN
"""
try:
# import pdb; pdb.set_trace()
name = str(name).strip('[]')
name = str(name).strip("''")
hostname = format(name)
response = os.system("ping -c 1 " + hostname + " > /dev/null 2>&1")
# import pdb; pdb.set_trace()
if response == 0:
return "UP"
# print("Response ping : OK")
else:
return "DOWN"
# print("Response ping : KO")
except requests.ConnectionError:
return "DOWN"
# print("Response ping : failed to connect")
return "DOWN"
def alert_mail(email_receiver, service_status, url):
"""
Function to send email Alert
Parameters
----------
email_receiver : string
destination email for alert
service_status : string
service status
url : string
url concertned by alert
Returns
-------
None.
"""
# create subject
service_status = "Subject:{}\n\n".format(service_status) + "Server :{} \n".format(url)
    # create the connection
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_address, smtp_port, context=context) as server:
        # account connection
server.login(email_address, email_password)
# sending mail
server.sendmail(email_address, email_receiver, service_status)
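# The string built above doubles as header and body; for example an alert for
# a failing host is sent as:
#
#   Subject:DOWN
#
#   Server :example.com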
def main(argv, testmode, debug):
"""
Print the fileopened and lauchn the check of file with testmode / debug value
Parameters
----------
file_to_check : string
This is the name of the fillethat contain list of url must be checked
and mail for alert
testmode : string
This value is 0 by defaut and is to 1 if user launchscript on test mode:
print enabled and no mail send
debug : string
This value is 0 by defaut and is to 1 if user launchscript on debug mode:
more print enabled and no mail send
Returns
-------
None.
"""
# print argument for verification
if testmode == 1:
print("Import file: {}".format(argv[0]))
file = str(argv[0])
# launch check file entry
check(file, testmode, debug)
if __name__ == "__main__":
"""
Get arguments from command line and fixe value :
testmode :
This value is 0 by defaut and is to 1 if user launchscript on test mode:
print enabled and no mail send
debug :
This value is 0 by defaut and is to 1 if user launchscript on debug mode:
more print enabled and no mail send
call main with arguments
"""
# pretrieve argument, seach test mode and launch main
if "-t" in sys.argv:
testmode = 1
debug = 0
elif "--test" in sys.argv:
testmode = 1
debug = 0
elif "--debug" in sys.argv:
testmode = 1
debug = 1
else:
testmode = 0
debug = 0
matching = [cmd for cmd in sys.argv if ".txt" in cmd]
main(matching, testmode, debug)
| 29.096257
| 96
| 0.541996
|
| true
| true
|
790519f577da64a77527e1f709fe47db6b6725cf
| 14,819
|
py
|
Python
|
mkt/reviewers/models.py
|
ngokevin/zamboni
|
a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/reviewers/models.py
|
ngokevin/zamboni
|
a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/reviewers/models.py
|
ngokevin/zamboni
|
a33dcd489175d8e7ba1c02ee4dabb6cfdc405e69
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
from django.conf import settings
from django.core.cache import cache
from django.db import models
from django.db.models import Sum
import commonware.log
import waffle
import amo
import mkt.constants.comm as comm
from amo.utils import cache_ns_key
from mkt.comm.utils import create_comm_note
from mkt.site.mail import send_mail_jinja
from mkt.site.models import ManagerBase, ModelBase, skip_cache
from mkt.tags.models import Tag
from mkt.translations.fields import save_signal, TranslatedField
from mkt.users.models import UserProfile
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.models import Webapp
user_log = commonware.log.getLogger('z.users')
QUEUE_TARAKO = 'tarako'
class CannedResponse(ModelBase):
name = TranslatedField()
response = TranslatedField(short=False)
sort_group = models.CharField(max_length=255)
class Meta:
db_table = 'cannedresponses'
def __unicode__(self):
return unicode(self.name)
models.signals.pre_save.connect(save_signal, sender=CannedResponse,
dispatch_uid='cannedresponses_translations')
class EditorSubscription(ModelBase):
user = models.ForeignKey(UserProfile)
addon = models.ForeignKey(Webapp)
class Meta:
db_table = 'editor_subscriptions'
class ReviewerScore(ModelBase):
user = models.ForeignKey(UserProfile, related_name='_reviewer_scores')
addon = models.ForeignKey(Webapp, blank=True, null=True, related_name='+')
score = models.SmallIntegerField()
# For automated point rewards.
note_key = models.SmallIntegerField(choices=amo.REVIEWED_CHOICES.items(),
default=0)
# For manual point rewards with a note.
note = models.CharField(max_length=255, blank=True)
class Meta:
db_table = 'reviewer_scores'
ordering = ('-created',)
@classmethod
def get_key(cls, key=None, invalidate=False):
namespace = 'riscore'
if not key: # Assuming we're invalidating the namespace.
cache_ns_key(namespace, invalidate)
return
else:
# Using cache_ns_key so each cache val is invalidated together.
ns_key = cache_ns_key(namespace, invalidate)
return '%s:%s' % (ns_key, key)
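    # Illustrative keying (assuming cache_ns_key prepends a versioned
    # namespace): get_key('get_total:42') yields something like
    # 'riscore:<version>:get_total:42', and get_key(invalidate=True) bumps
    # the version so every derived key misses at once.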
@classmethod
def get_event(cls, addon, status, **kwargs):
"""Return the review event type constant.
This is determined by the app type and the queue the addon is
currently in (which is determined from the status).
Note: We're not using addon.status because this is called after the
status has been updated by the reviewer action.
"""
if addon.is_packaged:
if status in amo.WEBAPPS_APPROVED_STATUSES:
return amo.REVIEWED_WEBAPP_UPDATE
else: # If it's not PUBLIC, assume it's a new submission.
return amo.REVIEWED_WEBAPP_PACKAGED
else: # It's a hosted app.
in_rereview = kwargs.pop('in_rereview', False)
if status in amo.WEBAPPS_APPROVED_STATUSES and in_rereview:
return amo.REVIEWED_WEBAPP_REREVIEW
else:
return amo.REVIEWED_WEBAPP_HOSTED
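    # Illustrative mapping: a packaged app whose post-review status is in
    # WEBAPPS_APPROVED_STATUSES maps to REVIEWED_WEBAPP_UPDATE, while a
    # hosted app reviewed with in_rereview=True maps to
    # REVIEWED_WEBAPP_REREVIEW.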
@classmethod
def award_points(cls, user, addon, status, **kwargs):
"""Awards points to user based on an event and the queue.
`event` is one of the `REVIEWED_` keys in constants.
`status` is one of the `STATUS_` keys in constants.
"""
event = cls.get_event(addon, status, **kwargs)
score = amo.REVIEWED_SCORES.get(event)
if score:
cls.objects.create(user=user, addon=addon, score=score,
note_key=event)
cls.get_key(invalidate=True)
user_log.info(
(u'Awarding %s points to user %s for "%s" for addon %s'
% (score, user, amo.REVIEWED_CHOICES[event], addon.id))
.encode('utf-8'))
return score
@classmethod
def award_moderation_points(cls, user, addon, review_id):
"""Awards points to user based on moderated review."""
event = amo.REVIEWED_APP_REVIEW
score = amo.REVIEWED_SCORES.get(event)
cls.objects.create(user=user, addon=addon, score=score, note_key=event)
cls.get_key(invalidate=True)
user_log.info(
u'Awarding %s points to user %s for "%s" for review %s' % (
score, user, amo.REVIEWED_CHOICES[event], review_id))
@classmethod
def get_total(cls, user):
"""Returns total points by user."""
key = cls.get_key('get_total:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
val = (ReviewerScore.objects.no_cache().filter(user=user)
.aggregate(total=Sum('score'))
.values())[0]
if val is None:
val = 0
cache.set(key, val, None)
return val
@classmethod
def get_recent(cls, user, limit=5):
"""Returns most recent ReviewerScore records."""
key = cls.get_key('get_recent:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
val = ReviewerScore.objects.no_cache().filter(user=user)
val = list(val[:limit])
cache.set(key, val, None)
return val
@classmethod
def get_performance(cls, user):
"""Returns sum of reviewer points."""
key = cls.get_key('get_performance:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
sql = """
SELECT `reviewer_scores`.*,
SUM(`reviewer_scores`.`score`) AS `total`
FROM `reviewer_scores`
LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
WHERE `reviewer_scores`.`user_id` = %s
ORDER BY `total` DESC
"""
with skip_cache():
val = list(ReviewerScore.objects.raw(sql, [user.id]))
cache.set(key, val, None)
return val
@classmethod
def get_performance_since(cls, user, since):
"""
Returns sum of reviewer points since the given datetime.
"""
key = cls.get_key('get_performance:%s:%s' % (user.id, since.isoformat()))
val = cache.get(key)
if val is not None:
return val
sql = """
SELECT `reviewer_scores`.*,
SUM(`reviewer_scores`.`score`) AS `total`
FROM `reviewer_scores`
LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
WHERE `reviewer_scores`.`user_id` = %s AND
`reviewer_scores`.`created` >= %s
ORDER BY `total` DESC
"""
with skip_cache():
val = list(ReviewerScore.objects.raw(sql, [user.id, since]))
cache.set(key, val, 3600)
return val
@classmethod
def _leaderboard_query(cls, since=None, types=None):
"""
Returns common SQL to leaderboard calls.
"""
query = (cls.objects
.values_list('user__id', 'user__display_name')
.annotate(total=Sum('score'))
.exclude(user__groups__name__in=('No Reviewer Incentives',
'Staff', 'Admins'))
.order_by('-total'))
if since is not None:
query = query.filter(created__gte=since)
if types is not None:
query = query.filter(note_key__in=types)
return query
@classmethod
def get_leaderboards(cls, user, days=7, types=None):
"""Returns leaderboards with ranking for the past given days.
This will return a dict of 3 items::
{'leader_top': [...],
         'leader_near': [...],
'user_rank': (int)}
If the user is not in the leaderboard, or if the user is in the top 5,
'leader_near' will be an empty list and 'leader_top' will contain 5
elements instead of the normal 3.
"""
key = cls.get_key('get_leaderboards:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
week_ago = datetime.date.today() - datetime.timedelta(days=days)
leader_top = []
leader_near = []
query = cls._leaderboard_query(since=week_ago, types=types)
scores = []
user_rank = 0
in_leaderboard = False
for rank, row in enumerate(query, 1):
user_id, name, total = row
scores.append({
'user_id': user_id,
'name': name,
'rank': rank,
'total': int(total),
})
if user_id == user.id:
user_rank = rank
in_leaderboard = True
if not in_leaderboard:
leader_top = scores[:5]
else:
if user_rank <= 5: # User is in top 5, show top 5.
leader_top = scores[:5]
else:
leader_top = scores[:3]
leader_near = [scores[user_rank - 2], scores[user_rank - 1]]
try:
leader_near.append(scores[user_rank])
except IndexError:
pass # User is last on the leaderboard.
val = {
'leader_top': leader_top,
'leader_near': leader_near,
'user_rank': user_rank,
}
cache.set(key, val, None)
return val
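    # Worked example: with ten ranked reviewers and the caller at rank 7,
    # leader_top is scores[:3] and leader_near is [scores[5], scores[6],
    # scores[7]], i.e. the reviewers ranked 6, 7 and 8.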
@classmethod
def all_users_by_score(cls):
"""
Returns reviewers ordered by highest total points first.
"""
query = cls._leaderboard_query()
scores = []
for row in query:
user_id, name, total = row
user_level = len(amo.REVIEWED_LEVELS) - 1
for i, level in enumerate(amo.REVIEWED_LEVELS):
if total < level['points']:
user_level = i - 1
break
# Only show level if it changes.
if user_level < 0:
level = ''
else:
level = amo.REVIEWED_LEVELS[user_level]['name']
scores.append({
'user_id': user_id,
'name': name,
'total': int(total),
'level': level,
})
prev = None
for score in reversed(scores):
if score['level'] == prev:
score['level'] = ''
else:
prev = score['level']
return scores
class EscalationQueue(ModelBase):
addon = models.ForeignKey(Webapp)
class Meta:
db_table = 'escalation_queue'
class RereviewQueue(ModelBase):
addon = models.ForeignKey(Webapp)
class Meta:
db_table = 'rereview_queue'
@classmethod
def flag(cls, addon, event, message=None):
cls.objects.get_or_create(addon=addon)
if message:
amo.log(event, addon, addon.current_version,
details={'comments': message})
else:
amo.log(event, addon, addon.current_version)
# TODO: if we ever get rid of ActivityLog for reviewer notes, replace
# all flag calls to use the comm constant and not have to use
# ACTION_MAP.
create_comm_note(addon, addon.current_version, None, message,
note_type=comm.ACTION_MAP(event))
def send_tarako_mail(review):
if not waffle.switch_is_active('comm-dashboard'):
send_mail_jinja(
'Low-memory devices review {passed}'.format(
passed='passed' if review.passed else 'failed'),
'reviewers/emails/tarako_review_complete.txt',
{'review': review},
recipient_list=[a.email for a in review.app.authors.all()],
from_email=settings.MKT_REVIEWERS_EMAIL)
def tarako_passed(review):
"""Add the tarako tag to the app."""
tag = Tag(tag_text='tarako')
tag.save_tag(review.app)
WebappIndexer.index_ids([review.app.pk])
send_tarako_mail(review)
def tarako_failed(review):
"""Remove the tarako tag from the app."""
tag = Tag(tag_text='tarako')
tag.remove_tag(review.app)
WebappIndexer.index_ids([review.app.pk])
send_tarako_mail(review)
class AdditionalReviewManager(ManagerBase):
def unreviewed(self, queue, and_approved=False):
query = {
'passed': None,
'queue': queue,
}
if and_approved:
query['app__status__in'] = amo.WEBAPPS_APPROVED_STATUSES
return self.get_queryset().no_cache().filter(**query)
def latest_for_queue(self, queue):
try:
return self.get_queryset().filter(queue=queue).latest()
except AdditionalReview.DoesNotExist:
return None
class AdditionalReview(ModelBase):
app = models.ForeignKey(Webapp)
queue = models.CharField(max_length=30)
passed = models.NullBooleanField()
review_completed = models.DateTimeField(null=True)
comment = models.CharField(null=True, blank=True, max_length=255)
reviewer = models.ForeignKey('users.UserProfile', null=True, blank=True)
objects = AdditionalReviewManager()
class Meta:
db_table = 'additional_review'
get_latest_by = 'created'
@property
def pending(self):
return self.passed is None
@property
def failed(self):
return self.passed is False
def __init__(self, *args, **kwargs):
super(AdditionalReview, self).__init__(*args, **kwargs)
from mkt.reviewers.utils import log_reviewer_action
self.log_reviewer_action = log_reviewer_action
def execute_post_review_task(self):
"""
Call the correct post-review function for the queue.
"""
# TODO: Pull this function from somewhere based on self.queue.
if self.passed is None:
raise ValueError('cannot execute post-review task when unreviewed')
elif self.passed:
tarako_passed(self)
action = amo.LOG.PASS_ADDITIONAL_REVIEW
else:
tarako_failed(self)
action = amo.LOG.FAIL_ADDITIONAL_REVIEW
self.log_reviewer_action(
self.app, self.reviewer, self.comment or '', action,
queue=self.queue)
def cleanup_queues(sender, instance, **kwargs):
RereviewQueue.objects.filter(addon=instance).delete()
EscalationQueue.objects.filter(addon=instance).delete()
models.signals.post_delete.connect(cleanup_queues, sender=Webapp,
dispatch_uid='queue-addon-cleanup')
| 32.569231
| 81
| 0.590526
|
| true
| true
|
79051a49194f4008ee31ecce59aa9ed0c04e3f09
| 5,667
|
py
|
Python
|
my_configs/new/mmdet/core/evaluation/class_names.py
|
UESTC-Liuxin/TianChi
|
d9f50236c2edea56f9520a6887098b469dbb0126
|
[
"Apache-2.0"
] | null | null | null |
my_configs/new/mmdet/core/evaluation/class_names.py
|
UESTC-Liuxin/TianChi
|
d9f50236c2edea56f9520a6887098b469dbb0126
|
[
"Apache-2.0"
] | null | null | null |
my_configs/new/mmdet/core/evaluation/class_names.py
|
UESTC-Liuxin/TianChi
|
d9f50236c2edea56f9520a6887098b469dbb0126
|
[
"Apache-2.0"
] | 1
|
2020-06-18T10:05:41.000Z
|
2020-06-18T10:05:41.000Z
|
import mmcv
def wider_face_classes():
return ['face']
def voc_classes():
return [
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
]
def imagenet_det_classes():
return [
'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',
'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',
'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap',
'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder',
'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito',
'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle',
'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker',
'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew',
'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper',
'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly',
'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig',
'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog',
'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart',
'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger',
'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',
'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',
'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',
'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',
'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can',
'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace',
'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume',
'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza',
'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine',
'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse',
'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator',
'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler',
'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver',
'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile',
'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula',
'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer',
'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine',
'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie',
'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',
'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',
'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',
'whale', 'wine_bottle', 'zebra'
]
def imagenet_vid_classes():
return [
'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',
'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda',
'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit',
'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle',
'watercraft', 'whale', 'zebra'
]
def coco_classes():
# return ['瓶盖破损','瓶盖变形','瓶盖坏边','瓶盖打旋','瓶盖断点','标贴歪斜','标贴起皱','标贴气泡','喷码正常','喷码异常']
    # Tianchi bottle-defect labels: cap broken, cap deformed, cap bad edge,
    # cap twisted, cap break point, print normal, print abnormal
    return ['瓶盖破损', '瓶盖变形', '瓶盖坏边', '瓶盖打旋', '瓶盖断点', '喷码正常', '喷码异常']
# return ['标贴歪斜', '标贴起皱', '标贴气泡']
# return [
# 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
# 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign',
# 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
# 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
# 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
# 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard',
# 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork',
# 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
# 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair',
# 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv',
# 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
# 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
# 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush'
# ]
def cityscapes_classes():
return [
'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
'bicycle'
]
dataset_aliases = {
'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'],
'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
'coco': ['coco', 'mscoco', 'ms_coco'],
'wider_face': ['WIDERFaceDataset', 'wider_face', 'WDIERFace'],
'cityscapes': ['cityscapes']
}
def get_classes(dataset):
"""Get class names of a dataset."""
alias2name = {}
for name, aliases in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if dataset in alias2name:
labels = eval(alias2name[dataset] + '_classes()')
else:
raise ValueError('Unrecognized dataset: {}'.format(dataset))
else:
        raise TypeError('dataset must be a str, but got {}'.format(type(dataset)))
return labels
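# Illustrative lookup: get_classes('voc07') resolves the alias to 'voc' via
# dataset_aliases and evaluates voc_classes(), returning the 20 Pascal VOC
# class names.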
| 46.834711
| 84
| 0.575437
|
| true
| true
|
79051a8255ffcf0096e3e128363c49d05aadf88e
| 1,641
|
py
|
Python
|
client_mining_p/miner.py
|
lambda-projects-lafriedel/Blockchain
|
507d5da1ee2ab86d25e056fec1fcddf024f5b607
|
[
"MIT"
] | null | null | null |
client_mining_p/miner.py
|
lambda-projects-lafriedel/Blockchain
|
507d5da1ee2ab86d25e056fec1fcddf024f5b607
|
[
"MIT"
] | null | null | null |
client_mining_p/miner.py
|
lambda-projects-lafriedel/Blockchain
|
507d5da1ee2ab86d25e056fec1fcddf024f5b607
|
[
"MIT"
] | null | null | null |
import hashlib
import requests
import sys
def valid_proof(last_proof, proof):
guess = f'{last_proof}{proof}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:6] == "000000"
def proof_of_work(last_proof):
"""
Simple Proof of Work Algorithm
- Find a number p' such that hash(pp') contains 6 leading
zeroes, where p is the previous p'
- p is the previous proof, and p' is the new proof
"""
print(f'\nSearch for proof initialized.\n')
proof = 0
while valid_proof(last_proof, proof) is False:
proof += 1
print(f'\nSearch for proof complete, proof is {proof}\n')
return proof
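# Worked example: valid_proof() demands six leading hex zeroes, so each guess
# succeeds with probability 16**-6, i.e. about one success per 16,777,216
# attempts on average.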
if __name__ == '__main__':
# What node are we interacting with?
if len(sys.argv) > 1:
node = sys.argv[1]
else:
node = "http://localhost:5000"
coins_mined = 0
# Run forever until interrupted
while True:
# Get the last proof from the server and look for a new one
proof = requests.get(url=node + '/last_proof')
new_proof = proof_of_work(proof.json()['proof'])
# When found, POST it to the server {"proof": new_proof}
data = {'proof': new_proof}
        attempt = requests.post(url=node + '/mine', json=data)
# If the server responds with 'New Block Forged'
if attempt.json()['message'] == 'New Block Forged':
# add 1 to the number of coins mined and print it.
coins_mined += 1
print("TOTAL COINS MINED:", coins_mined)
else:
# else print the message from the server.
print(attempt.json()['message'])
| 29.303571
| 67
| 0.61365
|
| true
| true
|
79051b16cfff283628eada367d42ed2614b0854c
| 4,322
|
py
|
Python
|
botCmd.py
|
Bankde/Hack-me-bot
|
bb5cbc34eb1581a4b17388ac4b824d9a71e52c19
|
[
"Apache-2.0"
] | 7
|
2018-12-03T02:49:08.000Z
|
2022-01-30T20:56:43.000Z
|
botCmd.py
|
pich4ya/Hack-me-bot
|
9804bd51337669ed9127c35a2231227338b513c8
|
[
"Apache-2.0"
] | null | null | null |
botCmd.py
|
pich4ya/Hack-me-bot
|
9804bd51337669ed9127c35a2231227338b513c8
|
[
"Apache-2.0"
] | 3
|
2018-12-04T11:10:04.000Z
|
2018-12-26T03:39:53.000Z
|
import sqlite3
import os
MSG_HELP = """List of commands:
!help
List commands
!listAll
List all animals
!show <animal>
Give description
!getFlag
Give flag (Admin only)
!serverInfo
Give server info (Dragonite only)
!addAdmin <id>
Make user an admin (Dragonite only)
!hint
Give you a hint.
Source_code:
https://github.com/Bankde/Hack-me-bot"""
MSG_NO_DRAGONITE = "You're not Dragonite. Go away !!"
MSG_SEARCH_ERROR = "We cannot find this animal in our database"
MSG_NO_ADMIN = "You are not Admin. Go away !!"
MSG_ANIMAL_CMD = "Please specify animal: e.g. !show dog"
APP_DB = "app.db"
HINT_URL = "https://i.imgur.com/QPKpeJL.jpg"
def init():
serverInfo = os.getenv('SERVER_INFO', None)
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
values = (serverInfo,)
cursor.execute("UPDATE ServerInfo SET info=?", values)
conn.commit()
values = ("TestLogUser", "TestLogMsg", )
cursor.execute("INSERT INTO MsgLog VALUES (?,?)", values)
conn.commit()
conn.close()
# Log userId and their msg here
def _msgLog(user, msg):
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
values = (user, msg,)
# CREATE TABLE MsgLog (user TEXT, msg TEXT);
cursor.execute("INSERT INTO MsgLog VALUES (?,?)", values)
conn.commit()
conn.close()
# Show animal description
def _showAnimal(animal):
try:
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
# CREATE TABLE Animals (animal TEXT UNIQUE, description TEXT);
cursor.execute("SELECT description FROM Animals WHERE animal='%s'" % (animal))
all_data = cursor.fetchone()
conn.close()
if all_data == None or len(all_data) == 0:
return MSG_SEARCH_ERROR
else:
return all_data[0]
except:
print("SQL error for arg: %s" % (animal))
return None
# List all animals
def _listAnimal():
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
# CREATE TABLE Animals (animal TEXT UNIQUE, description TEXT);
cursor.execute("SELECT animal FROM Animals")
all_data = cursor.fetchall()
conn.close()
return ", ".join([data[0] for data in all_data])
# My own reminder
def _getServerInfo(user):
if user.lower() == "dragonite":
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
# CREATE TABLE ServerInfo (info TEXT);
cursor.execute("SELECT info FROM ServerInfo")
all_data = cursor.fetchone()
conn.close()
return all_data[0]
else:
return MSG_NO_DRAGONITE
# You should ask Dragonite to add you to the admin list
def _addAdmin(user, arg):
if user.lower() == "dragonite":
try:
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
values = (arg,)
# CREATE TABLE Admins (user TEXT PRIMARY KEY);
cursor.execute("INSERT INTO Admins VALUES (?)", values)
conn.commit()
conn.close()
return "Successfully add %s into admin" % (arg)
except:
return "You're already an admin"
else:
return MSG_NO_DRAGONITE
# Flag is secret. No one besides admin should see it.
def _getFlag(user):
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
# CREATE TABLE Admins (user TEXT PRIMARY KEY);
cursor.execute("SELECT user FROM Admins WHERE user='%s'" % (user))
all_data = cursor.fetchone()
conn.close()
if all_data != None and len(all_data) == 1:
flag = os.getenv('FLAG', None)
return flag
else:
print("Alert: %s is not admin." % (user))
return MSG_NO_ADMIN
def runCmd(message, user):
_msgLog(user, message)
if message.lower() == "help" or message.lower() == "!help":
return MSG_HELP
elif message == "!listAll":
return _listAnimal()
elif message == ("!show"):
return MSG_ANIMAL_CMD
elif message.startswith("!show "):
return _showAnimal(message[6:])
elif message == "!serverInfo":
return _getServerInfo(user)
elif message == "!getFlag":
return _getFlag(user)
elif message[:10] == "!addAdmin ":
arg = message[10:]
return _addAdmin(user, arg)
elif message == "!hint":
return HINT_URL
else:
return ""
| 29.401361
| 86
| 0.621472
|
| true
| true
|
79051b448171478abd0070862a6e9dcb4048523f
| 2,226
|
py
|
Python
|
python-lib/example-consumer.py
|
playasystems/hacks
|
5fa39f6525706e502674c5aac422f80c66343416
|
[
"MIT"
] | null | null | null |
python-lib/example-consumer.py
|
playasystems/hacks
|
5fa39f6525706e502674c5aac422f80c66343416
|
[
"MIT"
] | null | null | null |
python-lib/example-consumer.py
|
playasystems/hacks
|
5fa39f6525706e502674c5aac422f80c66343416
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# -*- coding: utf8 -*-
# Virtual dancers that consumes real GigglePixel packets
#
# To use, start this up and then bring up a server broadcasting GigglePixel.
# When this receives a palette packet, the dancing pair (whose humble wearables
# are only capable of displaying one color at a time apiece) will light up
# to match the first two elements of the packet received. When an ID packet
# is received, they will shout their love of the sender.
PORT = 7016
import socket
import sys
from time import time
from x256 import x256
from udp import *
WHITE = '\033[0m'
RGB1 = None
RGB2 = None
banner = "Yay"
note = u'♪'
face = u'(・o・)'
# Print without newline
def p(s):
sys.stdout.write(s)
# Return the two arm glyphs for the current pose (a two-character string) and toggle it for next time
arm_phase = False
def arms():
global arm_phase
arm_phase = not arm_phase
if arm_phase:
return u'┏┛'
else:
return u'┗┓'
# Take an RGB value and return an ANSI escape sequence to show it in the terminal
def color(rgb):
if rgb is None:
return ""
ix = x256.from_rgb(*rgb)
return "\033[38;5;%dm" % ix
# Draw the dancers
def draw():
    l, r = arms()
    p(color(RGB1) + l + face + r + WHITE + ' ' + note + ' ')
    l, r = arms()
    p(color(RGB2) + l + face + r + WHITE + " -" + banner + "!")
    p("\n\033[1A")  # Keep drawing over and over on the same line
def handle_packet(gp):
global banner
global RGB1
global RGB2
if gp is None: return
if gp.packet_type == "PALETTE":
entries = gp.payload["entries"]
if len(entries) < 1:
return
elif len(entries) == 1:
entries.extend(entries)
RGB1 = (entries[0]["red"], entries[0]["green"], entries[0]["blue"])
RGB2 = (entries[1]["red"], entries[1]["green"], entries[1]["blue"])
elif gp.packet_type == "ID":
banner = "We love " + gp.payload["name"]
next_dance = time()
listener = GigglePixelListener()
try:
while True:
draw()
now = time()
time_left = next_dance - now
gp = None
if time_left > 0:
gp = listener.get_packet(time_left)
handle_packet(gp)
if gp is None:
next_dance = time() + 1
arms() # Toggle arm positions
except KeyboardInterrupt:
    print(WHITE)
| 24.195652
| 86
| 0.649146
|
| true
|
79051cdd681d6dd17b5faec1bbc03d2c7f12fa19
| 7,368
|
py
|
Python
|
examples/dump_pcapng_info_pretty.py
|
dieter-exc/python-pcapng
|
59ff754d424c0542bc6d7b87e2b0adb721a7b73a
|
[
"Apache-2.0"
] | 82
|
2015-02-18T01:45:48.000Z
|
2022-01-25T03:37:11.000Z
|
examples/dump_pcapng_info_pretty.py
|
dieter-exc/python-pcapng
|
59ff754d424c0542bc6d7b87e2b0adb721a7b73a
|
[
"Apache-2.0"
] | 31
|
2015-02-09T09:01:42.000Z
|
2022-03-31T08:09:58.000Z
|
examples/dump_pcapng_info_pretty.py
|
dieter-exc/python-pcapng
|
59ff754d424c0542bc6d7b87e2b0adb721a7b73a
|
[
"Apache-2.0"
] | 35
|
2015-02-04T21:34:16.000Z
|
2022-03-23T00:41:44.000Z
|
#!/usr/bin/env python
import io
import sys
from datetime import datetime
# To make sure all packet types are available
import scapy.all # noqa
import scapy.packet
from scapy.layers.l2 import Ether
import pcapng
from pcapng.blocks import EnhancedPacket, InterfaceDescription, SectionHeader
def col256(text, fg=None, bg=None, bold=False):
def _get_color(col):
return "8;5;{0:d}".format(_to_color(col))
def _to_color(num):
if isinstance(num, int):
return num # Assume it is already a color
if isinstance(num, str) and len(num) <= 3:
return 16 + int(num, 6)
raise ValueError("Invalid color: {0!r}".format(num))
if not isinstance(text, str):
text = repr(text)
buf = io.StringIO()
if bold:
buf.write("\x1b[1m")
if fg is not None:
buf.write("\x1b[3{0}m".format(_get_color(fg)))
if bg is not None:
buf.write("\x1b[4{0}m".format(_get_color(bg)))
buf.write(text)
buf.write("\x1b[0m")
return buf.getvalue()
def dump_information(scanner):
for block in scanner:
if isinstance(block, SectionHeader):
pprint_sectionheader(block)
elif isinstance(block, InterfaceDescription):
pprint_interfacedesc(block)
elif isinstance(block, EnhancedPacket):
pprint_enhanced_packet(block)
else:
print(" " + str(block))
def pprint_options(options):
if len(options):
yield "--"
for key, values in options.iter_all_items():
for value in values:
yield col256(key + ":", bold=True, fg="453")
yield col256(str(value), fg="340")
def pprint_sectionheader(block):
endianness_desc = {
"<": "Little endian",
">": "Big endian",
"!": "Network (Big endian)",
"=": "Native",
}
text = [
col256(" Section ", bg="400", fg="550"),
col256("version:", bold=True),
col256(".".join(str(x) for x in block.version), fg="145"),
# col256('endianness:', bold=True),
"-",
col256(endianness_desc.get(block.endianness, "Unknown endianness"), bold=True),
"-",
]
if block.length < 0:
text.append(col256("unspecified size", bold=True))
else:
text.append(col256("length:", bold=True))
text.append(col256(str(block.length), fg="145"))
text.extend(pprint_options(block.options))
print(" ".join(text))
def pprint_interfacedesc(block):
text = [
col256(" Interface #{0} ".format(block.interface_id), bg="010", fg="453"),
col256("Link type:", bold=True),
col256(str(block.link_type), fg="140"),
col256(block.link_type_description, fg="145"),
col256("Snap length:", bold=True),
col256(str(block.snaplen), fg="145"),
]
text.extend(pprint_options(block.options))
print(" ".join(text))
def pprint_enhanced_packet(block):
text = [
col256(" Packet+ ", bg="001", fg="345"),
# col256('NIC:', bold=True),
# col256(str(block.interface_id), fg='145'),
col256(str(block.interface.options["if_name"]), fg="140"),
col256(
str(
datetime.utcfromtimestamp(block.timestamp).strftime("%Y-%m-%d %H:%M:%S")
),
fg="455",
),
]
try:
text.extend(
[
col256("NIC:", bold=True),
col256(block.interface_id, fg="145"),
col256(block.interface.options["if_name"], fg="140"),
]
)
except KeyError:
pass
text.extend(
[
# col256('Size:', bold=True),
col256(str(block.packet_len) + " bytes", fg="025")
]
)
if block.captured_len != block.packet_len:
text.extend(
[
col256("Truncated to:", bold=True),
                col256(str(block.captured_len) + " bytes", fg="145"),
]
)
text.extend(pprint_options(block.options))
print(" ".join(text))
if block.interface.link_type == 1:
# print(repr(block.packet_data))
# print(col256(repr(Ether(block.packet_data)), fg='255'))
_info = format_packet_information(block.packet_data)
print("\n".join(" " + line for line in _info))
else:
print(" Printing information for non-ethernet packets")
print(" is not supported yet.")
# print('\n'.join(' ' + line
# for line in format_binary_data(block.packet_data)))
def format_packet_information(packet_data):
decoded = Ether(packet_data)
return format_scapy_packet(decoded)
def format_scapy_packet(packet):
fields = []
for f in packet.fields_desc:
# if isinstance(f, ConditionalField) and not f._evalcond(self):
# continue
if f.name in packet.fields:
val = f.i2repr(packet, packet.fields[f.name])
elif f.name in packet.overloaded_fields:
val = f.i2repr(packet, packet.overloaded_fields[f.name])
else:
continue
fields.append("{0}={1}".format(col256(f.name, "542"), col256(val, "352")))
yield "{0} {1}".format(col256(packet.__class__.__name__, "501"), " ".join(fields))
if packet.payload:
if isinstance(packet.payload, scapy.packet.Raw):
raw_data = str(packet.payload)
for line in make_printable(raw_data).splitlines():
yield " " + line
# for line in format_binary_data(raw_data):
# yield ' ' + line
elif isinstance(packet.payload, scapy.packet.Packet):
for line in format_scapy_packet(packet.payload):
yield " " + line
else:
for line in repr(packet.payload).splitlines():
yield " " + line
def make_printable(data): # todo: preserve unicode
stream = io.StringIO()
for ch in data:
if ch == "\\":
stream.write("\\\\")
elif ch in "\n\r" or (32 <= ord(ch) <= 126):
stream.write(ch)
else:
stream.write("\\x{0:02x}".format(ord(ch)))
return stream.getvalue()
def format_binary_data(data):
stream = io.BytesIO(data)
row_offset = 0
row_size = 16 # bytes
while True:
data = stream.read(row_size)
if not data:
return
        hexrow = io.StringIO()
        asciirow = io.StringIO()
        for i, byte in enumerate(data):  # iterating bytes yields ints on Python 3
            if 32 <= byte <= 126:
                asciirow.write(chr(byte))
            else:
                asciirow.write(".")
            hexrow.write(format(byte, "02x"))
if i < 15:
if i % 2 == 1:
hexrow.write(" ")
if i % 8 == 7:
hexrow.write(" ")
row_offset += 1
yield "{0:08x}: {1:40s} {2:16s}".format(
row_offset, hexrow.getvalue(), asciirow.getvalue()
)
def main():
if (len(sys.argv) > 1) and (sys.argv[1] != "-"):
with open(sys.argv[1], "rb") as fp:
scanner = pcapng.FileScanner(fp)
dump_information(scanner)
else:
        scanner = pcapng.FileScanner(sys.stdin.buffer)  # the scanner needs a binary stream
dump_information(scanner)
if __name__ == "__main__":
main()
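# --- Added illustration (not part of the upstream example) ---
# col256's three-character color strings are base-6 digits into the xterm
# 6x6x6 color cube: "rgb" maps to palette index 16 + 36*r + 6*g + b, so
# "500" -> 16 + 180 = 196 (a bright red). A tiny demo using col256 above:
def _col256_demo():
    print(col256("bright red", fg="500"))
    print(col256("blue on grey, bold", fg="003", bg="222", bold=True))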
| 28.015209
| 88
| 0.549946
|
import io
import sys
from datetime import datetime
import scapy.all
import scapy.packet
from scapy.layers.l2 import Ether
import pcapng
from pcapng.blocks import EnhancedPacket, InterfaceDescription, SectionHeader
def col256(text, fg=None, bg=None, bold=False):
def _get_color(col):
return "8;5;{0:d}".format(_to_color(col))
def _to_color(num):
if isinstance(num, int):
return num
if isinstance(num, str) and len(num) <= 3:
return 16 + int(num, 6)
raise ValueError("Invalid color: {0!r}".format(num))
if not isinstance(text, str):
text = repr(text)
buf = io.StringIO()
if bold:
buf.write("\x1b[1m")
if fg is not None:
buf.write("\x1b[3{0}m".format(_get_color(fg)))
if bg is not None:
buf.write("\x1b[4{0}m".format(_get_color(bg)))
buf.write(text)
buf.write("\x1b[0m")
return buf.getvalue()
def dump_information(scanner):
for block in scanner:
if isinstance(block, SectionHeader):
pprint_sectionheader(block)
elif isinstance(block, InterfaceDescription):
pprint_interfacedesc(block)
elif isinstance(block, EnhancedPacket):
pprint_enhanced_packet(block)
else:
print(" " + str(block))
def pprint_options(options):
if len(options):
yield "--"
for key, values in options.iter_all_items():
for value in values:
yield col256(key + ":", bold=True, fg="453")
yield col256(str(value), fg="340")
def pprint_sectionheader(block):
endianness_desc = {
"<": "Little endian",
">": "Big endian",
"!": "Network (Big endian)",
"=": "Native",
}
text = [
col256(" Section ", bg="400", fg="550"),
col256("version:", bold=True),
col256(".".join(str(x) for x in block.version), fg="145"),
"-",
col256(endianness_desc.get(block.endianness, "Unknown endianness"), bold=True),
"-",
]
if block.length < 0:
text.append(col256("unspecified size", bold=True))
else:
text.append(col256("length:", bold=True))
text.append(col256(str(block.length), fg="145"))
text.extend(pprint_options(block.options))
print(" ".join(text))
def pprint_interfacedesc(block):
text = [
col256(" Interface #{0} ".format(block.interface_id), bg="010", fg="453"),
col256("Link type:", bold=True),
col256(str(block.link_type), fg="140"),
col256(block.link_type_description, fg="145"),
col256("Snap length:", bold=True),
col256(str(block.snaplen), fg="145"),
]
text.extend(pprint_options(block.options))
print(" ".join(text))
def pprint_enhanced_packet(block):
text = [
col256(" Packet+ ", bg="001", fg="345"),
col256(str(block.interface.options["if_name"]), fg="140"),
col256(
str(
datetime.utcfromtimestamp(block.timestamp).strftime("%Y-%m-%d %H:%M:%S")
),
fg="455",
),
]
try:
text.extend(
[
col256("NIC:", bold=True),
col256(block.interface_id, fg="145"),
col256(block.interface.options["if_name"], fg="140"),
]
)
except KeyError:
pass
text.extend(
[
col256(str(block.packet_len) + " bytes", fg="025")
]
)
if block.captured_len != block.packet_len:
text.extend(
[
col256("Truncated to:", bold=True),
                col256(str(block.captured_len) + " bytes", fg="145"),
]
)
text.extend(pprint_options(block.options))
print(" ".join(text))
if block.interface.link_type == 1:
_info = format_packet_information(block.packet_data)
print("\n".join(" " + line for line in _info))
else:
print(" Printing information for non-ethernet packets")
print(" is not supported yet.")
def format_packet_information(packet_data):
decoded = Ether(packet_data)
return format_scapy_packet(decoded)
def format_scapy_packet(packet):
fields = []
for f in packet.fields_desc:
if f.name in packet.fields:
val = f.i2repr(packet, packet.fields[f.name])
elif f.name in packet.overloaded_fields:
val = f.i2repr(packet, packet.overloaded_fields[f.name])
else:
continue
fields.append("{0}={1}".format(col256(f.name, "542"), col256(val, "352")))
yield "{0} {1}".format(col256(packet.__class__.__name__, "501"), " ".join(fields))
if packet.payload:
if isinstance(packet.payload, scapy.packet.Raw):
raw_data = str(packet.payload)
for line in make_printable(raw_data).splitlines():
yield " " + line
elif isinstance(packet.payload, scapy.packet.Packet):
for line in format_scapy_packet(packet.payload):
yield " " + line
else:
for line in repr(packet.payload).splitlines():
yield " " + line
def make_printable(data):
stream = io.StringIO()
for ch in data:
if ch == "\\":
stream.write("\\\\")
elif ch in "\n\r" or (32 <= ord(ch) <= 126):
stream.write(ch)
else:
stream.write("\\x{0:02x}".format(ord(ch)))
return stream.getvalue()
def format_binary_data(data):
stream = io.BytesIO(data)
row_offset = 0
row_size = 16
while True:
data = stream.read(row_size)
if not data:
return
        hexrow = io.StringIO()
        asciirow = io.StringIO()
        for i, byte in enumerate(data):
            if 32 <= byte <= 126:
                asciirow.write(chr(byte))
            else:
                asciirow.write(".")
            hexrow.write(format(byte, "02x"))
if i < 15:
if i % 2 == 1:
hexrow.write(" ")
if i % 8 == 7:
hexrow.write(" ")
row_offset += 1
yield "{0:08x}: {1:40s} {2:16s}".format(
row_offset, hexrow.getvalue(), asciirow.getvalue()
)
def main():
if (len(sys.argv) > 1) and (sys.argv[1] != "-"):
with open(sys.argv[1], "rb") as fp:
scanner = pcapng.FileScanner(fp)
dump_information(scanner)
else:
        scanner = pcapng.FileScanner(sys.stdin.buffer)
dump_information(scanner)
if __name__ == "__main__":
main()
| true
| true
|
79051e41b6e05f3a229188ef0440c645ab8f212f
| 2,284
|
py
|
Python
|
alembic/env.py
|
webhacking/finance
|
f6063af2b0cc949a3faaf081587a5504b0783a8c
|
[
"BSD-4-Clause"
] | null | null | null |
alembic/env.py
|
webhacking/finance
|
f6063af2b0cc949a3faaf081587a5504b0783a8c
|
[
"BSD-4-Clause"
] | null | null | null |
alembic/env.py
|
webhacking/finance
|
f6063af2b0cc949a3faaf081587a5504b0783a8c
|
[
"BSD-4-Clause"
] | null | null | null |
from __future__ import with_statement
import os
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
from finance.models import db
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = db.Model.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# Loads database URL from an environment variable
if bool(config.get_main_option("pytest.istest")):
config.set_main_option("sqlalchemy.url", os.environ["SBF_TEST_DB_URL"])
else:
config.set_main_option("sqlalchemy.url", os.environ["SBF_DB_URL"])
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
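# --- Added illustration (not part of this env.py) ---
# Alembic imports env.py itself, so a caller only needs to export the URL
# variable read by run_migrations_online() above and invoke the CLI. The URL
# below is a placeholder; this helper would live in a task runner, not here.
def _upgrade_to_head_demo():
    import subprocess
    os.environ["SBF_DB_URL"] = "postgresql://user:pass@localhost/finance"
    subprocess.run(["alembic", "upgrade", "head"], check=True)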
| 28.55
| 83
| 0.736427
|
from __future__ import with_statement
import os
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
from finance.models import db
config = context.config
fileConfig(config.config_file_name)
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = db.Model.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
# Loads database URL from an environment variable
if bool(config.get_main_option("pytest.istest")):
config.set_main_option("sqlalchemy.url", os.environ["SBF_TEST_DB_URL"])
else:
config.set_main_option("sqlalchemy.url", os.environ["SBF_DB_URL"])
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| true
| true
|
79051e6ef50a3db863bcf233cbabfd4e6a7b0c61
| 10,901
|
py
|
Python
|
sdk/python/pulumi_azure_native/machinelearningservices/v20210101/machine_learning_compute.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/machinelearningservices/v20210101/machine_learning_compute.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/machinelearningservices/v20210101/machine_learning_compute.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['MachineLearningCompute']
class MachineLearningCompute(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['AKSArgs'], pulumi.InputType['AmlComputeArgs'], pulumi.InputType['ComputeInstanceArgs'], pulumi.InputType['DataFactoryArgs'], pulumi.InputType['DataLakeAnalyticsArgs'], pulumi.InputType['DatabricksArgs'], pulumi.InputType['HDInsightArgs'], pulumi.InputType['VirtualMachineArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Machine Learning compute object wrapped into ARM resource envelope.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] compute_name: Name of the Azure Machine Learning compute.
:param pulumi.Input[pulumi.InputType['IdentityArgs']] identity: The identity of the resource.
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input[Union[pulumi.InputType['AKSArgs'], pulumi.InputType['AmlComputeArgs'], pulumi.InputType['ComputeInstanceArgs'], pulumi.InputType['DataFactoryArgs'], pulumi.InputType['DataLakeAnalyticsArgs'], pulumi.InputType['DatabricksArgs'], pulumi.InputType['HDInsightArgs'], pulumi.InputType['VirtualMachineArgs']]] properties: Compute properties
:param pulumi.Input[str] resource_group_name: Name of the resource group in which workspace is located.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sku of the workspace.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Contains resource tags defined as key/value pairs.
:param pulumi.Input[str] workspace_name: Name of Azure Machine Learning workspace.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['compute_name'] = compute_name
__props__['identity'] = identity
__props__['location'] = location
__props__['properties'] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['sku'] = sku
__props__['tags'] = tags
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__['workspace_name'] = workspace_name
__props__['name'] = None
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20210101:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/latest:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/latest:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20180301preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20180301preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20181119:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20181119:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20190501:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20190501:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20190601:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20190601:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20191101:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20191101:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200101:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200101:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200218preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200218preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200301:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200301:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200401:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200401:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200501preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200501preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200515preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200515preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200601:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200601:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200801:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200801:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200901preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200901preview:MachineLearningCompute")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(MachineLearningCompute, __self__).__init__(
'azure-native:machinelearningservices/v20210101:MachineLearningCompute',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'MachineLearningCompute':
"""
Get an existing MachineLearningCompute resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["identity"] = None
__props__["location"] = None
__props__["name"] = None
__props__["properties"] = None
__props__["sku"] = None
__props__["system_data"] = None
__props__["tags"] = None
__props__["type"] = None
return MachineLearningCompute(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Any]:
"""
Compute properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
The sku of the workspace.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Read only system data
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 59.895604
| 3,183
| 0.710669
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['MachineLearningCompute']
class MachineLearningCompute(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compute_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['AKSArgs'], pulumi.InputType['AmlComputeArgs'], pulumi.InputType['ComputeInstanceArgs'], pulumi.InputType['DataFactoryArgs'], pulumi.InputType['DataLakeAnalyticsArgs'], pulumi.InputType['DatabricksArgs'], pulumi.InputType['HDInsightArgs'], pulumi.InputType['VirtualMachineArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['compute_name'] = compute_name
__props__['identity'] = identity
__props__['location'] = location
__props__['properties'] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['sku'] = sku
__props__['tags'] = tags
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__['workspace_name'] = workspace_name
__props__['name'] = None
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20210101:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/latest:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/latest:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20180301preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20180301preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20181119:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20181119:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20190501:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20190501:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20190601:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20190601:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20191101:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20191101:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200101:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200101:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200218preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200218preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200301:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200301:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200401:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200401:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200501preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200501preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200515preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200515preview:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200601:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200601:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200801:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200801:MachineLearningCompute"), pulumi.Alias(type_="azure-native:machinelearningservices/v20200901preview:MachineLearningCompute"), pulumi.Alias(type_="azure-nextgen:machinelearningservices/v20200901preview:MachineLearningCompute")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(MachineLearningCompute, __self__).__init__(
'azure-native:machinelearningservices/v20210101:MachineLearningCompute',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'MachineLearningCompute':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["identity"] = None
__props__["location"] = None
__props__["name"] = None
__props__["properties"] = None
__props__["sku"] = None
__props__["system_data"] = None
__props__["tags"] = None
__props__["type"] = None
return MachineLearningCompute(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityResponse']]:
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Any]:
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true
| true
|
79051e7fed81fcd045eb75c2d0b4b3d3845092ea
| 3,222
|
py
|
Python
|
webapp2/settings.py
|
ndavilo/webapp2
|
65fad8328675dd7fa0210ec0fc85fd291887afb5
|
[
"MIT"
] | null | null | null |
webapp2/settings.py
|
ndavilo/webapp2
|
65fad8328675dd7fa0210ec0fc85fd291887afb5
|
[
"MIT"
] | null | null | null |
webapp2/settings.py
|
ndavilo/webapp2
|
65fad8328675dd7fa0210ec0fc85fd291887afb5
|
[
"MIT"
] | null | null | null |
"""
Django settings for webapp2 project.
Generated by 'django-admin startproject' using Django 4.0.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-jtp=j6oy)@&t#9l$zv#1iavkq#l-#9f$*z97d@623=nzeo@pgm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webapp2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webapp2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
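# --- Added illustration (not part of the generated settings) ---
# One common way to address the SECURITY WARNING above is to source the key
# from the environment in production; DJANGO_SECRET_KEY is an assumed name.
import os
SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", SECRET_KEY)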
| 25.983871
| 91
| 0.701117
|
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure-jtp=j6oy)@&t#9l$zv#1iavkq#l-#9f$*z97d@623=nzeo@pgm'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'webapp2.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webapp2.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| true
| true
|
79051fae535efc193413e9130d22dc69d49c89c6
| 947
|
py
|
Python
|
php4dvd/conftest.py
|
sargm/selenium-py-traning-barancev
|
a4b2d75c2b15d64b80759ea48867b79a03482328
|
[
"Apache-2.0"
] | null | null | null |
php4dvd/conftest.py
|
sargm/selenium-py-traning-barancev
|
a4b2d75c2b15d64b80759ea48867b79a03482328
|
[
"Apache-2.0"
] | null | null | null |
php4dvd/conftest.py
|
sargm/selenium-py-traning-barancev
|
a4b2d75c2b15d64b80759ea48867b79a03482328
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from selenium import webdriver
from model.application import Application
def pytest_addoption(parser):
parser.addoption("--browser", action="store", default="firefox", help="browser type")
parser.addoption("--base_url", action="store", default="http://localhost:9080/php4dvd/", help="base URL")
@pytest.fixture(scope="session")
def browser_type(request):
return request.config.getoption("--browser")
@pytest.fixture(scope="session")
def base_url(request):
return request.config.getoption("--base_url")
@pytest.fixture(scope="session")
def app(request, browser_type, base_url):
    if browser_type == "firefox":
        driver = webdriver.Firefox()
    elif browser_type == "chrome":
        driver = webdriver.Chrome()
    elif browser_type == "ie":
        driver = webdriver.Ie()
    else:
        raise ValueError("Unsupported browser type: " + browser_type)
    #driver.implicitly_wait(30)
    request.addfinalizer(driver.quit)  # close browser
return Application(driver, base_url)
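# --- Added illustration (belongs in a test module, not in conftest.py) ---
# Invocation: pytest --browser=chrome --base_url=http://localhost:9080/php4dvd/
# Requesting the session-scoped `app` fixture launches the configured browser.
# Application's attributes are not shown here, so the assertion stays generic.
def test_app_fixture_is_ready(app):
    assert app is not None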
| 30.548387
| 109
| 0.711721
|
import pytest
from selenium import webdriver
from model.application import Application
def pytest_addoption(parser):
parser.addoption("--browser", action="store", default="firefox", help="browser type")
parser.addoption("--base_url", action="store", default="http://localhost:9080/php4dvd/", help="base URL")
@pytest.fixture(scope="session")
def browser_type(request):
return request.config.getoption("--browser")
@pytest.fixture(scope="session")
def base_url(request):
return request.config.getoption("--base_url")
@pytest.fixture(scope="session")
def app(request, browser_type, base_url):
    if browser_type == "firefox":
        driver = webdriver.Firefox()
    elif browser_type == "chrome":
        driver = webdriver.Chrome()
    elif browser_type == "ie":
        driver = webdriver.Ie()
    else:
        raise ValueError("Unsupported browser type: " + browser_type)
    request.addfinalizer(driver.quit)
return Application(driver, base_url)
| true
| true
|
79051ff8ad662d97e3318a7b6db079fc55e553ed
| 459
|
py
|
Python
|
Admins/migrations/0037_auto_20210310_0337.py
|
sd2001/Test-X
|
8f793420644c860f51c718716d7ad2a96f1b72c0
|
[
"MIT"
] | 1
|
2021-03-29T17:54:51.000Z
|
2021-03-29T17:54:51.000Z
|
Admins/migrations/0037_auto_20210310_0337.py
|
sd2001/Test-X
|
8f793420644c860f51c718716d7ad2a96f1b72c0
|
[
"MIT"
] | null | null | null |
Admins/migrations/0037_auto_20210310_0337.py
|
sd2001/Test-X
|
8f793420644c860f51c718716d7ad2a96f1b72c0
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-03-10 03:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Admins', '0036_auto_20210310_0337'),
]
operations = [
migrations.AlterField(
model_name='createpractioner',
name='id',
field=models.CharField(default='P27fc1', editable=False, max_length=6, primary_key=True, serialize=False),
),
]
| 24.157895
| 118
| 0.631808
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Admins', '0036_auto_20210310_0337'),
]
operations = [
migrations.AlterField(
model_name='createpractioner',
name='id',
field=models.CharField(default='P27fc1', editable=False, max_length=6, primary_key=True, serialize=False),
),
]
| true
| true
|
790520f670865a773ddf30b5cdb83f7669b1e83d
| 45
|
py
|
Python
|
cardboard/cards/__init__.py
|
Julian/cardboard
|
6ab676d04b38bf9b0d0c4a849364159947b8ea7f
|
[
"MIT"
] | 5
|
2015-03-23T10:25:40.000Z
|
2021-05-29T06:00:40.000Z
|
cardboard/cards/__init__.py
|
Julian/cardboard
|
6ab676d04b38bf9b0d0c4a849364159947b8ea7f
|
[
"MIT"
] | null | null | null |
cardboard/cards/__init__.py
|
Julian/cardboard
|
6ab676d04b38bf9b0d0c4a849364159947b8ea7f
|
[
"MIT"
] | 1
|
2019-02-17T14:45:29.000Z
|
2019-02-17T14:45:29.000Z
|
from cardboard.cards.core import cards, card
| 22.5
| 44
| 0.822222
|
from cardboard.cards.core import cards, card
| true
| true
|
7905224699fbac6a4259ad9069a3b14b15d7ad2c
| 3,350
|
py
|
Python
|
cen/regularizers/entropy.py
|
crodriguez1a/cen
|
f03397a0bf4ac24162e270907d623f8658179e88
|
[
"Apache-2.0"
] | 6
|
2020-02-23T04:53:08.000Z
|
2022-01-10T18:13:37.000Z
|
cen/regularizers/entropy.py
|
crodriguez1a/cen
|
f03397a0bf4ac24162e270907d623f8658179e88
|
[
"Apache-2.0"
] | null | null | null |
cen/regularizers/entropy.py
|
crodriguez1a/cen
|
f03397a0bf4ac24162e270907d623f8658179e88
|
[
"Apache-2.0"
] | 5
|
2020-09-27T23:46:33.000Z
|
2021-10-14T07:42:54.000Z
|
# Copyright 2020 Maruan Al-Shedivat. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Entropy-based activity regularizers."""
import tensorflow as tf
from tensorflow.python.keras.regularizers import Regularizer
class ContextConditionalNegativeEntropy(Regularizer):
"""Encourages models with higher context-conditional entropy."""
def __init__(self, coeff=0., num_samples=256, stddev=2e-1, epsilon=1e-6):
self.coeff = coeff
self.stddev = stddev
self.epsilon = epsilon
self.num_samples = num_samples
def __call__(self, x):
if self.coeff == 0.:
return tf.constant(0.)
# Unpack inputs.
# contextual_weights:
# kernels: <float32> [batch_size, feature_dim, num_classes].
# biases: <float32> [batch_size, num_classes].
# features: <float32> [batch_size, feature_dim].
# outputs: <float32> [batch_size, num_classes].
contextual_weights, features, outputs = x
# Generate features from P(x | c).
# <float32> [batch_size, num_samples, feature_dim].
features_shape = tf.shape(features)
features_noise = tf.random.normal(
shape=(features_shape[0], self.num_samples, features_shape[1]),
stddev=self.stddev
)
# <float32> [batch_size, num_samples, feature_dim].
features_prime = tf.expand_dims(features, axis=1) + features_noise
# Compute log mean_j P(Y | x_j, c_i).
# <float32> [batch_size, num_samples, num_classes].
logits = tf.einsum(
"ipk,ijp->ijk", contextual_weights["kernels"], features_prime
)
if "biases" in contextual_weights:
# <float32> [batch_size, num_samples, units].
biases = tf.expand_dims(contextual_weights["biases"], axis=1)
logits = tf.add(logits, biases)
# <float32> [batch_size, num_classes].
probs = tf.reduce_mean(tf.nn.softmax(logits), axis=1) + self.epsilon
probs_sum = tf.reduce_sum(probs, axis=-1, keepdims=True)
log_probs = tf.math.log(probs / probs_sum)
# Compute loss.
loss = -tf.nn.softmax_cross_entropy_with_logits(
labels=tf.nn.softmax(outputs), logits=log_probs
)
return self.coeff * tf.reduce_mean(loss)
def __str__(self):
config = self.get_config()
return "{name:s}({coeff:f})".format(**config)
def get_config(self):
return {"name": self.__class__.__name__, "coeff": float(self.coeff)}
# Aliases.
def ctx_cond_neg_ent(coeff=0., num_samples=32, stddev=.1, epsilon=1e-6):
return ContextConditionalNegativeEntropy(
coeff=coeff, num_samples=num_samples, stddev=stddev, epsilon=epsilon
)
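# --- Added illustration (not part of the original module) ---
# The regularizer is called on a (contextual_weights, features, outputs)
# triple; shapes follow the comments in __call__ above. The random tensors
# below are placeholders standing in for a model's contextual parameters.
def _ctx_cond_neg_ent_demo(batch=4, feat=8, classes=3):
    reg = ctx_cond_neg_ent(coeff=1e-2, num_samples=16)
    contextual_weights = {
        "kernels": tf.random.normal([batch, feat, classes]),
        "biases": tf.random.normal([batch, classes]),
    }
    features = tf.random.normal([batch, feat])
    outputs = tf.random.normal([batch, classes])
    return reg((contextual_weights, features, outputs))  # scalar penalty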
| 38.068182
| 80
| 0.643881
|
import tensorflow as tf
from tensorflow.python.keras.regularizers import Regularizer
class ContextConditionalNegativeEntropy(Regularizer):
def __init__(self, coeff=0., num_samples=256, stddev=2e-1, epsilon=1e-6):
self.coeff = coeff
self.stddev = stddev
self.epsilon = epsilon
self.num_samples = num_samples
def __call__(self, x):
if self.coeff == 0.:
return tf.constant(0.)
contextual_weights, features, outputs = x
features_shape = tf.shape(features)
features_noise = tf.random.normal(
shape=(features_shape[0], self.num_samples, features_shape[1]),
stddev=self.stddev
)
features_prime = tf.expand_dims(features, axis=1) + features_noise
logits = tf.einsum(
"ipk,ijp->ijk", contextual_weights["kernels"], features_prime
)
if "biases" in contextual_weights:
biases = tf.expand_dims(contextual_weights["biases"], axis=1)
logits = tf.add(logits, biases)
probs = tf.reduce_mean(tf.nn.softmax(logits), axis=1) + self.epsilon
probs_sum = tf.reduce_sum(probs, axis=-1, keepdims=True)
log_probs = tf.math.log(probs / probs_sum)
loss = -tf.nn.softmax_cross_entropy_with_logits(
labels=tf.nn.softmax(outputs), logits=log_probs
)
return self.coeff * tf.reduce_mean(loss)
def __str__(self):
config = self.get_config()
return "{name:s}({coeff:f})".format(**config)
def get_config(self):
return {"name": self.__class__.__name__, "coeff": float(self.coeff)}
def ctx_cond_neg_ent(coeff=0., num_samples=32, stddev=.1, epsilon=1e-6):
return ContextConditionalNegativeEntropy(
coeff=coeff, num_samples=num_samples, stddev=stddev, epsilon=epsilon
)
| true
| true
|
79052318c9dc00983ebd4e280a4e9d7bbbf905d6
| 1,258
|
py
|
Python
|
EstCondicional.py
|
royturpo123/EXAMEN-01
|
7ba07defb0913ef38fadfdb691271929f92d2086
|
[
"Apache-2.0"
] | null | null | null |
EstCondicional.py
|
royturpo123/EXAMEN-01
|
7ba07defb0913ef38fadfdb691271929f92d2086
|
[
"Apache-2.0"
] | null | null | null |
EstCondicional.py
|
royturpo123/EXAMEN-01
|
7ba07defb0913ef38fadfdb691271929f92d2086
|
[
"Apache-2.0"
] | null | null | null |
def calculalaNotafinalRGHT1():
    # Define variables
    notaFinalRGHT = 0.0
    # Input data: weight of each component (percentage) and the grade obtained
    primeraUnidad = float(input("Ingrese el peso de la primera unidad (%): "))
    segundaUnidad = float(input("Ingrese el peso de la segunda unidad (%): "))
    terceraUnidad = float(input("Ingrese el peso de la tercera unidad (%): "))
    trabajoFinal = float(input("Ingrese el peso del trabajo final (%): "))
    notaObtenida = float(input("Ingrese la nota obtenida: "))
    # Process
    if primeraUnidad <= 20 and notaObtenida >= 14:
        notaFinalRGHT = notaObtenida
    elif segundaUnidad <= 15 and notaObtenida >= 17:
        notaFinalRGHT = notaObtenida * 2
    elif terceraUnidad <= 15 and notaObtenida >= 15:
        notaFinalRGHT = notaObtenida * 3
    elif trabajoFinal <= 50 and notaObtenida >= 20:
        notaFinalRGHT = notaObtenida * 4
    # Output data
    print("la nota final de Fundamentos de programación:", notaFinalRGHT)

def bonoDocenteRGHT2():
    # Define variables
    bonoObtenido = 0.0
    # Input data
    salarioMinimoRGHT = float(input("Ingrese el salario minimo:"))
    puntuacionObtenidaRGHT = float(input("Ingrese la puntuación que ha obtenido:"))
    # Process
    if 0 <= puntuacionObtenidaRGHT <= 100:
        bonoObtenido = salarioMinimoRGHT
    elif 101 <= puntuacionObtenidaRGHT <= 150:
        bonoObtenido = salarioMinimoRGHT * 2
    elif puntuacionObtenidaRGHT > 150:
        bonoObtenido = salarioMinimoRGHT * 3
    # Output data
    print("El docente obtendra un bono de:", bonoObtenido)

calculalaNotafinalRGHT1()
#bonoDocenteRGHT2()
| 29.255814
| 79
| 0.771065
|
def calculalaNotafinalRGHT1():
    notaFinalRGHT = 0.0
    primeraUnidad = float(input("Ingrese el peso de la primera unidad (%): "))
    segundaUnidad = float(input("Ingrese el peso de la segunda unidad (%): "))
    terceraUnidad = float(input("Ingrese el peso de la tercera unidad (%): "))
    trabajoFinal = float(input("Ingrese el peso del trabajo final (%): "))
    notaObtenida = float(input("Ingrese la nota obtenida: "))
    if primeraUnidad <= 20 and notaObtenida >= 14:
        notaFinalRGHT = notaObtenida
    elif segundaUnidad <= 15 and notaObtenida >= 17:
        notaFinalRGHT = notaObtenida * 2
    elif terceraUnidad <= 15 and notaObtenida >= 15:
        notaFinalRGHT = notaObtenida * 3
    elif trabajoFinal <= 50 and notaObtenida >= 20:
        notaFinalRGHT = notaObtenida * 4
    print("la nota final de Fundamentos de programación:", notaFinalRGHT)

def bonoDocenteRGHT2():
    bonoObtenido = 0.0
    salarioMinimoRGHT = float(input("Ingrese el salario minimo:"))
    puntuacionObtenidaRGHT = float(input("Ingrese la puntuación que ha obtenido:"))
    if 0 <= puntuacionObtenidaRGHT <= 100:
        bonoObtenido = salarioMinimoRGHT
    elif 101 <= puntuacionObtenidaRGHT <= 150:
        bonoObtenido = salarioMinimoRGHT * 2
    elif puntuacionObtenidaRGHT > 150:
        bonoObtenido = salarioMinimoRGHT * 3
    print("El docente obtendra un bono de:", bonoObtenido)

calculalaNotafinalRGHT1()
| false
| true
|
790523a20a66d0671469195f542a98b737e40593
| 12,529
|
py
|
Python
|
cryptoapis/model/coins_forwarding_success_data.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 5
|
2021-05-17T04:45:03.000Z
|
2022-03-23T12:51:46.000Z
|
cryptoapis/model/coins_forwarding_success_data.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | null | null | null |
cryptoapis/model/coins_forwarding_success_data.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 2
|
2021-06-02T07:32:26.000Z
|
2022-02-12T02:36:23.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.coins_forwarding_success_data_item import CoinsForwardingSuccessDataItem
globals()['CoinsForwardingSuccessDataItem'] = CoinsForwardingSuccessDataItem
class CoinsForwardingSuccessData(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'product': (str,), # noqa: E501
'event': (str,), # noqa: E501
'item': (CoinsForwardingSuccessDataItem,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'product': 'product', # noqa: E501
'event': 'event', # noqa: E501
'item': 'item', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, product, event, item, *args, **kwargs): # noqa: E501
"""CoinsForwardingSuccessData - a model defined in OpenAPI
Args:
product (str): Represents the Crypto APIs 2.0 product which sends the callback.
event (str): Defines the specific event, for which a callback subscription is set.
item (CoinsForwardingSuccessDataItem):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.product = product
self.event = event
self.item = item
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, product, event, item, *args, **kwargs): # noqa: E501
"""CoinsForwardingSuccessData - a model defined in OpenAPI
Args:
product (str): Represents the Crypto APIs 2.0 product which sends the callback.
event (str): Defines the specific event, for which a callback subscription is set.
item (CoinsForwardingSuccessDataItem):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.product = product
self.event = event
self.item = item
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
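            # NOTE: setattr above runs before this guard, so a read-only key is
            # assigned and then rejected; deserialization goes through
            # _from_openapi_data, which omits this check.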
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| avg_line_length: 44.587189 | max_line_length: 484 | alphanum_fraction: 0.58624 |
7905246e286c3af6d32aefa7b31f4428b7fd2590 | 18,385 | py | Python
tests/python/test_queues.py | ProvoK/trio-asyncio | 8098e93a63eedf7188545cbda45e54c0bcdd85fc | ["Apache-2.0", "MIT"]
"""Tests for queues.py"""
import sys
import unittest
from unittest import mock
import asyncio
from .. import utils as test_utils
class _QueueTestBase(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
class QueueBasicTests(_QueueTestBase):
def _test_repr_or_str(self, fn, expect_id):
"""Test Queue's repr or str.
fn is repr or str. expect_id is True if we expect the Queue's id to
appear in fn(Queue()).
"""
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0.1
self.assertAlmostEqual(0.2, when)
yield 0.1
loop = self.new_test_loop(gen)
q = asyncio.Queue(loop=loop)
self.assertTrue(fn(q).startswith('<Queue'), fn(q))
id_is_present = hex(id(q)) in fn(q)
self.assertEqual(expect_id, id_is_present)
@asyncio.coroutine
def add_getter():
q = asyncio.Queue(loop=loop)
# Start a task that waits to get.
asyncio.Task(q.get(), loop=loop)
# Let it start waiting.
yield from asyncio.sleep(0.1, loop=loop)
self.assertTrue('_getters[1]' in fn(q))
# resume q.get coroutine to finish generator
q.put_nowait(0)
loop.run_until_complete(add_getter())
@asyncio.coroutine
def add_putter():
q = asyncio.Queue(maxsize=1, loop=loop)
q.put_nowait(1)
# Start a task that waits to put.
asyncio.Task(q.put(2), loop=loop)
# Let it start waiting.
yield from asyncio.sleep(0.1, loop=loop)
self.assertTrue('_putters[1]' in fn(q))
# resume q.put coroutine to finish generator
q.get_nowait()
loop.run_until_complete(add_putter())
q = asyncio.Queue(loop=loop)
q.put_nowait(1)
self.assertTrue('_queue=[1]' in fn(q))
def test_ctor_loop(self):
loop = mock.Mock()
q = asyncio.Queue(loop=loop)
self.assertIs(q._loop, loop)
q = asyncio.Queue(loop=self.loop)
self.assertIs(q._loop, self.loop)
def test_ctor_noloop(self):
asyncio.set_event_loop(self.loop)
q = asyncio.Queue()
self.assertIs(q._loop, self.loop)
def test_repr(self):
self._test_repr_or_str(repr, True)
def test_str(self):
self._test_repr_or_str(str, False)
def test_empty(self):
q = asyncio.Queue(loop=self.loop)
self.assertTrue(q.empty())
q.put_nowait(1)
self.assertFalse(q.empty())
self.assertEqual(1, q.get_nowait())
self.assertTrue(q.empty())
def test_full(self):
q = asyncio.Queue(loop=self.loop)
self.assertFalse(q.full())
q = asyncio.Queue(maxsize=1, loop=self.loop)
q.put_nowait(1)
self.assertTrue(q.full())
def test_order(self):
q = asyncio.Queue(loop=self.loop)
for i in [1, 3, 2]:
q.put_nowait(i)
items = [q.get_nowait() for _ in range(3)]
self.assertEqual([1, 3, 2], items)
def test_maxsize(self):
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
when = yield 0.01
self.assertAlmostEqual(0.02, when)
yield 0.01
loop = self.new_test_loop(gen)
q = asyncio.Queue(maxsize=2, loop=loop)
self.assertEqual(2, q.maxsize)
have_been_put = []
@asyncio.coroutine
def putter():
for i in range(3):
yield from q.put(i)
have_been_put.append(i)
return True
@asyncio.coroutine
def test():
t = asyncio.Task(putter(), loop=loop)
yield from asyncio.sleep(0.01, loop=loop)
# The putter is blocked after putting two items.
self.assertEqual([0, 1], have_been_put)
self.assertEqual(0, q.get_nowait())
# Let the putter resume and put last item.
yield from asyncio.sleep(0.01, loop=loop)
self.assertEqual([0, 1, 2], have_been_put)
self.assertEqual(1, q.get_nowait())
self.assertEqual(2, q.get_nowait())
self.assertTrue(t.done())
self.assertTrue(t.result())
loop.run_until_complete(test())
self.assertAlmostEqual(0.02, loop.time())
class QueueGetTests(_QueueTestBase):
def test_blocking_get(self):
q = asyncio.Queue(loop=self.loop)
q.put_nowait(1)
@asyncio.coroutine
def queue_get():
return (yield from q.get())
res = self.loop.run_until_complete(queue_get())
self.assertEqual(1, res)
def test_get_with_putters(self):
q = asyncio.Queue(1, loop=self.loop)
q.put_nowait(1)
waiter = asyncio.Future(loop=self.loop)
q._putters.append(waiter)
res = self.loop.run_until_complete(q.get())
self.assertEqual(1, res)
self.assertTrue(waiter.done())
self.assertIsNone(waiter.result())
def test_blocking_get_wait(self):
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
yield 0.01
loop = self.new_test_loop(gen)
q = asyncio.Queue(loop=loop)
started = asyncio.Event(loop=loop)
finished = False
@asyncio.coroutine
def queue_get():
nonlocal finished
started.set()
res = yield from q.get()
finished = True
return res
@asyncio.coroutine
def queue_put():
loop.call_later(0.01, q.put_nowait, 1)
queue_get_task = asyncio.Task(queue_get(), loop=loop)
yield from started.wait()
self.assertFalse(finished)
res = yield from queue_get_task
self.assertTrue(finished)
return res
res = loop.run_until_complete(queue_put())
self.assertEqual(1, res)
self.assertAlmostEqual(0.01, loop.time())
def test_nonblocking_get(self):
q = asyncio.Queue(loop=self.loop)
q.put_nowait(1)
self.assertEqual(1, q.get_nowait())
def test_nonblocking_get_exception(self):
q = asyncio.Queue(loop=self.loop)
self.assertRaises(asyncio.QueueEmpty, q.get_nowait)
def test_get_cancelled(self):
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
when = yield 0.01
self.assertAlmostEqual(0.061, when)
yield 0.05
loop = self.new_test_loop(gen)
q = asyncio.Queue(loop=loop)
@asyncio.coroutine
def queue_get():
return (yield from asyncio.wait_for(q.get(), 0.051, loop=loop))
@asyncio.coroutine
def test():
get_task = asyncio.Task(queue_get(), loop=loop)
yield from asyncio.sleep(0.01, loop=loop) # let the task start
q.put_nowait(1)
return (yield from get_task)
self.assertEqual(1, loop.run_until_complete(test()))
self.assertAlmostEqual(0.06, loop.time())
def test_get_cancelled_race(self):
q = asyncio.Queue(loop=self.loop)
t1 = asyncio.Task(q.get(), loop=self.loop)
t2 = asyncio.Task(q.get(), loop=self.loop)
test_utils.run_briefly(self.loop)
t1.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(t1.done())
q.put_nowait('a')
test_utils.run_briefly(self.loop)
self.assertEqual(t2.result(), 'a')
def test_get_with_waiting_putters(self):
q = asyncio.Queue(loop=self.loop, maxsize=1)
asyncio.Task(q.put('a'), loop=self.loop)
asyncio.Task(q.put('b'), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertEqual(self.loop.run_until_complete(q.get()), 'a')
self.assertEqual(self.loop.run_until_complete(q.get()), 'b')
def test_why_are_getters_waiting(self):
# From issue #268.
@asyncio.coroutine
def consumer(queue, num_expected):
for _ in range(num_expected):
yield from queue.get()
@asyncio.coroutine
def producer(queue, num_items):
for i in range(num_items):
yield from queue.put(i)
queue_size = 1
producer_num_items = 5
q = asyncio.Queue(queue_size, loop=self.loop)
self.loop.run_until_complete(
asyncio.gather(
producer(q, producer_num_items), consumer(q, producer_num_items), loop=self.loop
),
)
@unittest.skipIf(sys.version_info < (3, 6, 4), "Changed in 3.6.4")
def test_cancelled_getters_not_being_held_in_self_getters(self):
def a_generator():
yield 0.1
yield 0.2
self.loop = self.new_test_loop(a_generator)
@asyncio.coroutine
def consumer(queue):
try:
yield from asyncio.wait_for(queue.get(), 0.1, loop=self.loop)
except asyncio.TimeoutError:
pass
queue = asyncio.Queue(loop=self.loop, maxsize=5)
self.loop.run_until_complete(self.loop.create_task(consumer(queue)))
self.assertEqual(len(queue._getters), 0)
class QueuePutTests(_QueueTestBase):
def test_blocking_put(self):
q = asyncio.Queue(loop=self.loop)
@asyncio.coroutine
def queue_put():
# No maxsize, won't block.
yield from q.put(1)
self.loop.run_until_complete(queue_put())
def test_blocking_put_wait(self):
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
yield 0.01
loop = self.new_test_loop(gen)
q = asyncio.Queue(maxsize=1, loop=loop)
started = asyncio.Event(loop=loop)
finished = False
@asyncio.coroutine
def queue_put():
nonlocal finished
started.set()
yield from q.put(1)
yield from q.put(2)
finished = True
@asyncio.coroutine
def queue_get():
loop.call_later(0.01, q.get_nowait)
queue_put_task = asyncio.Task(queue_put(), loop=loop)
yield from started.wait()
self.assertFalse(finished)
yield from queue_put_task
self.assertTrue(finished)
loop.run_until_complete(queue_get())
self.assertAlmostEqual(0.01, loop.time())
def test_nonblocking_put(self):
q = asyncio.Queue(loop=self.loop)
q.put_nowait(1)
self.assertEqual(1, q.get_nowait())
def test_get_cancel_drop_one_pending_reader(self):
def gen():
yield 0.01
yield 0.1
loop = self.new_test_loop(gen)
q = asyncio.Queue(loop=loop)
reader = loop.create_task(q.get())
loop.run_until_complete(asyncio.sleep(0.01, loop=loop))
q.put_nowait(1)
q.put_nowait(2)
reader.cancel()
try:
loop.run_until_complete(reader)
except asyncio.CancelledError:
# try again
reader = loop.create_task(q.get())
loop.run_until_complete(reader)
result = reader.result()
# if we get 2, it means 1 got dropped!
self.assertEqual(1, result)
def test_get_cancel_drop_many_pending_readers(self):
def gen():
yield 0.01
yield 0.1
loop = self.new_test_loop(gen)
loop.set_debug(True)
q = asyncio.Queue(loop=loop)
reader1 = loop.create_task(q.get())
reader2 = loop.create_task(q.get())
reader3 = loop.create_task(q.get())
loop.run_until_complete(asyncio.sleep(0.01, loop=loop))
q.put_nowait(1)
q.put_nowait(2)
reader1.cancel()
try:
loop.run_until_complete(reader1)
except asyncio.CancelledError:
pass
loop.run_until_complete(reader3)
# It is undefined in which order concurrent readers receive results.
self.assertEqual({reader2.result(), reader3.result()}, {1, 2})
def test_put_cancel_drop(self):
def gen():
yield 0.01
yield 0.1
loop = self.new_test_loop(gen)
q = asyncio.Queue(1, loop=loop)
q.put_nowait(1)
# putting a second item in the queue has to block (qsize=1)
writer = loop.create_task(q.put(2))
loop.run_until_complete(asyncio.sleep(0.01, loop=loop))
value1 = q.get_nowait()
self.assertEqual(value1, 1)
writer.cancel()
try:
loop.run_until_complete(writer)
except asyncio.CancelledError:
# try again
writer = loop.create_task(q.put(2))
loop.run_until_complete(writer)
value2 = q.get_nowait()
self.assertEqual(value2, 2)
self.assertEqual(q.qsize(), 0)
def test_nonblocking_put_exception(self):
q = asyncio.Queue(maxsize=1, loop=self.loop)
q.put_nowait(1)
self.assertRaises(asyncio.QueueFull, q.put_nowait, 2)
def test_float_maxsize(self):
q = asyncio.Queue(maxsize=1.3, loop=self.loop)
q.put_nowait(1)
q.put_nowait(2)
self.assertTrue(q.full())
self.assertRaises(asyncio.QueueFull, q.put_nowait, 3)
q = asyncio.Queue(maxsize=1.3, loop=self.loop)
@asyncio.coroutine
def queue_put():
yield from q.put(1)
yield from q.put(2)
self.assertTrue(q.full())
self.loop.run_until_complete(queue_put())
def test_put_cancelled(self):
q = asyncio.Queue(loop=self.loop)
@asyncio.coroutine
def queue_put():
yield from q.put(1)
return True
@asyncio.coroutine
def test():
return (yield from q.get())
t = asyncio.Task(queue_put(), loop=self.loop)
self.assertEqual(1, self.loop.run_until_complete(test()))
self.assertTrue(t.done())
self.assertTrue(t.result())
def test_put_cancelled_race(self):
q = asyncio.Queue(loop=self.loop, maxsize=1)
put_a = asyncio.Task(q.put('a'), loop=self.loop)
put_b = asyncio.Task(q.put('b'), loop=self.loop)
put_c = asyncio.Task(q.put('X'), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertTrue(put_a.done())
self.assertFalse(put_b.done())
put_c.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(put_c.done())
self.assertEqual(q.get_nowait(), 'a')
test_utils.run_briefly(self.loop)
self.assertEqual(q.get_nowait(), 'b')
self.loop.run_until_complete(put_b)
def test_put_with_waiting_getters(self):
q = asyncio.Queue(loop=self.loop)
t = asyncio.Task(q.get(), loop=self.loop)
test_utils.run_briefly(self.loop)
self.loop.run_until_complete(q.put('a'))
self.assertEqual(self.loop.run_until_complete(t), 'a')
def test_why_are_putters_waiting(self):
# From issue #265.
queue = asyncio.Queue(2, loop=self.loop)
@asyncio.coroutine
def putter(item):
yield from queue.put(item)
@asyncio.coroutine
def getter():
yield
num = queue.qsize()
for _ in range(num):
queue.get_nowait()
t0 = putter(0)
t1 = putter(1)
t2 = putter(2)
t3 = putter(3)
self.loop.run_until_complete(asyncio.gather(getter(), t0, t1, t2, t3, loop=self.loop))
class LifoQueueTests(_QueueTestBase):
def test_order(self):
q = asyncio.LifoQueue(loop=self.loop)
for i in [1, 3, 2]:
q.put_nowait(i)
items = [q.get_nowait() for _ in range(3)]
self.assertEqual([2, 3, 1], items)
class PriorityQueueTests(_QueueTestBase):
def test_order(self):
q = asyncio.PriorityQueue(loop=self.loop)
for i in [1, 3, 2]:
q.put_nowait(i)
items = [q.get_nowait() for _ in range(3)]
self.assertEqual([1, 2, 3], items)
class _QueueJoinTestMixin:
q_class = None
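    # Subclasses bind q_class to the asyncio queue implementation under test.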
def test_task_done_underflow(self):
q = self.q_class(loop=self.loop)
self.assertRaises(ValueError, q.task_done)
def test_task_done(self):
q = self.q_class(loop=self.loop)
for i in range(100):
q.put_nowait(i)
accumulator = 0
# Two workers get items from the queue and call task_done after each.
# Join the queue and assert all items have been processed.
running = True
@asyncio.coroutine
def worker():
nonlocal accumulator
while running:
item = yield from q.get()
accumulator += item
q.task_done()
@asyncio.coroutine
def test():
tasks = [asyncio.Task(worker(), loop=self.loop) for index in range(2)]
yield from q.join()
return tasks
tasks = self.loop.run_until_complete(test())
self.assertEqual(sum(range(100)), accumulator)
# close running generators
running = False
for i in range(len(tasks)):
q.put_nowait(0)
self.loop.run_until_complete(asyncio.wait(tasks, loop=self.loop))
def test_join_empty_queue(self):
q = self.q_class(loop=self.loop)
# Test that a queue join()s successfully, and before anything else
# (done twice for insurance).
@asyncio.coroutine
def join():
yield from q.join()
yield from q.join()
self.loop.run_until_complete(join())
def test_format(self):
q = self.q_class(loop=self.loop)
self.assertEqual(q._format(), 'maxsize=0')
q._unfinished_tasks = 2
self.assertEqual(q._format(), 'maxsize=0 tasks=2')
class QueueJoinTests(_QueueJoinTestMixin, _QueueTestBase):
q_class = asyncio.Queue
class LifoQueueJoinTests(_QueueJoinTestMixin, _QueueTestBase):
q_class = asyncio.LifoQueue
class PriorityQueueJoinTests(_QueueJoinTestMixin, _QueueTestBase):
q_class = asyncio.PriorityQueue
if __name__ == '__main__':
unittest.main()
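# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): the task_done()/join()
# protocol exercised by _QueueJoinTestMixin, in the same pre-async/await
# coroutine style the tests use. `process` is a made-up handler name.
#
#     @asyncio.coroutine
#     def worker(q):
#         while True:
#             item = yield from q.get()
#             process(item)       # hypothetical per-item work
#             q.task_done()       # exactly one task_done() per completed get()
#
#     # q.join() unblocks only after task_done() has been called once for
#     # every item ever put into the queue.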
| avg_line_length: 28.861852 | max_line_length: 96 | alphanum_fraction: 0.585967 |
790524ada039137048fdeb2261c531db582c8242 | 16,233 | py | Python
training/loss.py | duskvirkus/stylegan2-ada-tpu | 2a33dcd6a3cea67006515ad7e41d80e6800d9285 | ["BSD-Source-Code"] | stars: 1 (2021-06-20T18:07:41.000Z)
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Loss functions."""
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
# ----------------------------------------------------------------------------
# Report statistic for all interested parties (AdaptiveAugment and tfevents).
def report_stat(aug, name, value):
if aug is not None:
value = aug.report_stat(name, value)
value = autosummary(name, value)
return value
# ----------------------------------------------------------------------------
# Report loss terms and collect them into EasyDict.
def report_loss(aug, G_loss, D_loss, G_reg=None, D_reg=None):
assert G_loss is not None and D_loss is not None
terms = dnnlib.EasyDict(G_reg=None, D_reg=None)
terms.G_loss = report_stat(aug, 'Loss/G/loss', G_loss)
terms.D_loss = report_stat(aug, 'Loss/D/loss', D_loss)
if G_reg is not None: terms.G_reg = report_stat(aug, 'Loss/G/reg', G_reg)
if D_reg is not None: terms.D_reg = report_stat(aug, 'Loss/D/reg', D_reg)
return terms
# ----------------------------------------------------------------------------
# Evaluate G and return results as EasyDict.
def eval_G(G, latents, labels, return_dlatents=False):
r = dnnlib.EasyDict()
r.args = dnnlib.EasyDict()
r.args.is_training = True
if return_dlatents:
r.args.return_dlatents = True
r.images = G.get_output_for(latents, labels, **r.args)
r.dlatents = None
if return_dlatents:
r.images, r.dlatents = r.images
return r
# ----------------------------------------------------------------------------
# Evaluate D and return results as EasyDict.
def eval_D(D, aug, images, labels, report=None, augment_inputs=True, return_aux=0):
r = dnnlib.EasyDict()
r.images_aug = images
r.labels_aug = labels
if augment_inputs and aug is not None:
r.images_aug, r.labels_aug = aug.apply(r.images_aug, r.labels_aug)
r.args = dnnlib.EasyDict()
r.args.is_training = True
if aug is not None:
r.args.augment_strength = aug.get_strength_var()
if return_aux > 0:
r.args.score_size = return_aux + 1
r.scores = D.get_output_for(r.images_aug, r.labels_aug, **r.args)
r.aux = None
if return_aux:
r.aux = r.scores[:, 1:]
r.scores = r.scores[:, :1]
if report is not None:
report_ops = [
report_stat(aug, 'Loss/scores/' + report, r.scores),
report_stat(aug, 'Loss/signs/' + report, tf.sign(r.scores)),
report_stat(aug, 'Loss/squares/' + report, tf.square(r.scores)),
]
with tf.control_dependencies(report_ops):
r.scores = tf.identity(r.scores)
return r
# ----------------------------------------------------------------------------
# Non-saturating logistic loss with R1 and path length regularizers, used
# in the paper "Analyzing and Improving the Image Quality of StyleGAN".
def stylegan2(G, D, aug, fake_labels, real_images, real_labels, r1_gamma=10, pl_minibatch_shrink=2, pl_decay=0.01,
pl_weight=2, G_top_k=False, G_top_k_gamma=0.9, G_top_k_frac=0.5, **_kwargs):
# Evaluate networks for the main loss.
minibatch_size = tf.shape(fake_labels)[0]
fake_latents = tf.random.normal([minibatch_size] + G.input_shapes[0][1:])
G_fake = eval_G(G, fake_latents, fake_labels, return_dlatents=True)
D_fake = eval_D(D, aug, G_fake.images, fake_labels, report='fake')
D_real = eval_D(D, aug, real_images, real_labels, report='real')
# Non-saturating logistic loss from "Generative Adversarial Nets".
with tf.name_scope('Loss_main'):
D_fake_scores = D_fake.scores
if G_top_k:
k_frac = tf.maximum(G_top_k_gamma ** G.epochs, G_top_k_frac)
            k = tf.cast(tf.math.ceil(tf.cast(minibatch_size, tf.float32) * k_frac), tf.int32)  # tf.math.ceil: np.ceil cannot operate on a symbolic tensor
lowest_k_scores, _ = tf.nn.top_k(-tf.squeeze(D_fake_scores), k=k) # want smallest probabilities not largest
D_fake_scores = tf.expand_dims(-lowest_k_scores, axis=1)
G_loss = tf.nn.softplus(-D_fake_scores) # -log(sigmoid(D_fake_scores)), pylint: disable=invalid-unary-operand-type
D_loss = tf.nn.softplus(D_fake.scores) # -log(1 - sigmoid(D_fake.scores))
D_loss += tf.nn.softplus(-D_real.scores) # -log(sigmoid(D_real.scores)), pylint: disable=invalid-unary-operand-type
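        # Identity used above: softplus(x) = log(1 + e^x), so
        #   softplus(-s) = -log(sigmoid(s))  and  softplus(s) = -log(1 - sigmoid(s)),
        # which is why both loss terms are expressed with tf.nn.softplus.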
G_reg = 0
D_reg = 0
# R1 regularizer from "Which Training Methods for GANs do actually Converge?".
if r1_gamma != 0:
with tf.name_scope('Loss_R1'):
r1_grads = tf.gradients(tf.reduce_sum(D_real.scores), [real_images])[0]
r1_penalty = tf.reduce_sum(tf.square(r1_grads), axis=[1, 2, 3])
r1_penalty = report_stat(aug, 'Loss/r1_penalty', r1_penalty)
D_reg += r1_penalty * (r1_gamma * 0.5)
# Path length regularizer from "Analyzing and Improving the Image Quality of StyleGAN".
if pl_weight != 0:
with tf.name_scope('Loss_PL'):
# Evaluate the regularization term using a smaller minibatch to conserve memory.
G_pl = G_fake
if pl_minibatch_shrink > 1:
pl_minibatch_size = minibatch_size // pl_minibatch_shrink
pl_latents = fake_latents[:pl_minibatch_size]
pl_labels = fake_labels[:pl_minibatch_size]
G_pl = eval_G(G, pl_latents, pl_labels, return_dlatents=True)
# Compute |J*y|.
pl_noise = tf.random.normal(tf.shape(G_pl.images)) / np.sqrt(np.prod(G.output_shape[2:]))
pl_grads = tf.gradients(tf.reduce_sum(G_pl.images * pl_noise), [G_pl.dlatents])[0]
pl_lengths = tf.sqrt(tf.reduce_mean(tf.reduce_sum(tf.square(pl_grads), axis=2), axis=1))
# Track exponential moving average of |J*y|.
with tf.control_dependencies(None):
pl_mean_var = tf.Variable(name='pl_mean', trainable=False, initial_value=0, dtype=tf.float32)
pl_mean = pl_mean_var + pl_decay * (tf.reduce_mean(pl_lengths) - pl_mean_var)
pl_update = tf.assign(pl_mean_var, pl_mean)
# Calculate (|J*y|-a)^2.
with tf.control_dependencies([pl_update]):
pl_penalty = tf.square(pl_lengths - pl_mean)
pl_penalty = report_stat(aug, 'Loss/pl_penalty', pl_penalty)
# Apply weight.
#
# Note: The division in pl_noise decreases the weight by num_pixels, and the reduce_mean
# in pl_lengths decreases it by num_affine_layers. The effective weight then becomes:
#
# gamma_pl = pl_weight / num_pixels / num_affine_layers
# = 2 / (r^2) / (log2(r) * 2 - 2)
# = 1 / (r^2 * (log2(r) - 1))
            #            = ln(2) / (r^2 * (ln(r) - ln(2)))
#
G_reg += tf.tile(pl_penalty, [pl_minibatch_shrink]) * pl_weight
return report_loss(aug, G_loss, D_loss, G_reg, D_reg)
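# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): what the G_top_k branch
# above selects. tf.nn.top_k on the *negated* scores picks out the k lowest
# discriminator scores, matching the "want smallest probabilities" comment.
# The _top_k_demo name and toy numbers are made up.
def _top_k_demo():
    scores = np.array([2.0, -1.5, 0.3, -0.7], dtype=np.float32)
    k = 2
    top_k_of_negated = np.sort(-scores)[::-1][:k]  # mimics tf.nn.top_k(-scores, k=k)
    lowest_k = -top_k_of_negated                   # recover the k smallest scores
    assert set(lowest_k) == set(np.sort(scores)[:k])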
# ----------------------------------------------------------------------------
# Hybrid loss used for comparison methods used in the paper
# "Training Generative Adversarial Networks with Limited Data".
def cmethods(G, D, aug, fake_labels, real_images, real_labels,
r1_gamma=10, r2_gamma=0,
pl_minibatch_shrink=2, pl_decay=0.01, pl_weight=2,
bcr_real_weight=0, bcr_fake_weight=0, bcr_augment=None,
zcr_gen_weight=0, zcr_dis_weight=0, zcr_noise_std=0.1,
auxrot_alpha=0, auxrot_beta=0,
**_kwargs,
):
# Evaluate networks for the main loss.
minibatch_size = tf.shape(fake_labels)[0]
fake_latents = tf.random.normal([minibatch_size] + G.input_shapes[0][1:])
G_fake = eval_G(G, fake_latents, fake_labels)
D_fake = eval_D(D, aug, G_fake.images, fake_labels, report='fake')
D_real = eval_D(D, aug, real_images, real_labels, report='real')
# Non-saturating logistic loss from "Generative Adversarial Nets".
with tf.name_scope('Loss_main'):
G_loss = tf.nn.softplus(-D_fake.scores) # -log(sigmoid(D_fake.scores)), pylint: disable=invalid-unary-operand-type
D_loss = tf.nn.softplus(D_fake.scores) # -log(1 - sigmoid(D_fake.scores))
D_loss += tf.nn.softplus(-D_real.scores) # -log(sigmoid(D_real.scores)), pylint: disable=invalid-unary-operand-type
G_reg = 0
D_reg = 0
# R1 and R2 regularizers from "Which Training Methods for GANs do actually Converge?".
if r1_gamma != 0 or r2_gamma != 0:
with tf.name_scope('Loss_R1R2'):
if r1_gamma != 0:
r1_grads = tf.gradients(tf.reduce_sum(D_real.scores), [real_images])[0]
r1_penalty = tf.reduce_sum(tf.square(r1_grads), axis=[1, 2, 3])
r1_penalty = report_stat(aug, 'Loss/r1_penalty', r1_penalty)
D_reg += r1_penalty * (r1_gamma * 0.5)
if r2_gamma != 0:
r2_grads = tf.gradients(tf.reduce_sum(D_fake.scores), [G_fake.images])[0]
r2_penalty = tf.reduce_sum(tf.square(r2_grads), axis=[1, 2, 3])
r2_penalty = report_stat(aug, 'Loss/r2_penalty', r2_penalty)
D_reg += r2_penalty * (r2_gamma * 0.5)
# Path length regularizer from "Analyzing and Improving the Image Quality of StyleGAN".
if pl_weight != 0:
with tf.name_scope('Loss_PL'):
pl_minibatch_size = minibatch_size // pl_minibatch_shrink
pl_latents = fake_latents[:pl_minibatch_size]
pl_labels = fake_labels[:pl_minibatch_size]
G_pl = eval_G(G, pl_latents, pl_labels, return_dlatents=True)
pl_noise = tf.random.normal(tf.shape(G_pl.images)) / np.sqrt(np.prod(G.output_shape[2:]))
pl_grads = tf.gradients(tf.reduce_sum(G_pl.images * pl_noise), [G_pl.dlatents])[0]
pl_lengths = tf.sqrt(tf.reduce_mean(tf.reduce_sum(tf.square(pl_grads), axis=2), axis=1))
with tf.control_dependencies(None):
pl_mean_var = tf.Variable(name='pl_mean', trainable=False, initial_value=0, dtype=tf.float32)
pl_mean = pl_mean_var + pl_decay * (tf.reduce_mean(pl_lengths) - pl_mean_var)
pl_update = tf.assign(pl_mean_var, pl_mean)
with tf.control_dependencies([pl_update]):
pl_penalty = tf.square(pl_lengths - pl_mean)
pl_penalty = report_stat(aug, 'Loss/pl_penalty', pl_penalty)
G_reg += tf.tile(pl_penalty, [pl_minibatch_shrink]) * pl_weight
# bCR regularizer from "Improved consistency regularization for GANs".
if (bcr_real_weight != 0 or bcr_fake_weight != 0) and bcr_augment is not None:
with tf.name_scope('Loss_bCR'):
if bcr_real_weight != 0:
bcr_real_images, bcr_real_labels = dnnlib.util.call_func_by_name(D_real.images_aug, D_real.labels_aug,
**bcr_augment)
D_bcr_real = eval_D(D, aug, bcr_real_images, bcr_real_labels, report='real_bcr', augment_inputs=False)
bcr_real_penalty = tf.square(D_bcr_real.scores - D_real.scores)
bcr_real_penalty = report_stat(aug, 'Loss/bcr_penalty/real', bcr_real_penalty)
D_loss += bcr_real_penalty * bcr_real_weight # NOTE: Must not use lazy regularization for this term.
if bcr_fake_weight != 0:
bcr_fake_images, bcr_fake_labels = dnnlib.util.call_func_by_name(D_fake.images_aug, D_fake.labels_aug,
**bcr_augment)
D_bcr_fake = eval_D(D, aug, bcr_fake_images, bcr_fake_labels, report='fake_bcr', augment_inputs=False)
bcr_fake_penalty = tf.square(D_bcr_fake.scores - D_fake.scores)
bcr_fake_penalty = report_stat(aug, 'Loss/bcr_penalty/fake', bcr_fake_penalty)
D_loss += bcr_fake_penalty * bcr_fake_weight # NOTE: Must not use lazy regularization for this term.
# zCR regularizer from "Improved consistency regularization for GANs".
if zcr_gen_weight != 0 or zcr_dis_weight != 0:
with tf.name_scope('Loss_zCR'):
zcr_fake_latents = fake_latents + tf.random.normal([minibatch_size] + G.input_shapes[0][1:]) * zcr_noise_std
G_zcr = eval_G(G, zcr_fake_latents, fake_labels)
if zcr_gen_weight > 0:
zcr_gen_penalty = -tf.reduce_mean(tf.square(G_fake.images - G_zcr.images), axis=[1, 2, 3])
zcr_gen_penalty = report_stat(aug, 'Loss/zcr_gen_penalty', zcr_gen_penalty)
G_loss += zcr_gen_penalty * zcr_gen_weight
if zcr_dis_weight > 0:
D_zcr = eval_D(D, aug, G_zcr.images, fake_labels, report='fake_zcr', augment_inputs=False)
zcr_dis_penalty = tf.square(D_fake.scores - D_zcr.scores)
zcr_dis_penalty = report_stat(aug, 'Loss/zcr_dis_penalty', zcr_dis_penalty)
D_loss += zcr_dis_penalty * zcr_dis_weight
# Auxiliary rotation loss from "Self-supervised GANs via auxiliary rotation loss".
if auxrot_alpha != 0 or auxrot_beta != 0:
with tf.name_scope('Loss_AuxRot'):
idx = tf.range(minibatch_size * 4, dtype=tf.int32) // minibatch_size
b0 = tf.logical_or(tf.equal(idx, 0), tf.equal(idx, 1))
b1 = tf.logical_or(tf.equal(idx, 0), tf.equal(idx, 3))
b2 = tf.logical_or(tf.equal(idx, 0), tf.equal(idx, 2))
if auxrot_alpha != 0:
auxrot_fake = tf.tile(G_fake.images, [4, 1, 1, 1])
auxrot_fake = tf.where(b0, auxrot_fake, tf.reverse(auxrot_fake, [2]))
auxrot_fake = tf.where(b1, auxrot_fake, tf.reverse(auxrot_fake, [3]))
auxrot_fake = tf.where(b2, auxrot_fake, tf.transpose(auxrot_fake, [0, 1, 3, 2]))
D_auxrot_fake = eval_D(D, aug, auxrot_fake, fake_labels, return_aux=4)
G_loss += tf.nn.sparse_softmax_cross_entropy_with_logits(labels=idx, logits=D_auxrot_fake.aux) * auxrot_alpha
if auxrot_beta != 0:
auxrot_real = tf.tile(real_images, [4, 1, 1, 1])
auxrot_real = tf.where(b0, auxrot_real, tf.reverse(auxrot_real, [2]))
auxrot_real = tf.where(b1, auxrot_real, tf.reverse(auxrot_real, [3]))
auxrot_real = tf.where(b2, auxrot_real, tf.transpose(auxrot_real, [0, 1, 3, 2]))
D_auxrot_real = eval_D(D, aug, auxrot_real, real_labels, return_aux=4)
D_loss += tf.nn.sparse_softmax_cross_entropy_with_logits(labels=idx, logits=D_auxrot_real.aux) * auxrot_beta
return report_loss(aug, G_loss, D_loss, G_reg, D_reg)
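# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the bCR and zCR terms
# above are plain squared differences between D's score on a sample and on its
# augmented (bCR) or latent-perturbed (zCR) counterpart. The _consistency_demo
# name and toy numbers are made up.
def _consistency_demo():
    scores = np.array([0.8, -0.2], dtype=np.float32)     # D on original samples
    scores_aug = np.array([0.6, 0.1], dtype=np.float32)  # D on augmented pairs
    return np.square(scores_aug - scores)                # per-sample penalty, weighted into D_loss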
# ----------------------------------------------------------------------------
# WGAN-GP loss with epsilon penalty, used in the paper
# "Progressive Growing of GANs for Improved Quality, Stability, and Variation".
def wgangp(G, D, aug, fake_labels, real_images, real_labels, wgan_epsilon=0.001, wgan_lambda=10, wgan_target=1,
**_kwargs):
minibatch_size = tf.shape(fake_labels)[0]
fake_latents = tf.random.normal([minibatch_size] + G.input_shapes[0][1:])
G_fake = eval_G(G, fake_latents, fake_labels)
D_fake = eval_D(D, aug, G_fake.images, fake_labels, report='fake')
D_real = eval_D(D, aug, real_images, real_labels, report='real')
# WGAN loss from "Wasserstein Generative Adversarial Networks".
with tf.name_scope('Loss_main'):
G_loss = -D_fake.scores # pylint: disable=invalid-unary-operand-type
D_loss = D_fake.scores - D_real.scores
# Epsilon penalty from "Progressive Growing of GANs for Improved Quality, Stability, and Variation"
with tf.name_scope('Loss_epsilon'):
epsilon_penalty = report_stat(aug, 'Loss/epsilon_penalty', tf.square(D_real.scores))
D_loss += epsilon_penalty * wgan_epsilon
# Gradient penalty from "Improved Training of Wasserstein GANs".
with tf.name_scope('Loss_GP'):
mix_factors = tf.random.uniform([minibatch_size, 1, 1, 1], 0, 1, dtype=G_fake.images.dtype)
mix_images = tflib.lerp(tf.cast(real_images, G_fake.images.dtype), G_fake.images, mix_factors)
mix_labels = real_labels # NOTE: Mixing is performed without respect to fake_labels.
D_mix = eval_D(D, aug, mix_images, mix_labels, report='mix')
mix_grads = tf.gradients(tf.reduce_sum(D_mix.scores), [mix_images])[0]
mix_norms = tf.sqrt(tf.reduce_sum(tf.square(mix_grads), axis=[1, 2, 3]))
mix_norms = report_stat(aug, 'Loss/mix_norms', mix_norms)
gradient_penalty = tf.square(mix_norms - wgan_target)
D_reg = gradient_penalty * (wgan_lambda / (wgan_target ** 2))
return report_loss(aug, G_loss, D_loss, None, D_reg)
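# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the gradient penalty
# above drives the per-sample gradient norm of D toward wgan_target. The
# _wgangp_penalty_demo name and toy shapes are made up.
def _wgangp_penalty_demo():
    mix_grads = np.random.randn(4, 3, 8, 8).astype(np.float32)       # toy dD/dx
    mix_norms = np.sqrt(np.sum(np.square(mix_grads), axis=(1, 2, 3)))
    wgan_lambda, wgan_target = 10.0, 1.0
    return np.square(mix_norms - wgan_target) * (wgan_lambda / wgan_target ** 2)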
# ----------------------------------------------------------------------------
| avg_line_length: 49.794479 | max_line_length: 120 | alphanum_fraction: 0.669377 |
"""Loss functions."""
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
def report_stat(aug, name, value):
if aug is not None:
value = aug.report_stat(name, value)
value = autosummary(name, value)
return value
def report_loss(aug, G_loss, D_loss, G_reg=None, D_reg=None):
assert G_loss is not None and D_loss is not None
terms = dnnlib.EasyDict(G_reg=None, D_reg=None)
terms.G_loss = report_stat(aug, 'Loss/G/loss', G_loss)
terms.D_loss = report_stat(aug, 'Loss/D/loss', D_loss)
if G_reg is not None: terms.G_reg = report_stat(aug, 'Loss/G/reg', G_reg)
if D_reg is not None: terms.D_reg = report_stat(aug, 'Loss/D/reg', D_reg)
return terms
def eval_G(G, latents, labels, return_dlatents=False):
r = dnnlib.EasyDict()
r.args = dnnlib.EasyDict()
r.args.is_training = True
if return_dlatents:
r.args.return_dlatents = True
r.images = G.get_output_for(latents, labels, **r.args)
r.dlatents = None
if return_dlatents:
r.images, r.dlatents = r.images
return r
def eval_D(D, aug, images, labels, report=None, augment_inputs=True, return_aux=0):
r = dnnlib.EasyDict()
r.images_aug = images
r.labels_aug = labels
if augment_inputs and aug is not None:
r.images_aug, r.labels_aug = aug.apply(r.images_aug, r.labels_aug)
r.args = dnnlib.EasyDict()
r.args.is_training = True
if aug is not None:
r.args.augment_strength = aug.get_strength_var()
if return_aux > 0:
r.args.score_size = return_aux + 1
r.scores = D.get_output_for(r.images_aug, r.labels_aug, **r.args)
r.aux = None
if return_aux:
r.aux = r.scores[:, 1:]
r.scores = r.scores[:, :1]
if report is not None:
report_ops = [
report_stat(aug, 'Loss/scores/' + report, r.scores),
report_stat(aug, 'Loss/signs/' + report, tf.sign(r.scores)),
report_stat(aug, 'Loss/squares/' + report, tf.square(r.scores)),
]
with tf.control_dependencies(report_ops):
r.scores = tf.identity(r.scores)
return r
def stylegan2(G, D, aug, fake_labels, real_images, real_labels, r1_gamma=10, pl_minibatch_shrink=2, pl_decay=0.01,
pl_weight=2, G_top_k=False, G_top_k_gamma=0.9, G_top_k_frac=0.5, **_kwargs):
minibatch_size = tf.shape(fake_labels)[0]
fake_latents = tf.random.normal([minibatch_size] + G.input_shapes[0][1:])
G_fake = eval_G(G, fake_latents, fake_labels, return_dlatents=True)
D_fake = eval_D(D, aug, G_fake.images, fake_labels, report='fake')
D_real = eval_D(D, aug, real_images, real_labels, report='real')
with tf.name_scope('Loss_main'):
D_fake_scores = D_fake.scores
if G_top_k:
k_frac = tf.maximum(G_top_k_gamma ** G.epochs, G_top_k_frac)
k = tf.cast(np.ceil(tf.cast(minibatch_size, tf.float32) * k_frac), tf.int32)
lowest_k_scores, _ = tf.nn.top_k(-tf.squeeze(D_fake_scores), k=k)
D_fake_scores = tf.expand_dims(-lowest_k_scores, axis=1)
G_loss = tf.nn.softplus(-D_fake_scores)
D_loss = tf.nn.softplus(D_fake.scores)
D_loss += tf.nn.softplus(-D_real.scores)
G_reg = 0
D_reg = 0
if r1_gamma != 0:
with tf.name_scope('Loss_R1'):
r1_grads = tf.gradients(tf.reduce_sum(D_real.scores), [real_images])[0]
r1_penalty = tf.reduce_sum(tf.square(r1_grads), axis=[1, 2, 3])
r1_penalty = report_stat(aug, 'Loss/r1_penalty', r1_penalty)
D_reg += r1_penalty * (r1_gamma * 0.5)
if pl_weight != 0:
with tf.name_scope('Loss_PL'):
G_pl = G_fake
if pl_minibatch_shrink > 1:
pl_minibatch_size = minibatch_size // pl_minibatch_shrink
pl_latents = fake_latents[:pl_minibatch_size]
pl_labels = fake_labels[:pl_minibatch_size]
G_pl = eval_G(G, pl_latents, pl_labels, return_dlatents=True)
pl_noise = tf.random.normal(tf.shape(G_pl.images)) / np.sqrt(np.prod(G.output_shape[2:]))
pl_grads = tf.gradients(tf.reduce_sum(G_pl.images * pl_noise), [G_pl.dlatents])[0]
pl_lengths = tf.sqrt(tf.reduce_mean(tf.reduce_sum(tf.square(pl_grads), axis=2), axis=1))
with tf.control_dependencies(None):
pl_mean_var = tf.Variable(name='pl_mean', trainable=False, initial_value=0, dtype=tf.float32)
pl_mean = pl_mean_var + pl_decay * (tf.reduce_mean(pl_lengths) - pl_mean_var)
pl_update = tf.assign(pl_mean_var, pl_mean)
with tf.control_dependencies([pl_update]):
pl_penalty = tf.square(pl_lengths - pl_mean)
pl_penalty = report_stat(aug, 'Loss/pl_penalty', pl_penalty)
G_reg += tf.tile(pl_penalty, [pl_minibatch_shrink]) * pl_weight
return report_loss(aug, G_loss, D_loss, G_reg, D_reg)
def cmethods(G, D, aug, fake_labels, real_images, real_labels,
r1_gamma=10, r2_gamma=0,
pl_minibatch_shrink=2, pl_decay=0.01, pl_weight=2,
bcr_real_weight=0, bcr_fake_weight=0, bcr_augment=None,
zcr_gen_weight=0, zcr_dis_weight=0, zcr_noise_std=0.1,
auxrot_alpha=0, auxrot_beta=0,
**_kwargs,
):
minibatch_size = tf.shape(fake_labels)[0]
fake_latents = tf.random.normal([minibatch_size] + G.input_shapes[0][1:])
G_fake = eval_G(G, fake_latents, fake_labels)
D_fake = eval_D(D, aug, G_fake.images, fake_labels, report='fake')
D_real = eval_D(D, aug, real_images, real_labels, report='real')
with tf.name_scope('Loss_main'):
G_loss = tf.nn.softplus(-D_fake.scores)
D_loss = tf.nn.softplus(D_fake.scores)
D_loss += tf.nn.softplus(-D_real.scores)
G_reg = 0
D_reg = 0
if r1_gamma != 0 or r2_gamma != 0:
with tf.name_scope('Loss_R1R2'):
if r1_gamma != 0:
r1_grads = tf.gradients(tf.reduce_sum(D_real.scores), [real_images])[0]
r1_penalty = tf.reduce_sum(tf.square(r1_grads), axis=[1, 2, 3])
r1_penalty = report_stat(aug, 'Loss/r1_penalty', r1_penalty)
D_reg += r1_penalty * (r1_gamma * 0.5)
if r2_gamma != 0:
r2_grads = tf.gradients(tf.reduce_sum(D_fake.scores), [G_fake.images])[0]
r2_penalty = tf.reduce_sum(tf.square(r2_grads), axis=[1, 2, 3])
r2_penalty = report_stat(aug, 'Loss/r2_penalty', r2_penalty)
D_reg += r2_penalty * (r2_gamma * 0.5)
if pl_weight != 0:
with tf.name_scope('Loss_PL'):
pl_minibatch_size = minibatch_size // pl_minibatch_shrink
pl_latents = fake_latents[:pl_minibatch_size]
pl_labels = fake_labels[:pl_minibatch_size]
G_pl = eval_G(G, pl_latents, pl_labels, return_dlatents=True)
pl_noise = tf.random.normal(tf.shape(G_pl.images)) / np.sqrt(np.prod(G.output_shape[2:]))
pl_grads = tf.gradients(tf.reduce_sum(G_pl.images * pl_noise), [G_pl.dlatents])[0]
pl_lengths = tf.sqrt(tf.reduce_mean(tf.reduce_sum(tf.square(pl_grads), axis=2), axis=1))
with tf.control_dependencies(None):
pl_mean_var = tf.Variable(name='pl_mean', trainable=False, initial_value=0, dtype=tf.float32)
pl_mean = pl_mean_var + pl_decay * (tf.reduce_mean(pl_lengths) - pl_mean_var)
pl_update = tf.assign(pl_mean_var, pl_mean)
with tf.control_dependencies([pl_update]):
pl_penalty = tf.square(pl_lengths - pl_mean)
pl_penalty = report_stat(aug, 'Loss/pl_penalty', pl_penalty)
G_reg += tf.tile(pl_penalty, [pl_minibatch_shrink]) * pl_weight
if (bcr_real_weight != 0 or bcr_fake_weight != 0) and bcr_augment is not None:
with tf.name_scope('Loss_bCR'):
if bcr_real_weight != 0:
bcr_real_images, bcr_real_labels = dnnlib.util.call_func_by_name(D_real.images_aug, D_real.labels_aug,
**bcr_augment)
D_bcr_real = eval_D(D, aug, bcr_real_images, bcr_real_labels, report='real_bcr', augment_inputs=False)
bcr_real_penalty = tf.square(D_bcr_real.scores - D_real.scores)
bcr_real_penalty = report_stat(aug, 'Loss/bcr_penalty/real', bcr_real_penalty)
D_loss += bcr_real_penalty * bcr_real_weight
if bcr_fake_weight != 0:
bcr_fake_images, bcr_fake_labels = dnnlib.util.call_func_by_name(D_fake.images_aug, D_fake.labels_aug,
**bcr_augment)
D_bcr_fake = eval_D(D, aug, bcr_fake_images, bcr_fake_labels, report='fake_bcr', augment_inputs=False)
bcr_fake_penalty = tf.square(D_bcr_fake.scores - D_fake.scores)
bcr_fake_penalty = report_stat(aug, 'Loss/bcr_penalty/fake', bcr_fake_penalty)
D_loss += bcr_fake_penalty * bcr_fake_weight
if zcr_gen_weight != 0 or zcr_dis_weight != 0:
with tf.name_scope('Loss_zCR'):
zcr_fake_latents = fake_latents + tf.random.normal([minibatch_size] + G.input_shapes[0][1:]) * zcr_noise_std
G_zcr = eval_G(G, zcr_fake_latents, fake_labels)
if zcr_gen_weight > 0:
zcr_gen_penalty = -tf.reduce_mean(tf.square(G_fake.images - G_zcr.images), axis=[1, 2, 3])
zcr_gen_penalty = report_stat(aug, 'Loss/zcr_gen_penalty', zcr_gen_penalty)
G_loss += zcr_gen_penalty * zcr_gen_weight
if zcr_dis_weight > 0:
D_zcr = eval_D(D, aug, G_zcr.images, fake_labels, report='fake_zcr', augment_inputs=False)
zcr_dis_penalty = tf.square(D_fake.scores - D_zcr.scores)
zcr_dis_penalty = report_stat(aug, 'Loss/zcr_dis_penalty', zcr_dis_penalty)
D_loss += zcr_dis_penalty * zcr_dis_weight
if auxrot_alpha != 0 or auxrot_beta != 0:
with tf.name_scope('Loss_AuxRot'):
idx = tf.range(minibatch_size * 4, dtype=tf.int32) // minibatch_size
b0 = tf.logical_or(tf.equal(idx, 0), tf.equal(idx, 1))
b1 = tf.logical_or(tf.equal(idx, 0), tf.equal(idx, 3))
b2 = tf.logical_or(tf.equal(idx, 0), tf.equal(idx, 2))
if auxrot_alpha != 0:
auxrot_fake = tf.tile(G_fake.images, [4, 1, 1, 1])
auxrot_fake = tf.where(b0, auxrot_fake, tf.reverse(auxrot_fake, [2]))
auxrot_fake = tf.where(b1, auxrot_fake, tf.reverse(auxrot_fake, [3]))
auxrot_fake = tf.where(b2, auxrot_fake, tf.transpose(auxrot_fake, [0, 1, 3, 2]))
D_auxrot_fake = eval_D(D, aug, auxrot_fake, fake_labels, return_aux=4)
G_loss += tf.nn.sparse_softmax_cross_entropy_with_logits(labels=idx, logits=D_auxrot_fake.aux) * auxrot_alpha
if auxrot_beta != 0:
auxrot_real = tf.tile(real_images, [4, 1, 1, 1])
auxrot_real = tf.where(b0, auxrot_real, tf.reverse(auxrot_real, [2]))
auxrot_real = tf.where(b1, auxrot_real, tf.reverse(auxrot_real, [3]))
auxrot_real = tf.where(b2, auxrot_real, tf.transpose(auxrot_real, [0, 1, 3, 2]))
D_auxrot_real = eval_D(D, aug, auxrot_real, real_labels, return_aux=4)
D_loss += tf.nn.sparse_softmax_cross_entropy_with_logits(labels=idx, logits=D_auxrot_real.aux) * auxrot_beta
return report_loss(aug, G_loss, D_loss, G_reg, D_reg)
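#----------------------------------------------------------------------------
# Illustrative sketch, not part of the original loss code: the balanced
# consistency regularization (bCR) term above in isolation. `critic` and
# `augment` are stand-in callables assumed only for this example.
def _toy_bcr_penalty(critic, augment, images, weight=10.0):
    """Penalize the critic for scoring an image and its augmented copy differently."""
    scores = critic(images)
    scores_aug = critic(augment(images))
    return tf.square(scores_aug - scores) * weight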
def wgangp(G, D, aug, fake_labels, real_images, real_labels, wgan_epsilon=0.001, wgan_lambda=10, wgan_target=1,
**_kwargs):
minibatch_size = tf.shape(fake_labels)[0]
fake_latents = tf.random.normal([minibatch_size] + G.input_shapes[0][1:])
G_fake = eval_G(G, fake_latents, fake_labels)
D_fake = eval_D(D, aug, G_fake.images, fake_labels, report='fake')
D_real = eval_D(D, aug, real_images, real_labels, report='real')
with tf.name_scope('Loss_main'):
G_loss = -D_fake.scores
D_loss = D_fake.scores - D_real.scores
with tf.name_scope('Loss_epsilon'):
epsilon_penalty = report_stat(aug, 'Loss/epsilon_penalty', tf.square(D_real.scores))
D_loss += epsilon_penalty * wgan_epsilon
with tf.name_scope('Loss_GP'):
mix_factors = tf.random.uniform([minibatch_size, 1, 1, 1], 0, 1, dtype=G_fake.images.dtype)
mix_images = tflib.lerp(tf.cast(real_images, G_fake.images.dtype), G_fake.images, mix_factors)
mix_labels = real_labels
D_mix = eval_D(D, aug, mix_images, mix_labels, report='mix')
mix_grads = tf.gradients(tf.reduce_sum(D_mix.scores), [mix_images])[0]
mix_norms = tf.sqrt(tf.reduce_sum(tf.square(mix_grads), axis=[1, 2, 3]))
mix_norms = report_stat(aug, 'Loss/mix_norms', mix_norms)
gradient_penalty = tf.square(mix_norms - wgan_target)
D_reg = gradient_penalty * (wgan_lambda / (wgan_target ** 2))
return report_loss(aug, G_loss, D_loss, None, D_reg)
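#----------------------------------------------------------------------------
# Illustrative sketch, not part of the original loss code: the WGAN-GP gradient
# penalty above with a stand-in critic. TF1-style graph mode is assumed, to
# match the rest of this file.
def _toy_gradient_penalty(real, fake, wgan_lambda=10.0, wgan_target=1.0):
    """Interpolate real/fake images and penalize critic gradient norms that deviate from the target."""
    t = tf.random.uniform([tf.shape(real)[0], 1, 1, 1], 0.0, 1.0)
    mixed = real + (fake - real) * t                       # equivalent to tflib.lerp(real, fake, t)
    scores = tf.reduce_sum(mixed, axis=[1, 2, 3])          # stand-in critic
    grads = tf.gradients(tf.reduce_sum(scores), [mixed])[0]
    norms = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
    return tf.square(norms - wgan_target) * (wgan_lambda / (wgan_target ** 2))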
| false
| true
|
790526c0cdff8c1a50772aae846df2d48fd3d95d
| 371
|
py
|
Python
|
backend/api/migrations/0004_auto_20180528_2342.py
|
rkcf/dailio
|
39ef7573c005e753918b0b15d82eb4d00b7732db
|
[
"MIT"
] | 4
|
2018-04-19T15:07:43.000Z
|
2018-05-29T02:51:35.000Z
|
backend/api/migrations/0004_auto_20180528_2342.py
|
rkcf/dailio
|
39ef7573c005e753918b0b15d82eb4d00b7732db
|
[
"MIT"
] | null | null | null |
backend/api/migrations/0004_auto_20180528_2342.py
|
rkcf/dailio
|
39ef7573c005e753918b0b15d82eb4d00b7732db
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.3 on 2018-05-28 23:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0003_task_order'),
]
operations = [
migrations.AlterField(
model_name='task',
name='order',
field=models.IntegerField(blank=True),
),
]
| 19.526316
| 50
| 0.58221
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0003_task_order'),
]
operations = [
migrations.AlterField(
model_name='task',
name='order',
field=models.IntegerField(blank=True),
),
]
| true
| true
|
79052816aec8b76957540d72b3609cbf4bb76b0c
| 1,528
|
py
|
Python
|
src/commercetools/platform/client/matching_cart/by_project_key_shipping_methods_matching_cart_request_builder.py
|
lime-green/commercetools-python-sdk
|
63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6
|
[
"MIT"
] | 1
|
2021-04-07T20:01:30.000Z
|
2021-04-07T20:01:30.000Z
|
src/commercetools/platform/client/matching_cart/by_project_key_shipping_methods_matching_cart_request_builder.py
|
lime-green/commercetools-python-sdk
|
63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6
|
[
"MIT"
] | null | null | null |
src/commercetools/platform/client/matching_cart/by_project_key_shipping_methods_matching_cart_request_builder.py
|
lime-green/commercetools-python-sdk
|
63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6
|
[
"MIT"
] | null | null | null |
# Generated file, please do not change!!!
import typing
from ...models.error import ErrorResponse
from ...models.shipping_method import ShippingMethodPagedQueryResponse
if typing.TYPE_CHECKING:
from ...base_client import BaseClient
class ByProjectKeyShippingMethodsMatchingCartRequestBuilder:
_client: "BaseClient"
_project_key: str
def __init__(
self,
project_key: str,
client: "BaseClient",
):
self._project_key = project_key
self._client = client
def get(
self,
*,
cart_id: str,
expand: typing.List["str"] = None,
headers: typing.Dict[str, str] = None,
options: typing.Dict[str, typing.Any] = None,
) -> typing.Optional["ShippingMethodPagedQueryResponse"]:
headers = {} if headers is None else headers
response = self._client._get(
endpoint=f"/{self._project_key}/shipping-methods/matching-cart",
params={"cartId": cart_id, "expand": expand},
headers=headers,
options=options,
)
if response.status_code == 200:
return ShippingMethodPagedQueryResponse.deserialize(response.json())
elif response.status_code in (400, 401, 403, 500, 503):
obj = ErrorResponse.deserialize(response.json())
raise self._client._create_exception(obj, response)
elif response.status_code == 404:
return None
        raise ValueError("Unhandled status code %s" % response.status_code)
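# --- Illustrative usage sketch, not part of the generated file ---------------
# Client construction is assumed to have happened elsewhere; this only shows
# how the request builder above is meant to be driven.
def fetch_matching_shipping_methods(client: "BaseClient", project_key: str, cart_id: str):
    """Sketch: fetch the shipping methods that match a given cart."""
    builder = ByProjectKeyShippingMethodsMatchingCartRequestBuilder(
        project_key=project_key, client=client
    )
    return builder.get(cart_id=cart_id)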
| 32.510638
| 80
| 0.643979
|
import typing
from ...models.error import ErrorResponse
from ...models.shipping_method import ShippingMethodPagedQueryResponse
if typing.TYPE_CHECKING:
from ...base_client import BaseClient
class ByProjectKeyShippingMethodsMatchingCartRequestBuilder:
_client: "BaseClient"
_project_key: str
def __init__(
self,
project_key: str,
client: "BaseClient",
):
self._project_key = project_key
self._client = client
def get(
self,
*,
cart_id: str,
expand: typing.List["str"] = None,
headers: typing.Dict[str, str] = None,
options: typing.Dict[str, typing.Any] = None,
) -> typing.Optional["ShippingMethodPagedQueryResponse"]:
headers = {} if headers is None else headers
response = self._client._get(
endpoint=f"/{self._project_key}/shipping-methods/matching-cart",
params={"cartId": cart_id, "expand": expand},
headers=headers,
options=options,
)
if response.status_code == 200:
return ShippingMethodPagedQueryResponse.deserialize(response.json())
elif response.status_code in (400, 401, 403, 500, 503):
obj = ErrorResponse.deserialize(response.json())
raise self._client._create_exception(obj, response)
elif response.status_code == 404:
return None
        raise ValueError("Unhandled status code %s" % response.status_code)
| true
| true
|
790528f084d28e12574cc8d2141823b9ee3dcd46
| 461
|
py
|
Python
|
14_Tran_An_Thien/ManagementStudents/ManagementStudents/customsettings.py
|
lpython2006e/exercies
|
84343eae57d86708a7984aa02f77183a4688a508
|
[
"MIT"
] | null | null | null |
14_Tran_An_Thien/ManagementStudents/ManagementStudents/customsettings.py
|
lpython2006e/exercies
|
84343eae57d86708a7984aa02f77183a4688a508
|
[
"MIT"
] | null | null | null |
14_Tran_An_Thien/ManagementStudents/ManagementStudents/customsettings.py
|
lpython2006e/exercies
|
84343eae57d86708a7984aa02f77183a4688a508
|
[
"MIT"
] | 8
|
2020-07-10T14:13:54.000Z
|
2020-08-03T08:17:50.000Z
|
from django.contrib.staticfiles.storage import staticfiles_storage
from django.urls import reverse
from ManagementStudents.jinja2 import Environment
# This enables us to use Django template tags like {% url ‘index’ %} or {% static ‘path/to/static/file.js’ %} in our Jinja2 templates.
def environment(**options):
env = Environment(**options)
env.globals.update({
'static': staticfiles_storage.url,
'url': reverse,
})
return env
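# Illustrative wiring sketch (an assumption, not shown in this file): the
# environment() callable above is typically referenced from settings.py through
# the Jinja2 template backend's OPTIONS, e.g.:
# TEMPLATES = [
#     {
#         'BACKEND': 'django.template.backends.jinja2.Jinja2',
#         'DIRS': [],
#         'APP_DIRS': True,
#         'OPTIONS': {'environment': 'ManagementStudents.customsettings.environment'},
#     },
# ]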
| 32.928571
| 134
| 0.718004
|
from django.contrib.staticfiles.storage import staticfiles_storage
from django.urls import reverse
from ManagementStudents.jinja2 import Environment
def environment(**options):
env = Environment(**options)
env.globals.update({
'static': staticfiles_storage.url,
'url': reverse,
})
return env
| true
| true
|
790529ec258abe4aee691763242fc2d4e54dbd47
| 6,297
|
py
|
Python
|
tests/test_board_responses.py
|
BiffoBear/CircuitPython-AS3935
|
0ea9fef373fa9cad3a2ecb6c96b0951c5d95383b
|
[
"MIT",
"MIT-0",
"Unlicense"
] | 2
|
2021-06-27T14:45:58.000Z
|
2022-01-20T19:37:30.000Z
|
tests/test_board_responses.py
|
BiffoBear/CircuitPython-AS3935
|
0ea9fef373fa9cad3a2ecb6c96b0951c5d95383b
|
[
"MIT",
"MIT-0",
"Unlicense"
] | 2
|
2021-02-21T13:01:45.000Z
|
2022-02-15T16:34:41.000Z
|
tests/test_board_responses.py
|
BiffoBear/CircuitPython-AS3935
|
0ea9fef373fa9cad3a2ecb6c96b0951c5d95383b
|
[
"MIT",
"MIT-0",
"Unlicense"
] | 3
|
2021-04-18T05:28:29.000Z
|
2022-02-15T04:01:51.000Z
|
# SPDX-FileCopyrightText: Copyright (c) 2021 Martin Stephens
#
# SPDX-License-Identifier: MIT
"""These tests are run with a sensor connected to confirm that the correct
responses are received from the sensor.
The try-except clauses and an if __name__ == "__main__" block allow the code to be
run with pytest on a Raspberry Pi or as a standalone file copied into main.py
on a CircuitPython board. To run on a board, also copy 'biffobear_as3935.py' to
the lib folder.
"""
# Many Pylint conventions are broken for the sake of test readability
# Others fail because Pylint doesn't understand Pytest.
# Therefore skip this file.
# pylint: skip-file
import time
try:
import pytest # If this works, we're on a Raspberry Pi
import os
from CircuitPython_AS3935 import biffobear_as3935 as as3935
# try:
# sensor_attached = os.environ["SENSOR_ATTACHED"]
# except (KeyError, AttributeError):
pytestmark = pytest.mark.skip(reason="No as3935 board connected.")
print("hello world")
except ImportError:
# Deduce that pytest didn't import, so we are running on a board
import biffobear_as3935 as as3935
import board
device = None
def setup_module():
# Returns an instance of the AS3935 driver
global device
# Look for I2C connected sensor
try:
print("Setting up I2C connection...")
i2c = board.I2C()
try:
interrupt = board.D25
except AttributeError:
interrupt = board.D7
device = as3935.AS3935_I2C(i2c, interrupt_pin=interrupt)
except ValueError:
print("No I2C connection found.")
print("Setting up SPI connection...")
spi = board.SPI()
try:
cs = board.D24
interrupt = board.D25
except AttributeError:
cs = board.D5
interrupt = board.D7
device = as3935.AS3935(spi, cs, interrupt_pin=interrupt)
def teardown_module():
# Reset the chip between runs for consistent test results
device.reset()
def test_indoor_outdoor():
assert device.indoor is True # Chip default
device.indoor = False
assert device.indoor is False
def test_power_down():
assert device.power_down is False # Chip default
device.power_down = True
assert device.power_down is True
device.power_down = False
assert device.power_down is False
def test_noise_floor_level():
assert device.noise_floor_limit == 0x02 # Chip default
# Test possible values
for level in range(8):
device.noise_floor_limit = level
assert device.noise_floor_limit == level
def test_watchdog():
assert device.watchdog == 0x02 # Chip default
# Test possible values
for level in range(11):
device.watchdog = level
assert device.watchdog == level
def test_spike_rejection():
assert device.spike_threshold == 0x02 # Chip default
# Test possible values
for level in range(12):
device.spike_threshold = level
assert device.spike_threshold == level
def test_disturber_mask():
assert device.disturber_mask is False # Chip default
device.disturber_mask = True
assert device.disturber_mask is True
def test_strike_count_threshold():
assert device.strike_count_threshold == 1
# Test possible values
for level in (1, 5, 9, 16):
device.strike_count_threshold = level
assert device.strike_count_threshold == level
def test_freq_divisor():
assert device.freq_divisor == 16 # Chip default
# Test possible values
for divisor in (16, 32, 64, 128):
device.freq_divisor = divisor
assert device.freq_divisor == divisor
def test_output_antenna_freq():
assert device.output_antenna_freq is False
device.output_antenna_freq = True
assert device.output_antenna_freq is True
def test_output_srco():
assert device.output_srco is False # Chip default
device.output_srco = True
assert device.output_srco is True
def test_output_trco():
assert device.output_trco is False # Chip default
device.output_trco = True
assert device.output_trco is True
def test_tuning_capacitance():
assert device.tuning_capacitance == 0 # Chip default
# Test possible values
for capacitance in range(0, 128, 8):
device.tuning_capacitance = capacitance
assert device.tuning_capacitance == capacitance
def test_reset():
# Set a none default value
device.freq_divisor = 32
assert device.freq_divisor == 32
device.reset()
# Confirm that is reset to default
assert device.freq_divisor == 16 # Chip default
def test_commands_which_do_not_change_readable_values():
# Call to see if an exception is raised
device.clear_stats()
device.calibrate_clocks()
def test_registers_with_unpredictable_states():
# Just read them to see if an error occurs since value depends on presence of lightning.
device.energy
device.distance
device.interrupt_status
def test_read_interrupt_pin():
# The state of the pin is unknown, so just read it error free.
device.interrupt_set
if __name__ == "__main__":
print("setup...")
setup_module()
device.reset()
print("test_indoor_outdoor...")
test_indoor_outdoor()
print("power_down...")
test_power_down()
print("noise_floor_level...")
test_noise_floor_level()
print("watchdog...")
test_watchdog()
print("spike_rejection...")
test_spike_rejection()
print("strike_count_threshold...")
test_strike_count_threshold()
print("disturber_mask...")
test_disturber_mask()
print("freq_divisor...")
test_freq_divisor()
print("output_antenna_freq...")
test_output_antenna_freq()
print("output_srco...")
test_output_srco()
print("output_trco...")
test_output_trco()
print("tuning_capacitance...")
test_tuning_capacitance()
print("reset...")
test_reset()
print("commands_which_do_not_change_readable_values...")
test_commands_which_do_not_change_readable_values()
print("registers_with_unpredictable_states...")
test_registers_with_unpredictable_states()
print("Interrupt pin...")
test_read_interrupt_pin()
print("teardown...")
teardown_module()
print("Tests complete.")
| 28.237668
| 92
| 0.697157
|
# Therefore skip this file.
# pylint: skip-file
import time
try:
import pytest # If this works, we're on a Raspberry Pi
import os
from CircuitPython_AS3935 import biffobear_as3935 as as3935
pytestmark = pytest.mark.skip(reason="No as3935 board connected.")
print("hello world")
except ImportError:
import biffobear_as3935 as as3935
import board
device = None
def setup_module():
# Returns an instance of the AS3935 driver
global device
# Look for I2C connected sensor
try:
print("Setting up I2C connection...")
i2c = board.I2C()
try:
interrupt = board.D25
except AttributeError:
interrupt = board.D7
device = as3935.AS3935_I2C(i2c, interrupt_pin=interrupt)
except ValueError:
print("No I2C connection found.")
print("Setting up SPI connection...")
spi = board.SPI()
try:
cs = board.D24
interrupt = board.D25
except AttributeError:
cs = board.D5
interrupt = board.D7
device = as3935.AS3935(spi, cs, interrupt_pin=interrupt)
def teardown_module():
# Reset the chip between runs for consistent test results
device.reset()
def test_indoor_outdoor():
assert device.indoor is True # Chip default
device.indoor = False
assert device.indoor is False
def test_power_down():
assert device.power_down is False # Chip default
device.power_down = True
assert device.power_down is True
device.power_down = False
assert device.power_down is False
def test_noise_floor_level():
assert device.noise_floor_limit == 0x02 # Chip default
# Test possible values
for level in range(8):
device.noise_floor_limit = level
assert device.noise_floor_limit == level
def test_watchdog():
assert device.watchdog == 0x02 # Chip default
# Test possible values
for level in range(11):
device.watchdog = level
assert device.watchdog == level
def test_spike_rejection():
assert device.spike_threshold == 0x02 # Chip default
# Test possible values
for level in range(12):
device.spike_threshold = level
assert device.spike_threshold == level
def test_disturber_mask():
assert device.disturber_mask is False # Chip default
device.disturber_mask = True
assert device.disturber_mask is True
def test_strike_count_threshold():
assert device.strike_count_threshold == 1
# Test possible values
for level in (1, 5, 9, 16):
device.strike_count_threshold = level
assert device.strike_count_threshold == level
def test_freq_divisor():
assert device.freq_divisor == 16 # Chip default
# Test possible values
for divisor in (16, 32, 64, 128):
device.freq_divisor = divisor
assert device.freq_divisor == divisor
def test_output_antenna_freq():
assert device.output_antenna_freq is False
device.output_antenna_freq = True
assert device.output_antenna_freq is True
def test_output_srco():
assert device.output_srco is False # Chip default
device.output_srco = True
assert device.output_srco is True
def test_output_trco():
assert device.output_trco is False # Chip default
device.output_trco = True
assert device.output_trco is True
def test_tuning_capacitance():
assert device.tuning_capacitance == 0 # Chip default
# Test possible values
for capacitance in range(0, 128, 8):
device.tuning_capacitance = capacitance
assert device.tuning_capacitance == capacitance
def test_reset():
# Set a none default value
device.freq_divisor = 32
assert device.freq_divisor == 32
device.reset()
# Confirm that is reset to default
assert device.freq_divisor == 16 # Chip default
def test_commands_which_do_not_change_readable_values():
# Call to see if an exception is raised
device.clear_stats()
device.calibrate_clocks()
def test_registers_with_unpredictable_states():
# Just read them to see if an error occurs since value depends on presence of lightning.
device.energy
device.distance
device.interrupt_status
def test_read_interrupt_pin():
# The state of the pin is unknown, so just read it error free.
device.interrupt_set
if __name__ == "__main__":
print("setup...")
setup_module()
device.reset()
print("test_indoor_outdoor...")
test_indoor_outdoor()
print("power_down...")
test_power_down()
print("noise_floor_level...")
test_noise_floor_level()
print("watchdog...")
test_watchdog()
print("spike_rejection...")
test_spike_rejection()
print("strike_count_threshold...")
test_strike_count_threshold()
print("disturber_mask...")
test_disturber_mask()
print("freq_divisor...")
test_freq_divisor()
print("output_antenna_freq...")
test_output_antenna_freq()
print("output_srco...")
test_output_srco()
print("output_trco...")
test_output_trco()
print("tuning_capacitance...")
test_tuning_capacitance()
print("reset...")
test_reset()
print("commands_which_do_not_change_readable_values...")
test_commands_which_do_not_change_readable_values()
print("registers_with_unpredictable_states...")
test_registers_with_unpredictable_states()
print("Interrupt pin...")
test_read_interrupt_pin()
print("teardown...")
teardown_module()
print("Tests complete.")
| true
| true
|
79052c7ad860cdfd2852803fc5791b381650f89c
| 457
|
py
|
Python
|
source/_sample/ptt/users-search.py
|
showa-yojyo/notebook
|
82c15074c24d64a1dfcb70a526bc1deb2ecffe68
|
[
"MIT"
] | 14
|
2016-04-13T08:10:02.000Z
|
2021-04-19T09:42:51.000Z
|
source/_sample/ptt/users-search.py
|
showa-yojyo/note
|
5f262ecda3df132cb66206c465d16e174061d6b9
|
[
"MIT"
] | 88
|
2017-09-27T15:07:05.000Z
|
2019-10-02T04:05:03.000Z
|
source/_sample/ptt/users-search.py
|
showa-yojyo/note
|
5f262ecda3df132cb66206c465d16e174061d6b9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Demonstration GET users/search
# See https://dev.twitter.com/rest/reference/get/users/search
from secret import twitter_instance
tw = twitter_instance()
response = tw.users.search(
q='bot',
page=0,
count=20,
include_entities=False)
for i in response:
print('''
{screen_name} | {name}
{location}
{url}
{description}
ツイート数 {statuses_count}
フォロー {friends_count} 人
フォロワー {followers_count} 人
'''.format_map(i))
| 16.321429
| 61
| 0.706783
|
from secret import twitter_instance
tw = twitter_instance()
response = tw.users.search(
q='bot',
page=0,
count=20,
include_entities=False)
for i in response:
print('''
{screen_name} | {name}
{location}
{url}
{description}
ツイート数 {statuses_count}
フォロー {friends_count} 人
フォロワー {followers_count} 人
'''.format_map(i))
| true
| true
|
79052ccaa03aa9230489301aed1b84e00c7a12ea
| 9,537
|
py
|
Python
|
facebook_business/adobjects/vehicleoffer.py
|
GDGSNF/facebook-python-business-sdk
|
95e64a10d987d7a53963d17036b6730d07f84ab5
|
[
"CNRI-Python"
] | 576
|
2018-05-01T19:09:32.000Z
|
2022-03-31T11:45:11.000Z
|
facebook_business/adobjects/vehicleoffer.py
|
GDGSNF/facebook-python-business-sdk
|
95e64a10d987d7a53963d17036b6730d07f84ab5
|
[
"CNRI-Python"
] | 217
|
2018-05-03T07:31:59.000Z
|
2022-03-29T14:19:52.000Z
|
facebook_business/adobjects/vehicleoffer.py
|
GDGSNF/facebook-python-business-sdk
|
95e64a10d987d7a53963d17036b6730d07f84ab5
|
[
"CNRI-Python"
] | 323
|
2018-05-01T20:32:26.000Z
|
2022-03-29T07:05:12.000Z
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class VehicleOffer(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isVehicleOffer = True
super(VehicleOffer, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
amount_currency = 'amount_currency'
amount_percentage = 'amount_percentage'
amount_price = 'amount_price'
amount_qualifier = 'amount_qualifier'
applinks = 'applinks'
body_style = 'body_style'
cashback_currency = 'cashback_currency'
cashback_price = 'cashback_price'
category_specific_fields = 'category_specific_fields'
currency = 'currency'
dma_codes = 'dma_codes'
downpayment_currency = 'downpayment_currency'
downpayment_price = 'downpayment_price'
downpayment_qualifier = 'downpayment_qualifier'
end_date = 'end_date'
end_time = 'end_time'
id = 'id'
image_fetch_status = 'image_fetch_status'
images = 'images'
make = 'make'
model = 'model'
offer_description = 'offer_description'
offer_disclaimer = 'offer_disclaimer'
offer_type = 'offer_type'
price = 'price'
sanitized_images = 'sanitized_images'
start_date = 'start_date'
start_time = 'start_time'
term_length = 'term_length'
term_qualifier = 'term_qualifier'
title = 'title'
trim = 'trim'
unit_price = 'unit_price'
url = 'url'
vehicle_offer_id = 'vehicle_offer_id'
year = 'year'
class ImageFetchStatus:
direct_upload = 'DIRECT_UPLOAD'
fetched = 'FETCHED'
fetch_failed = 'FETCH_FAILED'
no_status = 'NO_STATUS'
outdated = 'OUTDATED'
partial_fetch = 'PARTIAL_FETCH'
def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=VehicleOffer,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_augmented_realities_metadata(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/augmented_realities_metadata',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AbstractCrudObject,
api_type='EDGE',
response_parser=ObjectParser(target_class=AbstractCrudObject, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_channels_to_integrity_status(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.catalogitemchannelstointegritystatus import CatalogItemChannelsToIntegrityStatus
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/channels_to_integrity_status',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=CatalogItemChannelsToIntegrityStatus,
api_type='EDGE',
response_parser=ObjectParser(target_class=CatalogItemChannelsToIntegrityStatus, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_videos_metadata(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/videos_metadata',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AbstractCrudObject,
api_type='EDGE',
response_parser=ObjectParser(target_class=AbstractCrudObject, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
_field_types = {
'amount_currency': 'string',
'amount_percentage': 'float',
'amount_price': 'string',
'amount_qualifier': 'string',
'applinks': 'CatalogItemAppLinks',
'body_style': 'string',
'cashback_currency': 'string',
'cashback_price': 'string',
'category_specific_fields': 'CatalogSubVerticalList',
'currency': 'string',
'dma_codes': 'list<string>',
'downpayment_currency': 'string',
'downpayment_price': 'string',
'downpayment_qualifier': 'string',
'end_date': 'string',
'end_time': 'int',
'id': 'string',
'image_fetch_status': 'ImageFetchStatus',
'images': 'list<string>',
'make': 'string',
'model': 'string',
'offer_description': 'string',
'offer_disclaimer': 'string',
'offer_type': 'string',
'price': 'string',
'sanitized_images': 'list<string>',
'start_date': 'string',
'start_time': 'int',
'term_length': 'unsigned int',
'term_qualifier': 'string',
'title': 'string',
'trim': 'string',
'unit_price': 'Object',
'url': 'string',
'vehicle_offer_id': 'string',
'year': 'int',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
field_enum_info['ImageFetchStatus'] = VehicleOffer.ImageFetchStatus.__dict__.values()
return field_enum_info
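# --- Illustrative usage sketch, not part of the generated SDK file -----------
# Assumes FacebookAdsApi has already been initialized elsewhere; the id value
# and the field selection are made up for illustration only.
def fetch_vehicle_offer_basics(vehicle_offer_id):
    """Sketch: read a few fields of a vehicle offer through the generated node class."""
    offer = VehicleOffer(fbid=vehicle_offer_id)
    return offer.api_get(fields=[
        VehicleOffer.Field.make,
        VehicleOffer.Field.model,
        VehicleOffer.Field.price,
    ])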
| 37.4
| 128
| 0.637936
|
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
class VehicleOffer(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isVehicleOffer = True
super(VehicleOffer, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
amount_currency = 'amount_currency'
amount_percentage = 'amount_percentage'
amount_price = 'amount_price'
amount_qualifier = 'amount_qualifier'
applinks = 'applinks'
body_style = 'body_style'
cashback_currency = 'cashback_currency'
cashback_price = 'cashback_price'
category_specific_fields = 'category_specific_fields'
currency = 'currency'
dma_codes = 'dma_codes'
downpayment_currency = 'downpayment_currency'
downpayment_price = 'downpayment_price'
downpayment_qualifier = 'downpayment_qualifier'
end_date = 'end_date'
end_time = 'end_time'
id = 'id'
image_fetch_status = 'image_fetch_status'
images = 'images'
make = 'make'
model = 'model'
offer_description = 'offer_description'
offer_disclaimer = 'offer_disclaimer'
offer_type = 'offer_type'
price = 'price'
sanitized_images = 'sanitized_images'
start_date = 'start_date'
start_time = 'start_time'
term_length = 'term_length'
term_qualifier = 'term_qualifier'
title = 'title'
trim = 'trim'
unit_price = 'unit_price'
url = 'url'
vehicle_offer_id = 'vehicle_offer_id'
year = 'year'
class ImageFetchStatus:
direct_upload = 'DIRECT_UPLOAD'
fetched = 'FETCHED'
fetch_failed = 'FETCH_FAILED'
no_status = 'NO_STATUS'
outdated = 'OUTDATED'
partial_fetch = 'PARTIAL_FETCH'
def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=VehicleOffer,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_augmented_realities_metadata(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/augmented_realities_metadata',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AbstractCrudObject,
api_type='EDGE',
response_parser=ObjectParser(target_class=AbstractCrudObject, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_channels_to_integrity_status(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
from facebook_business.adobjects.catalogitemchannelstointegritystatus import CatalogItemChannelsToIntegrityStatus
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/channels_to_integrity_status',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=CatalogItemChannelsToIntegrityStatus,
api_type='EDGE',
response_parser=ObjectParser(target_class=CatalogItemChannelsToIntegrityStatus, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
def get_videos_metadata(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/videos_metadata',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=AbstractCrudObject,
api_type='EDGE',
response_parser=ObjectParser(target_class=AbstractCrudObject, api=self._api),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
_field_types = {
'amount_currency': 'string',
'amount_percentage': 'float',
'amount_price': 'string',
'amount_qualifier': 'string',
'applinks': 'CatalogItemAppLinks',
'body_style': 'string',
'cashback_currency': 'string',
'cashback_price': 'string',
'category_specific_fields': 'CatalogSubVerticalList',
'currency': 'string',
'dma_codes': 'list<string>',
'downpayment_currency': 'string',
'downpayment_price': 'string',
'downpayment_qualifier': 'string',
'end_date': 'string',
'end_time': 'int',
'id': 'string',
'image_fetch_status': 'ImageFetchStatus',
'images': 'list<string>',
'make': 'string',
'model': 'string',
'offer_description': 'string',
'offer_disclaimer': 'string',
'offer_type': 'string',
'price': 'string',
'sanitized_images': 'list<string>',
'start_date': 'string',
'start_time': 'int',
'term_length': 'unsigned int',
'term_qualifier': 'string',
'title': 'string',
'trim': 'string',
'unit_price': 'Object',
'url': 'string',
'vehicle_offer_id': 'string',
'year': 'int',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
field_enum_info['ImageFetchStatus'] = VehicleOffer.ImageFetchStatus.__dict__.values()
return field_enum_info
| true
| true
|
79052d2fe5882f5571d528bdb9d490248fe0dc67
| 91,819
|
py
|
Python
|
prenlp/data/normalizer.py
|
awesome-archive/prenlp
|
e65acc8464b391d72d0ee086368b153c81ea19de
|
[
"Apache-2.0"
] | null | null | null |
prenlp/data/normalizer.py
|
awesome-archive/prenlp
|
e65acc8464b391d72d0ee086368b153c81ea19de
|
[
"Apache-2.0"
] | null | null | null |
prenlp/data/normalizer.py
|
awesome-archive/prenlp
|
e65acc8464b391d72d0ee086368b153c81ea19de
|
[
"Apache-2.0"
] | null | null | null |
import re
class Normalizer:
"""Normalizer return the text replaced with 'repl'.
If 'repl' is None, normalization is not applied to the pattern corresponding to 'repl'.
Args:
url_repl (str): replace all urls in text with this
tag_repl (str): replace all tags in text with this
emoji_repl (str): replace all emojis in text with this
email_repl (str): replace all emails in text with this
tel_repl (str): replace all tels in text with this
"""
def __init__(self, url_repl='[URL]', tag_repl='[TAG]', emoji_repl='[EMOJI]', email_repl='[EMAIL]', tel_repl='[TEL]'):
# repls
self.url_repl = url_repl
self.tag_repl = tag_repl
self.emoji_repl = emoji_repl
self.email_repl = email_repl
self.tel_repl = tel_repl
self._normalize = []
self._init_normalize()
def normalize(self, text: str) -> str:
"""Normalize text.
Args:
text (str): text to be normalized
"""
for normalize_fn, repl in self._normalize:
text = normalize_fn(text, repl)
return text
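    # Illustrative usage sketch (values made up for illustration only):
    #   normalizer = Normalizer()
    #   normalizer.normalize('Visit https://example.com or mail foo@bar.com')
    #   -> 'Visit [URL] or mail [EMAIL]'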
def _init_normalize(self) -> None:
"""Initialize normalize function.
If 'repl' is None, normalization is not applied to the pattern corresponding to 'repl'.
"""
if self.url_repl is not None:
self._normalize.append((self._url_normalize, self.url_repl))
if self.tag_repl is not None:
self._normalize.append((self._tag_normalize, self.tag_repl))
if self.emoji_repl is not None:
self._normalize.append((self._emoji_normalize, self.emoji_repl))
if self.email_repl is not None:
self._normalize.append((self._email_normalize, self.email_repl))
if self.tel_repl is not None:
self._normalize.append((self._tel_normalize, self.tel_repl))
def _url_normalize(self, text: str, repl: str, regex=re.compile(r'(https?|ftp|www)\S+')) -> str:
"""Return the string obtained by replacing all urls in 'text' by the replacement 'repl'.
Args:
text (str): text to be replaced
repl (str): replace all urls in text with 'repl'
"""
text = regex.sub(repl, text)
return text
def _tag_normalize(self, text: str, repl: str, regex=re.compile(r'<[^>]*>')) -> str:
"""Return the string obtained by replacing all HTML tags in 'text' by the replacement 'repl'.
Args:
text (str): text to be replaced
repl (str): replace all HTML tags in text with 'repl'
"""
text = regex.sub(repl, text)
return text
def _emoji_normalize(self, text: str, repl: str, regex=re.compile(r'\U0001f469\u200d\u2764\ufe0f\u200d\U0001f48b\u200d\U0001f468|\U0001f468\u200d\u2764\ufe0f\u200d\U0001f48b\u200d\U0001f468|\U0001f469\u200d\u2764\ufe0f\u200d\U0001f48b\u200d\U0001f469|\U0001f9d1\U0001f3fb\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fc\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fc\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fd\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fd\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fd\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fe|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fe|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3ff|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f469\U0001f3fc|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f469\U0001f3fc|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f469\U0001f3fd|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fc|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fd|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fe|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f468\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f468\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f468\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3ff\u200d\U
0001f91d\u200d\U0001f468\U0001f3fc|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\u200d\u2764\u200d\U0001f48b\u200d\U0001f468|\U0001f468\u200d\u2764\u200d\U0001f48b\u200d\U0001f468|\U0001f469\u200d\u2764\u200d\U0001f48b\u200d\U0001f469|\U0001f468\u200d\U0001f469\u200d\U0001f467\u200d\U0001f466|\U0001f468\u200d\U0001f469\u200d\U0001f466\u200d\U0001f466|\U0001f468\u200d\U0001f469\u200d\U0001f467\u200d\U0001f467|\U0001f468\u200d\U0001f468\u200d\U0001f467\u200d\U0001f466|\U0001f468\u200d\U0001f468\u200d\U0001f466\u200d\U0001f466|\U0001f468\u200d\U0001f468\u200d\U0001f467\u200d\U0001f467|\U0001f469\u200d\U0001f469\u200d\U0001f467\u200d\U0001f466|\U0001f469\u200d\U0001f469\u200d\U0001f466\u200d\U0001f466|\U0001f469\u200d\U0001f469\u200d\U0001f467\u200d\U0001f467|\U0001f3f4\U000e0067\U000e0062\U000e0065\U000e006e\U000e0067\U000e007f|\U0001f3f4\U000e0067\U000e0062\U000e0073\U000e0063\U000e0074\U000e007f|\U0001f3f4\U000e0067\U000e0062\U000e0077\U000e006c\U000e0073\U000e007f|\U0001f469\u200d\u2764\ufe0f\u200d\U0001f468|\U0001f468\u200d\u2764\ufe0f\u200d\U0001f468|\U0001f469\u200d\u2764\ufe0f\u200d\U0001f469|\U0001f441\ufe0f\u200d\U0001f5e8\ufe0f|\U0001f471\U0001f3fb\u200d\u2642\ufe0f|\U0001f471\U0001f3fc\u200d\u2642\ufe0f|\U0001f471\U0001f3fd\u200d\u2642\ufe0f|\U0001f471\U0001f3fe\u200d\u2642\ufe0f|\U0001f471\U0001f3ff\u200d\u2642\ufe0f|\U0001f471\U0001f3fb\u200d\u2640\ufe0f|\U0001f471\U0001f3fc\u200d\u2640\ufe0f|\U0001f471\U0001f3fd\u200d\u2640\ufe0f|\U0001f471\U0001f3fe\u200d\u2640\ufe0f|\U0001f471\U0001f3ff\u200d\u2640\ufe0f|\U0001f64d\U0001f3fb\u200d\u2642\ufe0f|\U0001f64d\U0001f3fc\u200d\u2642\ufe0f|\U0001f64d\U0001f3fd\u200d\u2642\ufe0f|\U0001f64d\U0001f3fe\u200d\u2642\ufe0f|\U0001f64d\U0001f3ff\u200d\u2642\ufe0f|\U0001f64d\U0001f3fb\u200d\u2640\ufe0f|\U0001f64d\U0001f3fc\u200d\u2640\ufe0f|\U0001f64d\U0001f3fd\u200d\u2640\ufe0f|\U0001f64d\U0001f3fe\u200d\u2640\ufe0f|\U0001f64d\U0001f3ff\u200d\u2640\ufe0f|\U0001f64e\U0001f3fb\u200d\u2642\ufe0f|\U0001f64e\U0001f3fc\u200d\u2642\ufe0f|\U0001f64e\U0001f3fd\u200d\u2642\ufe0f|\U0001f64e\U0001f3fe\u200d\u2642\ufe0f|\U0001f64e\U0001f3ff\u200d\u2642\ufe0f|\U0001f64e\U0001f3fb\u200d\u2640\ufe0f|\U0001f64e\U0001f3fc\u200d\u2640\ufe0f|\U0001f64e\U0001f3fd\u200d\u2640\ufe0f|\U0001f64e\U0001f3fe\u200d\u2640\ufe0f|\U0001f64e\U0001f3ff\u200d\u2640\ufe0f|\U0001f645\U0001f3fb\u200d\u2642\ufe0f|\U0001f645\U0001f3fc\u200d\u2642\ufe0f|\U0001f645\U0001f3fd\u200d\u2642\ufe0f|\U0001f645\U0001f3fe\u200d\u2642\ufe0f|\U0001f645\U0001f3ff\u200d\u2642\ufe0f|\U0001f645\U0001f3fb\u200d\u2640\ufe0f|\U0001f645\U0001f3fc\u200d\u2640\ufe0f|\U0001f645\U0001f3fd\u200d\u2640\ufe0f|\U0001f645\U0001f3fe\u200d\u2640\ufe0f|\U0001f645\U0001f3ff\u200d\u2640\ufe0f|\U0001f646\U0001f3fb\u200d\u2642\ufe0f|\U0001f646\U0001f3fc\u200d\u2642\ufe0f|\U0001f646\U0001f3fd\u200d\u2642\ufe0f|\U0001f646\U0001f3fe\u200d\u2642\ufe0f|\U0001f646\U0001f3ff\u200d\u2642\ufe0f|\U0001f646\U0001f3fb\u200d\u2640\ufe0f|\U0001f646\U0001f3fc\u200d\u2640\ufe0f|\U0001f646\U0001f3fd\u200d\u2640\ufe0f|\U0001f646\U0001f3fe\u200d\u2640\ufe0f|\U0001f646\U0001f3ff\u200d\u2640\ufe0f|\U0001f481\U0001f3fb\u200d\u2642\ufe0f|\U0001f481\U0001f3fc\u200d\u2642\ufe0f|\U0001f481\U0001f3fd\u200d\u2642\ufe0f|\U0001f481\U0001f3fe\u200d\u2642\ufe0f|\U0001f481\U0001f3ff\u200d\u2642\ufe0f|\U0001f481\U0001f3fb\u200d\u2640\ufe0f|\U0001f481\U0001f3fc\u200d\u2640\ufe0f|\U0001f481\U0001f3fd\u200d\u2640\ufe0f|\U0001f481\U0001f3fe\u200d\u2640\ufe0f|\U0001f481
\U0001f3ff\u200d\u2640\ufe0f|\U0001f64b\U0001f3fb\u200d\u2642\ufe0f|\U0001f64b\U0001f3fc\u200d\u2642\ufe0f|\U0001f64b\U0001f3fd\u200d\u2642\ufe0f|\U0001f64b\U0001f3fe\u200d\u2642\ufe0f|\U0001f64b\U0001f3ff\u200d\u2642\ufe0f|\U0001f64b\U0001f3fb\u200d\u2640\ufe0f|\U0001f64b\U0001f3fc\u200d\u2640\ufe0f|\U0001f64b\U0001f3fd\u200d\u2640\ufe0f|\U0001f64b\U0001f3fe\u200d\u2640\ufe0f|\U0001f64b\U0001f3ff\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fc\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fd\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fe\u200d\u2642\ufe0f|\U0001f9cf\U0001f3ff\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fc\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fd\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fe\u200d\u2640\ufe0f|\U0001f9cf\U0001f3ff\u200d\u2640\ufe0f|\U0001f647\U0001f3fb\u200d\u2642\ufe0f|\U0001f647\U0001f3fc\u200d\u2642\ufe0f|\U0001f647\U0001f3fd\u200d\u2642\ufe0f|\U0001f647\U0001f3fe\u200d\u2642\ufe0f|\U0001f647\U0001f3ff\u200d\u2642\ufe0f|\U0001f647\U0001f3fb\u200d\u2640\ufe0f|\U0001f647\U0001f3fc\u200d\u2640\ufe0f|\U0001f647\U0001f3fd\u200d\u2640\ufe0f|\U0001f647\U0001f3fe\u200d\u2640\ufe0f|\U0001f647\U0001f3ff\u200d\u2640\ufe0f|\U0001f926\U0001f3fb\u200d\u2642\ufe0f|\U0001f926\U0001f3fc\u200d\u2642\ufe0f|\U0001f926\U0001f3fd\u200d\u2642\ufe0f|\U0001f926\U0001f3fe\u200d\u2642\ufe0f|\U0001f926\U0001f3ff\u200d\u2642\ufe0f|\U0001f926\U0001f3fb\u200d\u2640\ufe0f|\U0001f926\U0001f3fc\u200d\u2640\ufe0f|\U0001f926\U0001f3fd\u200d\u2640\ufe0f|\U0001f926\U0001f3fe\u200d\u2640\ufe0f|\U0001f926\U0001f3ff\u200d\u2640\ufe0f|\U0001f937\U0001f3fb\u200d\u2642\ufe0f|\U0001f937\U0001f3fc\u200d\u2642\ufe0f|\U0001f937\U0001f3fd\u200d\u2642\ufe0f|\U0001f937\U0001f3fe\u200d\u2642\ufe0f|\U0001f937\U0001f3ff\u200d\u2642\ufe0f|\U0001f937\U0001f3fb\u200d\u2640\ufe0f|\U0001f937\U0001f3fc\u200d\u2640\ufe0f|\U0001f937\U0001f3fd\u200d\u2640\ufe0f|\U0001f937\U0001f3fe\u200d\u2640\ufe0f|\U0001f937\U0001f3ff\u200d\u2640\ufe0f|\U0001f468\U0001f3fb\u200d\u2695\ufe0f|\U0001f468\U0001f3fc\u200d\u2695\ufe0f|\U0001f468\U0001f3fd\u200d\u2695\ufe0f|\U0001f468\U0001f3fe\u200d\u2695\ufe0f|\U0001f468\U0001f3ff\u200d\u2695\ufe0f|\U0001f469\U0001f3fb\u200d\u2695\ufe0f|\U0001f469\U0001f3fc\u200d\u2695\ufe0f|\U0001f469\U0001f3fd\u200d\u2695\ufe0f|\U0001f469\U0001f3fe\u200d\u2695\ufe0f|\U0001f469\U0001f3ff\u200d\u2695\ufe0f|\U0001f468\U0001f3fb\u200d\u2696\ufe0f|\U0001f468\U0001f3fc\u200d\u2696\ufe0f|\U0001f468\U0001f3fd\u200d\u2696\ufe0f|\U0001f468\U0001f3fe\u200d\u2696\ufe0f|\U0001f468\U0001f3ff\u200d\u2696\ufe0f|\U0001f469\U0001f3fb\u200d\u2696\ufe0f|\U0001f469\U0001f3fc\u200d\u2696\ufe0f|\U0001f469\U0001f3fd\u200d\u2696\ufe0f|\U0001f469\U0001f3fe\u200d\u2696\ufe0f|\U0001f469\U0001f3ff\u200d\u2696\ufe0f|\U0001f468\U0001f3fb\u200d\u2708\ufe0f|\U0001f468\U0001f3fc\u200d\u2708\ufe0f|\U0001f468\U0001f3fd\u200d\u2708\ufe0f|\U0001f468\U0001f3fe\u200d\u2708\ufe0f|\U0001f468\U0001f3ff\u200d\u2708\ufe0f|\U0001f469\U0001f3fb\u200d\u2708\ufe0f|\U0001f469\U0001f3fc\u200d\u2708\ufe0f|\U0001f469\U0001f3fd\u200d\u2708\ufe0f|\U0001f469\U0001f3fe\u200d\u2708\ufe0f|\U0001f469\U0001f3ff\u200d\u2708\ufe0f|\U0001f46e\U0001f3fb\u200d\u2642\ufe0f|\U0001f46e\U0001f3fc\u200d\u2642\ufe0f|\U0001f46e\U0001f3fd\u200d\u2642\ufe0f|\U0001f46e\U0001f3fe\u200d\u2642\ufe0f|\U0001f46e\U0001f3ff\u200d\u2642\ufe0f|\U0001f46e\U0001f3fb\u200d\u2640\ufe0f|\U0001f46e\U0001f3fc\u200d\u2640\ufe0f|\U0001f46e\U0001f3fd\u200d\u2640\ufe0f|\U0001f46e\U0001f3fe\u200d\u2640\ufe0f|\U0001f46e\U0001f3ff\u200d\u2640\ufe0f|\U0001f575\ufe0f
\u200d\u2642\ufe0f|\U0001f575\U0001f3fb\u200d\u2642\ufe0f|\U0001f575\U0001f3fc\u200d\u2642\ufe0f|\U0001f575\U0001f3fd\u200d\u2642\ufe0f|\U0001f575\U0001f3fe\u200d\u2642\ufe0f|\U0001f575\U0001f3ff\u200d\u2642\ufe0f|\U0001f575\ufe0f\u200d\u2640\ufe0f|\U0001f575\U0001f3fb\u200d\u2640\ufe0f|\U0001f575\U0001f3fc\u200d\u2640\ufe0f|\U0001f575\U0001f3fd\u200d\u2640\ufe0f|\U0001f575\U0001f3fe\u200d\u2640\ufe0f|\U0001f575\U0001f3ff\u200d\u2640\ufe0f|\U0001f482\U0001f3fb\u200d\u2642\ufe0f|\U0001f482\U0001f3fc\u200d\u2642\ufe0f|\U0001f482\U0001f3fd\u200d\u2642\ufe0f|\U0001f482\U0001f3fe\u200d\u2642\ufe0f|\U0001f482\U0001f3ff\u200d\u2642\ufe0f|\U0001f482\U0001f3fb\u200d\u2640\ufe0f|\U0001f482\U0001f3fc\u200d\u2640\ufe0f|\U0001f482\U0001f3fd\u200d\u2640\ufe0f|\U0001f482\U0001f3fe\u200d\u2640\ufe0f|\U0001f482\U0001f3ff\u200d\u2640\ufe0f|\U0001f477\U0001f3fb\u200d\u2642\ufe0f|\U0001f477\U0001f3fc\u200d\u2642\ufe0f|\U0001f477\U0001f3fd\u200d\u2642\ufe0f|\U0001f477\U0001f3fe\u200d\u2642\ufe0f|\U0001f477\U0001f3ff\u200d\u2642\ufe0f|\U0001f477\U0001f3fb\u200d\u2640\ufe0f|\U0001f477\U0001f3fc\u200d\u2640\ufe0f|\U0001f477\U0001f3fd\u200d\u2640\ufe0f|\U0001f477\U0001f3fe\u200d\u2640\ufe0f|\U0001f477\U0001f3ff\u200d\u2640\ufe0f|\U0001f473\U0001f3fb\u200d\u2642\ufe0f|\U0001f473\U0001f3fc\u200d\u2642\ufe0f|\U0001f473\U0001f3fd\u200d\u2642\ufe0f|\U0001f473\U0001f3fe\u200d\u2642\ufe0f|\U0001f473\U0001f3ff\u200d\u2642\ufe0f|\U0001f473\U0001f3fb\u200d\u2640\ufe0f|\U0001f473\U0001f3fc\u200d\u2640\ufe0f|\U0001f473\U0001f3fd\u200d\u2640\ufe0f|\U0001f473\U0001f3fe\u200d\u2640\ufe0f|\U0001f473\U0001f3ff\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fc\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fd\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fe\u200d\u2642\ufe0f|\U0001f9b8\U0001f3ff\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fc\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fd\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fe\u200d\u2640\ufe0f|\U0001f9b8\U0001f3ff\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fc\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fd\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fe\u200d\u2642\ufe0f|\U0001f9b9\U0001f3ff\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fc\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fd\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fe\u200d\u2640\ufe0f|\U0001f9b9\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d9\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d9\U0001f3ff\u200d\u2640\ufe0f|\U0001f9da\U0001f3fb\u200d\u2642\ufe0f|\U0001f9da\U0001f3fc\u200d\u2642\ufe0f|\U0001f9da\U0001f3fd\u200d\u2642\ufe0f|\U0001f9da\U0001f3fe\u200d\u2642\ufe0f|\U0001f9da\U0001f3ff\u200d\u2642\ufe0f|\U0001f9da\U0001f3fb\u200d\u2640\ufe0f|\U0001f9da\U0001f3fc\u200d\u2640\ufe0f|\U0001f9da\U0001f3fd\u200d\u2640\ufe0f|\U0001f9da\U0001f3fe\u200d\u2640\ufe0f|\U0001f9da\U0001f3ff\u200d\u2640\ufe0f|\U0001f9db\U0001f3fb\u200d\u2642\ufe0f|\U0001f9db\U0001f3fc\u200d\u2642\ufe0f|\U0001f9db\U0001f3fd\u200d\u2642\ufe0f|\U0001f9db\U0001f3fe\u200d\u2642\ufe0f|\U0001f9db\U0001f3ff\u200d\u2642\ufe0f|\U0001f9db\U0001f3fb\u200d\u2640\ufe0f|\U0001f9db\U0001f3fc\u200d\u2640\ufe0f|\U0001f9db\U0001f3fd\u200d\u2640\ufe0f|\U0001f9db\U0001f3fe\u200d\u2640\ufe0f|\U0001f9db\U0001f3ff\u200d\u26
40\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fc\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fd\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fe\u200d\u2642\ufe0f|\U0001f9dc\U0001f3ff\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fc\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fd\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fe\u200d\u2640\ufe0f|\U0001f9dc\U0001f3ff\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fc\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fd\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fe\u200d\u2642\ufe0f|\U0001f9dd\U0001f3ff\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fc\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fd\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fe\u200d\u2640\ufe0f|\U0001f9dd\U0001f3ff\u200d\u2640\ufe0f|\U0001f486\U0001f3fb\u200d\u2642\ufe0f|\U0001f486\U0001f3fc\u200d\u2642\ufe0f|\U0001f486\U0001f3fd\u200d\u2642\ufe0f|\U0001f486\U0001f3fe\u200d\u2642\ufe0f|\U0001f486\U0001f3ff\u200d\u2642\ufe0f|\U0001f486\U0001f3fb\u200d\u2640\ufe0f|\U0001f486\U0001f3fc\u200d\u2640\ufe0f|\U0001f486\U0001f3fd\u200d\u2640\ufe0f|\U0001f486\U0001f3fe\u200d\u2640\ufe0f|\U0001f486\U0001f3ff\u200d\u2640\ufe0f|\U0001f487\U0001f3fb\u200d\u2642\ufe0f|\U0001f487\U0001f3fc\u200d\u2642\ufe0f|\U0001f487\U0001f3fd\u200d\u2642\ufe0f|\U0001f487\U0001f3fe\u200d\u2642\ufe0f|\U0001f487\U0001f3ff\u200d\u2642\ufe0f|\U0001f487\U0001f3fb\u200d\u2640\ufe0f|\U0001f487\U0001f3fc\u200d\u2640\ufe0f|\U0001f487\U0001f3fd\u200d\u2640\ufe0f|\U0001f487\U0001f3fe\u200d\u2640\ufe0f|\U0001f487\U0001f3ff\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fc\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fd\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fe\u200d\u2642\ufe0f|\U0001f6b6\U0001f3ff\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fc\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fd\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fe\u200d\u2640\ufe0f|\U0001f6b6\U0001f3ff\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fc\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fd\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fe\u200d\u2642\ufe0f|\U0001f9cd\U0001f3ff\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fc\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fd\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fe\u200d\u2640\ufe0f|\U0001f9cd\U0001f3ff\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fc\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fd\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fe\u200d\u2642\ufe0f|\U0001f9ce\U0001f3ff\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fc\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fd\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fe\u200d\u2640\ufe0f|\U0001f9ce\U0001f3ff\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fc\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fd\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fe\u200d\u2642\ufe0f|\U0001f3c3\U0001f3ff\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fc\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fd\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fe\u200d\u2640\ufe0f|\U0001f3c3\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d6\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d6\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2642\ufe
0f|\U0001f9d7\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d7\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d7\U0001f3ff\u200d\u2640\ufe0f|\U0001f3cc\ufe0f\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fb\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fc\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fd\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fe\u200d\u2642\ufe0f|\U0001f3cc\U0001f3ff\u200d\u2642\ufe0f|\U0001f3cc\ufe0f\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fb\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fc\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fd\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fe\u200d\u2640\ufe0f|\U0001f3cc\U0001f3ff\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fc\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fd\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fe\u200d\u2642\ufe0f|\U0001f3c4\U0001f3ff\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fc\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fd\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fe\u200d\u2640\ufe0f|\U0001f3c4\U0001f3ff\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fc\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fd\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fe\u200d\u2642\ufe0f|\U0001f6a3\U0001f3ff\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fc\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fd\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fe\u200d\u2640\ufe0f|\U0001f6a3\U0001f3ff\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fc\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fd\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fe\u200d\u2642\ufe0f|\U0001f3ca\U0001f3ff\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fc\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fd\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fe\u200d\u2640\ufe0f|\U0001f3ca\U0001f3ff\u200d\u2640\ufe0f|\u26f9\ufe0f\u200d\u2642\ufe0f|\u26f9\U0001f3fb\u200d\u2642\ufe0f|\u26f9\U0001f3fc\u200d\u2642\ufe0f|\u26f9\U0001f3fd\u200d\u2642\ufe0f|\u26f9\U0001f3fe\u200d\u2642\ufe0f|\u26f9\U0001f3ff\u200d\u2642\ufe0f|\u26f9\ufe0f\u200d\u2640\ufe0f|\u26f9\U0001f3fb\u200d\u2640\ufe0f|\u26f9\U0001f3fc\u200d\u2640\ufe0f|\u26f9\U0001f3fd\u200d\u2640\ufe0f|\u26f9\U0001f3fe\u200d\u2640\ufe0f|\u26f9\U0001f3ff\u200d\u2640\ufe0f|\U0001f3cb\ufe0f\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fb\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fc\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fd\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fe\u200d\u2642\ufe0f|\U0001f3cb\U0001f3ff\u200d\u2642\ufe0f|\U0001f3cb\ufe0f\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fb\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fc\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fd\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fe\u200d\u2640\ufe0f|\U0001f3cb\U0001f3ff\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fc\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fd\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fe\u200d\u2642\ufe0f|\U0001f6b4\U0001f3ff\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fc\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fd\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fe\u200d\u2640\ufe0f|\U0001f6b4\U0001f3ff\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fc\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fd\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fe\u200d\u2642\ufe0f|\U0001f6b5\U0001f3ff\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fc\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fd\u200d\u2640\ufe
0f|\U0001f6b5\U0001f3fe\u200d\u2640\ufe0f|\U0001f6b5\U0001f3ff\u200d\u2640\ufe0f|\U0001f938\U0001f3fb\u200d\u2642\ufe0f|\U0001f938\U0001f3fc\u200d\u2642\ufe0f|\U0001f938\U0001f3fd\u200d\u2642\ufe0f|\U0001f938\U0001f3fe\u200d\u2642\ufe0f|\U0001f938\U0001f3ff\u200d\u2642\ufe0f|\U0001f938\U0001f3fb\u200d\u2640\ufe0f|\U0001f938\U0001f3fc\u200d\u2640\ufe0f|\U0001f938\U0001f3fd\u200d\u2640\ufe0f|\U0001f938\U0001f3fe\u200d\u2640\ufe0f|\U0001f938\U0001f3ff\u200d\u2640\ufe0f|\U0001f93d\U0001f3fb\u200d\u2642\ufe0f|\U0001f93d\U0001f3fc\u200d\u2642\ufe0f|\U0001f93d\U0001f3fd\u200d\u2642\ufe0f|\U0001f93d\U0001f3fe\u200d\u2642\ufe0f|\U0001f93d\U0001f3ff\u200d\u2642\ufe0f|\U0001f93d\U0001f3fb\u200d\u2640\ufe0f|\U0001f93d\U0001f3fc\u200d\u2640\ufe0f|\U0001f93d\U0001f3fd\u200d\u2640\ufe0f|\U0001f93d\U0001f3fe\u200d\u2640\ufe0f|\U0001f93d\U0001f3ff\u200d\u2640\ufe0f|\U0001f93e\U0001f3fb\u200d\u2642\ufe0f|\U0001f93e\U0001f3fc\u200d\u2642\ufe0f|\U0001f93e\U0001f3fd\u200d\u2642\ufe0f|\U0001f93e\U0001f3fe\u200d\u2642\ufe0f|\U0001f93e\U0001f3ff\u200d\u2642\ufe0f|\U0001f93e\U0001f3fb\u200d\u2640\ufe0f|\U0001f93e\U0001f3fc\u200d\u2640\ufe0f|\U0001f93e\U0001f3fd\u200d\u2640\ufe0f|\U0001f93e\U0001f3fe\u200d\u2640\ufe0f|\U0001f93e\U0001f3ff\u200d\u2640\ufe0f|\U0001f939\U0001f3fb\u200d\u2642\ufe0f|\U0001f939\U0001f3fc\u200d\u2642\ufe0f|\U0001f939\U0001f3fd\u200d\u2642\ufe0f|\U0001f939\U0001f3fe\u200d\u2642\ufe0f|\U0001f939\U0001f3ff\u200d\u2642\ufe0f|\U0001f939\U0001f3fb\u200d\u2640\ufe0f|\U0001f939\U0001f3fc\u200d\u2640\ufe0f|\U0001f939\U0001f3fd\u200d\u2640\ufe0f|\U0001f939\U0001f3fe\u200d\u2640\ufe0f|\U0001f939\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d8\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d8\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d1\u200d\U0001f91d\u200d\U0001f9d1|\U0001f469\u200d\u2764\u200d\U0001f468|\U0001f468\u200d\u2764\u200d\U0001f468|\U0001f469\u200d\u2764\u200d\U0001f469|\U0001f468\u200d\U0001f469\u200d\U0001f466|\U0001f468\u200d\U0001f469\u200d\U0001f467|\U0001f468\u200d\U0001f468\u200d\U0001f466|\U0001f468\u200d\U0001f468\u200d\U0001f467|\U0001f469\u200d\U0001f469\u200d\U0001f466|\U0001f469\u200d\U0001f469\u200d\U0001f467|\U0001f468\u200d\U0001f466\u200d\U0001f466|\U0001f468\u200d\U0001f467\u200d\U0001f466|\U0001f468\u200d\U0001f467\u200d\U0001f467|\U0001f469\u200d\U0001f466\u200d\U0001f466|\U0001f469\u200d\U0001f467\u200d\U0001f466|\U0001f469\u200d\U0001f467\u200d\U0001f467|\U0001f441\u200d\U0001f5e8\ufe0f|\U0001f441\ufe0f\u200d\U0001f5e8|\U0001f471\u200d\u2642\ufe0f|\U0001f471\U0001f3fb\u200d\u2642|\U0001f471\U0001f3fc\u200d\u2642|\U0001f471\U0001f3fd\u200d\u2642|\U0001f471\U0001f3fe\u200d\u2642|\U0001f471\U0001f3ff\u200d\u2642|\U0001f468\U0001f3fb\u200d\U0001f9b0|\U0001f468\U0001f3fc\u200d\U0001f9b0|\U0001f468\U0001f3fd\u200d\U0001f9b0|\U0001f468\U0001f3fe\u200d\U0001f9b0|\U0001f468\U0001f3ff\u200d\U0001f9b0|\U0001f468\U0001f3fb\u200d\U0001f9b1|\U0001f468\U0001f3fc\u200d\U0001f9b1|\U0001f468\U0001f3fd\u200d\U0001f9b1|\U0001f468\U0001f3fe\u200d\U0001f9b1|\U0001f468\U0001f3ff\u200d\U0001f9b1|\U0001f468\U0001f3fb\u200d\U0001f9b3|\U0001f468\U0001f3fc\u200d\U0001f9b3|\U0001f468\U0001f3fd\u200d\U0001f9b3|\U0001f468\U0001f3fe\u200d\U0001f9b3|\U0001f468\U0001f3ff\u200d\U0001f9b3|\U0001f468\U0001f3fb\u200d\U0001f
9b2|\U0001f468\U0001f3fc\u200d\U0001f9b2|\U0001f468\U0001f3fd\u200d\U0001f9b2|\U0001f468\U0001f3fe\u200d\U0001f9b2|\U0001f468\U0001f3ff\u200d\U0001f9b2|\U0001f471\u200d\u2640\ufe0f|\U0001f471\U0001f3fb\u200d\u2640|\U0001f471\U0001f3fc\u200d\u2640|\U0001f471\U0001f3fd\u200d\u2640|\U0001f471\U0001f3fe\u200d\u2640|\U0001f471\U0001f3ff\u200d\u2640|\U0001f469\U0001f3fb\u200d\U0001f9b0|\U0001f469\U0001f3fc\u200d\U0001f9b0|\U0001f469\U0001f3fd\u200d\U0001f9b0|\U0001f469\U0001f3fe\u200d\U0001f9b0|\U0001f469\U0001f3ff\u200d\U0001f9b0|\U0001f469\U0001f3fb\u200d\U0001f9b1|\U0001f469\U0001f3fc\u200d\U0001f9b1|\U0001f469\U0001f3fd\u200d\U0001f9b1|\U0001f469\U0001f3fe\u200d\U0001f9b1|\U0001f469\U0001f3ff\u200d\U0001f9b1|\U0001f469\U0001f3fb\u200d\U0001f9b3|\U0001f469\U0001f3fc\u200d\U0001f9b3|\U0001f469\U0001f3fd\u200d\U0001f9b3|\U0001f469\U0001f3fe\u200d\U0001f9b3|\U0001f469\U0001f3ff\u200d\U0001f9b3|\U0001f469\U0001f3fb\u200d\U0001f9b2|\U0001f469\U0001f3fc\u200d\U0001f9b2|\U0001f469\U0001f3fd\u200d\U0001f9b2|\U0001f469\U0001f3fe\u200d\U0001f9b2|\U0001f469\U0001f3ff\u200d\U0001f9b2|\U0001f64d\u200d\u2642\ufe0f|\U0001f64d\U0001f3fb\u200d\u2642|\U0001f64d\U0001f3fc\u200d\u2642|\U0001f64d\U0001f3fd\u200d\u2642|\U0001f64d\U0001f3fe\u200d\u2642|\U0001f64d\U0001f3ff\u200d\u2642|\U0001f64d\u200d\u2640\ufe0f|\U0001f64d\U0001f3fb\u200d\u2640|\U0001f64d\U0001f3fc\u200d\u2640|\U0001f64d\U0001f3fd\u200d\u2640|\U0001f64d\U0001f3fe\u200d\u2640|\U0001f64d\U0001f3ff\u200d\u2640|\U0001f64e\u200d\u2642\ufe0f|\U0001f64e\U0001f3fb\u200d\u2642|\U0001f64e\U0001f3fc\u200d\u2642|\U0001f64e\U0001f3fd\u200d\u2642|\U0001f64e\U0001f3fe\u200d\u2642|\U0001f64e\U0001f3ff\u200d\u2642|\U0001f64e\u200d\u2640\ufe0f|\U0001f64e\U0001f3fb\u200d\u2640|\U0001f64e\U0001f3fc\u200d\u2640|\U0001f64e\U0001f3fd\u200d\u2640|\U0001f64e\U0001f3fe\u200d\u2640|\U0001f64e\U0001f3ff\u200d\u2640|\U0001f645\u200d\u2642\ufe0f|\U0001f645\U0001f3fb\u200d\u2642|\U0001f645\U0001f3fc\u200d\u2642|\U0001f645\U0001f3fd\u200d\u2642|\U0001f645\U0001f3fe\u200d\u2642|\U0001f645\U0001f3ff\u200d\u2642|\U0001f645\u200d\u2640\ufe0f|\U0001f645\U0001f3fb\u200d\u2640|\U0001f645\U0001f3fc\u200d\u2640|\U0001f645\U0001f3fd\u200d\u2640|\U0001f645\U0001f3fe\u200d\u2640|\U0001f645\U0001f3ff\u200d\u2640|\U0001f646\u200d\u2642\ufe0f|\U0001f646\U0001f3fb\u200d\u2642|\U0001f646\U0001f3fc\u200d\u2642|\U0001f646\U0001f3fd\u200d\u2642|\U0001f646\U0001f3fe\u200d\u2642|\U0001f646\U0001f3ff\u200d\u2642|\U0001f646\u200d\u2640\ufe0f|\U0001f646\U0001f3fb\u200d\u2640|\U0001f646\U0001f3fc\u200d\u2640|\U0001f646\U0001f3fd\u200d\u2640|\U0001f646\U0001f3fe\u200d\u2640|\U0001f646\U0001f3ff\u200d\u2640|\U0001f481\u200d\u2642\ufe0f|\U0001f481\U0001f3fb\u200d\u2642|\U0001f481\U0001f3fc\u200d\u2642|\U0001f481\U0001f3fd\u200d\u2642|\U0001f481\U0001f3fe\u200d\u2642|\U0001f481\U0001f3ff\u200d\u2642|\U0001f481\u200d\u2640\ufe0f|\U0001f481\U0001f3fb\u200d\u2640|\U0001f481\U0001f3fc\u200d\u2640|\U0001f481\U0001f3fd\u200d\u2640|\U0001f481\U0001f3fe\u200d\u2640|\U0001f481\U0001f3ff\u200d\u2640|\U0001f64b\u200d\u2642\ufe0f|\U0001f64b\U0001f3fb\u200d\u2642|\U0001f64b\U0001f3fc\u200d\u2642|\U0001f64b\U0001f3fd\u200d\u2642|\U0001f64b\U0001f3fe\u200d\u2642|\U0001f64b\U0001f3ff\u200d\u2642|\U0001f64b\u200d\u2640\ufe0f|\U0001f64b\U0001f3fb\u200d\u2640|\U0001f64b\U0001f3fc\u200d\u2640|\U0001f64b\U0001f3fd\u200d\u2640|\U0001f64b\U0001f3fe\u200d\u2640|\U0001f64b\U0001f3ff\u200d\u2640|\U0001f9cf\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2642|\U0001f9cf\U0001f3fc\u200d\u2642|\U0001f9cf\U0001f3fd\u200d\u2642|\U0001f9cf\U0
001f3fe\u200d\u2642|\U0001f9cf\U0001f3ff\u200d\u2642|\U0001f9cf\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2640|\U0001f9cf\U0001f3fc\u200d\u2640|\U0001f9cf\U0001f3fd\u200d\u2640|\U0001f9cf\U0001f3fe\u200d\u2640|\U0001f9cf\U0001f3ff\u200d\u2640|\U0001f647\u200d\u2642\ufe0f|\U0001f647\U0001f3fb\u200d\u2642|\U0001f647\U0001f3fc\u200d\u2642|\U0001f647\U0001f3fd\u200d\u2642|\U0001f647\U0001f3fe\u200d\u2642|\U0001f647\U0001f3ff\u200d\u2642|\U0001f647\u200d\u2640\ufe0f|\U0001f647\U0001f3fb\u200d\u2640|\U0001f647\U0001f3fc\u200d\u2640|\U0001f647\U0001f3fd\u200d\u2640|\U0001f647\U0001f3fe\u200d\u2640|\U0001f647\U0001f3ff\u200d\u2640|\U0001f926\u200d\u2642\ufe0f|\U0001f926\U0001f3fb\u200d\u2642|\U0001f926\U0001f3fc\u200d\u2642|\U0001f926\U0001f3fd\u200d\u2642|\U0001f926\U0001f3fe\u200d\u2642|\U0001f926\U0001f3ff\u200d\u2642|\U0001f926\u200d\u2640\ufe0f|\U0001f926\U0001f3fb\u200d\u2640|\U0001f926\U0001f3fc\u200d\u2640|\U0001f926\U0001f3fd\u200d\u2640|\U0001f926\U0001f3fe\u200d\u2640|\U0001f926\U0001f3ff\u200d\u2640|\U0001f937\u200d\u2642\ufe0f|\U0001f937\U0001f3fb\u200d\u2642|\U0001f937\U0001f3fc\u200d\u2642|\U0001f937\U0001f3fd\u200d\u2642|\U0001f937\U0001f3fe\u200d\u2642|\U0001f937\U0001f3ff\u200d\u2642|\U0001f937\u200d\u2640\ufe0f|\U0001f937\U0001f3fb\u200d\u2640|\U0001f937\U0001f3fc\u200d\u2640|\U0001f937\U0001f3fd\u200d\u2640|\U0001f937\U0001f3fe\u200d\u2640|\U0001f937\U0001f3ff\u200d\u2640|\U0001f468\u200d\u2695\ufe0f|\U0001f468\U0001f3fb\u200d\u2695|\U0001f468\U0001f3fc\u200d\u2695|\U0001f468\U0001f3fd\u200d\u2695|\U0001f468\U0001f3fe\u200d\u2695|\U0001f468\U0001f3ff\u200d\u2695|\U0001f469\u200d\u2695\ufe0f|\U0001f469\U0001f3fb\u200d\u2695|\U0001f469\U0001f3fc\u200d\u2695|\U0001f469\U0001f3fd\u200d\u2695|\U0001f469\U0001f3fe\u200d\u2695|\U0001f469\U0001f3ff\u200d\u2695|\U0001f468\U0001f3fb\u200d\U0001f393|\U0001f468\U0001f3fc\u200d\U0001f393|\U0001f468\U0001f3fd\u200d\U0001f393|\U0001f468\U0001f3fe\u200d\U0001f393|\U0001f468\U0001f3ff\u200d\U0001f393|\U0001f469\U0001f3fb\u200d\U0001f393|\U0001f469\U0001f3fc\u200d\U0001f393|\U0001f469\U0001f3fd\u200d\U0001f393|\U0001f469\U0001f3fe\u200d\U0001f393|\U0001f469\U0001f3ff\u200d\U0001f393|\U0001f468\U0001f3fb\u200d\U0001f3eb|\U0001f468\U0001f3fc\u200d\U0001f3eb|\U0001f468\U0001f3fd\u200d\U0001f3eb|\U0001f468\U0001f3fe\u200d\U0001f3eb|\U0001f468\U0001f3ff\u200d\U0001f3eb|\U0001f469\U0001f3fb\u200d\U0001f3eb|\U0001f469\U0001f3fc\u200d\U0001f3eb|\U0001f469\U0001f3fd\u200d\U0001f3eb|\U0001f469\U0001f3fe\u200d\U0001f3eb|\U0001f469\U0001f3ff\u200d\U0001f3eb|\U0001f468\u200d\u2696\ufe0f|\U0001f468\U0001f3fb\u200d\u2696|\U0001f468\U0001f3fc\u200d\u2696|\U0001f468\U0001f3fd\u200d\u2696|\U0001f468\U0001f3fe\u200d\u2696|\U0001f468\U0001f3ff\u200d\u2696|\U0001f469\u200d\u2696\ufe0f|\U0001f469\U0001f3fb\u200d\u2696|\U0001f469\U0001f3fc\u200d\u2696|\U0001f469\U0001f3fd\u200d\u2696|\U0001f469\U0001f3fe\u200d\u2696|\U0001f469\U0001f3ff\u200d\u2696|\U0001f468\U0001f3fb\u200d\U0001f33e|\U0001f468\U0001f3fc\u200d\U0001f33e|\U0001f468\U0001f3fd\u200d\U0001f33e|\U0001f468\U0001f3fe\u200d\U0001f33e|\U0001f468\U0001f3ff\u200d\U0001f33e|\U0001f469\U0001f3fb\u200d\U0001f33e|\U0001f469\U0001f3fc\u200d\U0001f33e|\U0001f469\U0001f3fd\u200d\U0001f33e|\U0001f469\U0001f3fe\u200d\U0001f33e|\U0001f469\U0001f3ff\u200d\U0001f33e|\U0001f468\U0001f3fb\u200d\U0001f373|\U0001f468\U0001f3fc\u200d\U0001f373|\U0001f468\U0001f3fd\u200d\U0001f373|\U0001f468\U0001f3fe\u200d\U0001f373|\U0001f468\U0001f3ff\u200d\U0001f373|\U0001f469\U0001f3fb\u200d\U0001f373|\U0001f469\U0001f3fc\u200d\U0001f373
|\U0001f469\U0001f3fd\u200d\U0001f373|\U0001f469\U0001f3fe\u200d\U0001f373|\U0001f469\U0001f3ff\u200d\U0001f373|\U0001f468\U0001f3fb\u200d\U0001f527|\U0001f468\U0001f3fc\u200d\U0001f527|\U0001f468\U0001f3fd\u200d\U0001f527|\U0001f468\U0001f3fe\u200d\U0001f527|\U0001f468\U0001f3ff\u200d\U0001f527|\U0001f469\U0001f3fb\u200d\U0001f527|\U0001f469\U0001f3fc\u200d\U0001f527|\U0001f469\U0001f3fd\u200d\U0001f527|\U0001f469\U0001f3fe\u200d\U0001f527|\U0001f469\U0001f3ff\u200d\U0001f527|\U0001f468\U0001f3fb\u200d\U0001f3ed|\U0001f468\U0001f3fc\u200d\U0001f3ed|\U0001f468\U0001f3fd\u200d\U0001f3ed|\U0001f468\U0001f3fe\u200d\U0001f3ed|\U0001f468\U0001f3ff\u200d\U0001f3ed|\U0001f469\U0001f3fb\u200d\U0001f3ed|\U0001f469\U0001f3fc\u200d\U0001f3ed|\U0001f469\U0001f3fd\u200d\U0001f3ed|\U0001f469\U0001f3fe\u200d\U0001f3ed|\U0001f469\U0001f3ff\u200d\U0001f3ed|\U0001f468\U0001f3fb\u200d\U0001f4bc|\U0001f468\U0001f3fc\u200d\U0001f4bc|\U0001f468\U0001f3fd\u200d\U0001f4bc|\U0001f468\U0001f3fe\u200d\U0001f4bc|\U0001f468\U0001f3ff\u200d\U0001f4bc|\U0001f469\U0001f3fb\u200d\U0001f4bc|\U0001f469\U0001f3fc\u200d\U0001f4bc|\U0001f469\U0001f3fd\u200d\U0001f4bc|\U0001f469\U0001f3fe\u200d\U0001f4bc|\U0001f469\U0001f3ff\u200d\U0001f4bc|\U0001f468\U0001f3fb\u200d\U0001f52c|\U0001f468\U0001f3fc\u200d\U0001f52c|\U0001f468\U0001f3fd\u200d\U0001f52c|\U0001f468\U0001f3fe\u200d\U0001f52c|\U0001f468\U0001f3ff\u200d\U0001f52c|\U0001f469\U0001f3fb\u200d\U0001f52c|\U0001f469\U0001f3fc\u200d\U0001f52c|\U0001f469\U0001f3fd\u200d\U0001f52c|\U0001f469\U0001f3fe\u200d\U0001f52c|\U0001f469\U0001f3ff\u200d\U0001f52c|\U0001f468\U0001f3fb\u200d\U0001f4bb|\U0001f468\U0001f3fc\u200d\U0001f4bb|\U0001f468\U0001f3fd\u200d\U0001f4bb|\U0001f468\U0001f3fe\u200d\U0001f4bb|\U0001f468\U0001f3ff\u200d\U0001f4bb|\U0001f469\U0001f3fb\u200d\U0001f4bb|\U0001f469\U0001f3fc\u200d\U0001f4bb|\U0001f469\U0001f3fd\u200d\U0001f4bb|\U0001f469\U0001f3fe\u200d\U0001f4bb|\U0001f469\U0001f3ff\u200d\U0001f4bb|\U0001f468\U0001f3fb\u200d\U0001f3a4|\U0001f468\U0001f3fc\u200d\U0001f3a4|\U0001f468\U0001f3fd\u200d\U0001f3a4|\U0001f468\U0001f3fe\u200d\U0001f3a4|\U0001f468\U0001f3ff\u200d\U0001f3a4|\U0001f469\U0001f3fb\u200d\U0001f3a4|\U0001f469\U0001f3fc\u200d\U0001f3a4|\U0001f469\U0001f3fd\u200d\U0001f3a4|\U0001f469\U0001f3fe\u200d\U0001f3a4|\U0001f469\U0001f3ff\u200d\U0001f3a4|\U0001f468\U0001f3fb\u200d\U0001f3a8|\U0001f468\U0001f3fc\u200d\U0001f3a8|\U0001f468\U0001f3fd\u200d\U0001f3a8|\U0001f468\U0001f3fe\u200d\U0001f3a8|\U0001f468\U0001f3ff\u200d\U0001f3a8|\U0001f469\U0001f3fb\u200d\U0001f3a8|\U0001f469\U0001f3fc\u200d\U0001f3a8|\U0001f469\U0001f3fd\u200d\U0001f3a8|\U0001f469\U0001f3fe\u200d\U0001f3a8|\U0001f469\U0001f3ff\u200d\U0001f3a8|\U0001f468\u200d\u2708\ufe0f|\U0001f468\U0001f3fb\u200d\u2708|\U0001f468\U0001f3fc\u200d\u2708|\U0001f468\U0001f3fd\u200d\u2708|\U0001f468\U0001f3fe\u200d\u2708|\U0001f468\U0001f3ff\u200d\u2708|\U0001f469\u200d\u2708\ufe0f|\U0001f469\U0001f3fb\u200d\u2708|\U0001f469\U0001f3fc\u200d\u2708|\U0001f469\U0001f3fd\u200d\u2708|\U0001f469\U0001f3fe\u200d\u2708|\U0001f469\U0001f3ff\u200d\u2708|\U0001f468\U0001f3fb\u200d\U0001f680|\U0001f468\U0001f3fc\u200d\U0001f680|\U0001f468\U0001f3fd\u200d\U0001f680|\U0001f468\U0001f3fe\u200d\U0001f680|\U0001f468\U0001f3ff\u200d\U0001f680|\U0001f469\U0001f3fb\u200d\U0001f680|\U0001f469\U0001f3fc\u200d\U0001f680|\U0001f469\U0001f3fd\u200d\U0001f680|\U0001f469\U0001f3fe\u200d\U0001f680|\U0001f469\U0001f3ff\u200d\U0001f680|\U0001f468\U0001f3fb\u200d\U0001f692|\U0001f468\U0001f3fc\u200d\U0001f692|\U0001f468\U0001f3fd\
u200d\U0001f692|\U0001f468\U0001f3fe\u200d\U0001f692|\U0001f468\U0001f3ff\u200d\U0001f692|\U0001f469\U0001f3fb\u200d\U0001f692|\U0001f469\U0001f3fc\u200d\U0001f692|\U0001f469\U0001f3fd\u200d\U0001f692|\U0001f469\U0001f3fe\u200d\U0001f692|\U0001f469\U0001f3ff\u200d\U0001f692|\U0001f46e\u200d\u2642\ufe0f|\U0001f46e\U0001f3fb\u200d\u2642|\U0001f46e\U0001f3fc\u200d\u2642|\U0001f46e\U0001f3fd\u200d\u2642|\U0001f46e\U0001f3fe\u200d\u2642|\U0001f46e\U0001f3ff\u200d\u2642|\U0001f46e\u200d\u2640\ufe0f|\U0001f46e\U0001f3fb\u200d\u2640|\U0001f46e\U0001f3fc\u200d\u2640|\U0001f46e\U0001f3fd\u200d\u2640|\U0001f46e\U0001f3fe\u200d\u2640|\U0001f46e\U0001f3ff\u200d\u2640|\U0001f575\u200d\u2642\ufe0f|\U0001f575\ufe0f\u200d\u2642|\U0001f575\U0001f3fb\u200d\u2642|\U0001f575\U0001f3fc\u200d\u2642|\U0001f575\U0001f3fd\u200d\u2642|\U0001f575\U0001f3fe\u200d\u2642|\U0001f575\U0001f3ff\u200d\u2642|\U0001f575\u200d\u2640\ufe0f|\U0001f575\ufe0f\u200d\u2640|\U0001f575\U0001f3fb\u200d\u2640|\U0001f575\U0001f3fc\u200d\u2640|\U0001f575\U0001f3fd\u200d\u2640|\U0001f575\U0001f3fe\u200d\u2640|\U0001f575\U0001f3ff\u200d\u2640|\U0001f482\u200d\u2642\ufe0f|\U0001f482\U0001f3fb\u200d\u2642|\U0001f482\U0001f3fc\u200d\u2642|\U0001f482\U0001f3fd\u200d\u2642|\U0001f482\U0001f3fe\u200d\u2642|\U0001f482\U0001f3ff\u200d\u2642|\U0001f482\u200d\u2640\ufe0f|\U0001f482\U0001f3fb\u200d\u2640|\U0001f482\U0001f3fc\u200d\u2640|\U0001f482\U0001f3fd\u200d\u2640|\U0001f482\U0001f3fe\u200d\u2640|\U0001f482\U0001f3ff\u200d\u2640|\U0001f477\u200d\u2642\ufe0f|\U0001f477\U0001f3fb\u200d\u2642|\U0001f477\U0001f3fc\u200d\u2642|\U0001f477\U0001f3fd\u200d\u2642|\U0001f477\U0001f3fe\u200d\u2642|\U0001f477\U0001f3ff\u200d\u2642|\U0001f477\u200d\u2640\ufe0f|\U0001f477\U0001f3fb\u200d\u2640|\U0001f477\U0001f3fc\u200d\u2640|\U0001f477\U0001f3fd\u200d\u2640|\U0001f477\U0001f3fe\u200d\u2640|\U0001f477\U0001f3ff\u200d\u2640|\U0001f473\u200d\u2642\ufe0f|\U0001f473\U0001f3fb\u200d\u2642|\U0001f473\U0001f3fc\u200d\u2642|\U0001f473\U0001f3fd\u200d\u2642|\U0001f473\U0001f3fe\u200d\u2642|\U0001f473\U0001f3ff\u200d\u2642|\U0001f473\u200d\u2640\ufe0f|\U0001f473\U0001f3fb\u200d\u2640|\U0001f473\U0001f3fc\u200d\u2640|\U0001f473\U0001f3fd\u200d\u2640|\U0001f473\U0001f3fe\u200d\u2640|\U0001f473\U0001f3ff\u200d\u2640|\U0001f9b8\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2642|\U0001f9b8\U0001f3fc\u200d\u2642|\U0001f9b8\U0001f3fd\u200d\u2642|\U0001f9b8\U0001f3fe\u200d\u2642|\U0001f9b8\U0001f3ff\u200d\u2642|\U0001f9b8\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2640|\U0001f9b8\U0001f3fc\u200d\u2640|\U0001f9b8\U0001f3fd\u200d\u2640|\U0001f9b8\U0001f3fe\u200d\u2640|\U0001f9b8\U0001f3ff\u200d\u2640|\U0001f9b9\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2642|\U0001f9b9\U0001f3fc\u200d\u2642|\U0001f9b9\U0001f3fd\u200d\u2642|\U0001f9b9\U0001f3fe\u200d\u2642|\U0001f9b9\U0001f3ff\u200d\u2642|\U0001f9b9\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2640|\U0001f9b9\U0001f3fc\u200d\u2640|\U0001f9b9\U0001f3fd\u200d\u2640|\U0001f9b9\U0001f3fe\u200d\u2640|\U0001f9b9\U0001f3ff\u200d\u2640|\U0001f9d9\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2642|\U0001f9d9\U0001f3fc\u200d\u2642|\U0001f9d9\U0001f3fd\u200d\u2642|\U0001f9d9\U0001f3fe\u200d\u2642|\U0001f9d9\U0001f3ff\u200d\u2642|\U0001f9d9\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2640|\U0001f9d9\U0001f3fc\u200d\u2640|\U0001f9d9\U0001f3fd\u200d\u2640|\U0001f9d9\U0001f3fe\u200d\u2640|\U0001f9d9\U0001f3ff\u200d\u2640|\U0001f9da\u200d\u2642\ufe0f|\U0001f9da\U0001f3fb\u200d\u2642|\U0001f9da\U0001f3fc\u200d\u2642|\U0001f9da\U0001f3fd\u2
00d\u2642|\U0001f9da\U0001f3fe\u200d\u2642|\U0001f9da\U0001f3ff\u200d\u2642|\U0001f9da\u200d\u2640\ufe0f|\U0001f9da\U0001f3fb\u200d\u2640|\U0001f9da\U0001f3fc\u200d\u2640|\U0001f9da\U0001f3fd\u200d\u2640|\U0001f9da\U0001f3fe\u200d\u2640|\U0001f9da\U0001f3ff\u200d\u2640|\U0001f9db\u200d\u2642\ufe0f|\U0001f9db\U0001f3fb\u200d\u2642|\U0001f9db\U0001f3fc\u200d\u2642|\U0001f9db\U0001f3fd\u200d\u2642|\U0001f9db\U0001f3fe\u200d\u2642|\U0001f9db\U0001f3ff\u200d\u2642|\U0001f9db\u200d\u2640\ufe0f|\U0001f9db\U0001f3fb\u200d\u2640|\U0001f9db\U0001f3fc\u200d\u2640|\U0001f9db\U0001f3fd\u200d\u2640|\U0001f9db\U0001f3fe\u200d\u2640|\U0001f9db\U0001f3ff\u200d\u2640|\U0001f9dc\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2642|\U0001f9dc\U0001f3fc\u200d\u2642|\U0001f9dc\U0001f3fd\u200d\u2642|\U0001f9dc\U0001f3fe\u200d\u2642|\U0001f9dc\U0001f3ff\u200d\u2642|\U0001f9dc\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2640|\U0001f9dc\U0001f3fc\u200d\u2640|\U0001f9dc\U0001f3fd\u200d\u2640|\U0001f9dc\U0001f3fe\u200d\u2640|\U0001f9dc\U0001f3ff\u200d\u2640|\U0001f9dd\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2642|\U0001f9dd\U0001f3fc\u200d\u2642|\U0001f9dd\U0001f3fd\u200d\u2642|\U0001f9dd\U0001f3fe\u200d\u2642|\U0001f9dd\U0001f3ff\u200d\u2642|\U0001f9dd\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2640|\U0001f9dd\U0001f3fc\u200d\u2640|\U0001f9dd\U0001f3fd\u200d\u2640|\U0001f9dd\U0001f3fe\u200d\u2640|\U0001f9dd\U0001f3ff\u200d\u2640|\U0001f9de\u200d\u2642\ufe0f|\U0001f9de\u200d\u2640\ufe0f|\U0001f9df\u200d\u2642\ufe0f|\U0001f9df\u200d\u2640\ufe0f|\U0001f486\u200d\u2642\ufe0f|\U0001f486\U0001f3fb\u200d\u2642|\U0001f486\U0001f3fc\u200d\u2642|\U0001f486\U0001f3fd\u200d\u2642|\U0001f486\U0001f3fe\u200d\u2642|\U0001f486\U0001f3ff\u200d\u2642|\U0001f486\u200d\u2640\ufe0f|\U0001f486\U0001f3fb\u200d\u2640|\U0001f486\U0001f3fc\u200d\u2640|\U0001f486\U0001f3fd\u200d\u2640|\U0001f486\U0001f3fe\u200d\u2640|\U0001f486\U0001f3ff\u200d\u2640|\U0001f487\u200d\u2642\ufe0f|\U0001f487\U0001f3fb\u200d\u2642|\U0001f487\U0001f3fc\u200d\u2642|\U0001f487\U0001f3fd\u200d\u2642|\U0001f487\U0001f3fe\u200d\u2642|\U0001f487\U0001f3ff\u200d\u2642|\U0001f487\u200d\u2640\ufe0f|\U0001f487\U0001f3fb\u200d\u2640|\U0001f487\U0001f3fc\u200d\u2640|\U0001f487\U0001f3fd\u200d\u2640|\U0001f487\U0001f3fe\u200d\u2640|\U0001f487\U0001f3ff\u200d\u2640|\U0001f6b6\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2642|\U0001f6b6\U0001f3fc\u200d\u2642|\U0001f6b6\U0001f3fd\u200d\u2642|\U0001f6b6\U0001f3fe\u200d\u2642|\U0001f6b6\U0001f3ff\u200d\u2642|\U0001f6b6\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2640|\U0001f6b6\U0001f3fc\u200d\u2640|\U0001f6b6\U0001f3fd\u200d\u2640|\U0001f6b6\U0001f3fe\u200d\u2640|\U0001f6b6\U0001f3ff\u200d\u2640|\U0001f9cd\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2642|\U0001f9cd\U0001f3fc\u200d\u2642|\U0001f9cd\U0001f3fd\u200d\u2642|\U0001f9cd\U0001f3fe\u200d\u2642|\U0001f9cd\U0001f3ff\u200d\u2642|\U0001f9cd\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2640|\U0001f9cd\U0001f3fc\u200d\u2640|\U0001f9cd\U0001f3fd\u200d\u2640|\U0001f9cd\U0001f3fe\u200d\u2640|\U0001f9cd\U0001f3ff\u200d\u2640|\U0001f9ce\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2642|\U0001f9ce\U0001f3fc\u200d\u2642|\U0001f9ce\U0001f3fd\u200d\u2642|\U0001f9ce\U0001f3fe\u200d\u2642|\U0001f9ce\U0001f3ff\u200d\u2642|\U0001f9ce\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2640|\U0001f9ce\U0001f3fc\u200d\u2640|\U0001f9ce\U0001f3fd\u200d\u2640|\U0001f9ce\U0001f3fe\u200d\u2640|\U0001f9ce\U0001f3ff\u200d\u2640|\U0001f468\U0001f3fb\u200d\U0001f9af|\U0001f468\U0001f3fc\u200d\U
0001f9af|\U0001f468\U0001f3fd\u200d\U0001f9af|\U0001f468\U0001f3fe\u200d\U0001f9af|\U0001f468\U0001f3ff\u200d\U0001f9af|\U0001f469\U0001f3fb\u200d\U0001f9af|\U0001f469\U0001f3fc\u200d\U0001f9af|\U0001f469\U0001f3fd\u200d\U0001f9af|\U0001f469\U0001f3fe\u200d\U0001f9af|\U0001f469\U0001f3ff\u200d\U0001f9af|\U0001f468\U0001f3fb\u200d\U0001f9bc|\U0001f468\U0001f3fc\u200d\U0001f9bc|\U0001f468\U0001f3fd\u200d\U0001f9bc|\U0001f468\U0001f3fe\u200d\U0001f9bc|\U0001f468\U0001f3ff\u200d\U0001f9bc|\U0001f469\U0001f3fb\u200d\U0001f9bc|\U0001f469\U0001f3fc\u200d\U0001f9bc|\U0001f469\U0001f3fd\u200d\U0001f9bc|\U0001f469\U0001f3fe\u200d\U0001f9bc|\U0001f469\U0001f3ff\u200d\U0001f9bc|\U0001f468\U0001f3fb\u200d\U0001f9bd|\U0001f468\U0001f3fc\u200d\U0001f9bd|\U0001f468\U0001f3fd\u200d\U0001f9bd|\U0001f468\U0001f3fe\u200d\U0001f9bd|\U0001f468\U0001f3ff\u200d\U0001f9bd|\U0001f469\U0001f3fb\u200d\U0001f9bd|\U0001f469\U0001f3fc\u200d\U0001f9bd|\U0001f469\U0001f3fd\u200d\U0001f9bd|\U0001f469\U0001f3fe\u200d\U0001f9bd|\U0001f469\U0001f3ff\u200d\U0001f9bd|\U0001f3c3\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2642|\U0001f3c3\U0001f3fc\u200d\u2642|\U0001f3c3\U0001f3fd\u200d\u2642|\U0001f3c3\U0001f3fe\u200d\u2642|\U0001f3c3\U0001f3ff\u200d\u2642|\U0001f3c3\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2640|\U0001f3c3\U0001f3fc\u200d\u2640|\U0001f3c3\U0001f3fd\u200d\u2640|\U0001f3c3\U0001f3fe\u200d\u2640|\U0001f3c3\U0001f3ff\u200d\u2640|\U0001f46f\u200d\u2642\ufe0f|\U0001f46f\u200d\u2640\ufe0f|\U0001f9d6\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2642|\U0001f9d6\U0001f3fc\u200d\u2642|\U0001f9d6\U0001f3fd\u200d\u2642|\U0001f9d6\U0001f3fe\u200d\u2642|\U0001f9d6\U0001f3ff\u200d\u2642|\U0001f9d6\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2640|\U0001f9d6\U0001f3fc\u200d\u2640|\U0001f9d6\U0001f3fd\u200d\u2640|\U0001f9d6\U0001f3fe\u200d\u2640|\U0001f9d6\U0001f3ff\u200d\u2640|\U0001f9d7\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2642|\U0001f9d7\U0001f3fc\u200d\u2642|\U0001f9d7\U0001f3fd\u200d\u2642|\U0001f9d7\U0001f3fe\u200d\u2642|\U0001f9d7\U0001f3ff\u200d\u2642|\U0001f9d7\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2640|\U0001f9d7\U0001f3fc\u200d\u2640|\U0001f9d7\U0001f3fd\u200d\u2640|\U0001f9d7\U0001f3fe\u200d\u2640|\U0001f9d7\U0001f3ff\u200d\u2640|\U0001f3cc\u200d\u2642\ufe0f|\U0001f3cc\ufe0f\u200d\u2642|\U0001f3cc\U0001f3fb\u200d\u2642|\U0001f3cc\U0001f3fc\u200d\u2642|\U0001f3cc\U0001f3fd\u200d\u2642|\U0001f3cc\U0001f3fe\u200d\u2642|\U0001f3cc\U0001f3ff\u200d\u2642|\U0001f3cc\u200d\u2640\ufe0f|\U0001f3cc\ufe0f\u200d\u2640|\U0001f3cc\U0001f3fb\u200d\u2640|\U0001f3cc\U0001f3fc\u200d\u2640|\U0001f3cc\U0001f3fd\u200d\u2640|\U0001f3cc\U0001f3fe\u200d\u2640|\U0001f3cc\U0001f3ff\u200d\u2640|\U0001f3c4\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2642|\U0001f3c4\U0001f3fc\u200d\u2642|\U0001f3c4\U0001f3fd\u200d\u2642|\U0001f3c4\U0001f3fe\u200d\u2642|\U0001f3c4\U0001f3ff\u200d\u2642|\U0001f3c4\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2640|\U0001f3c4\U0001f3fc\u200d\u2640|\U0001f3c4\U0001f3fd\u200d\u2640|\U0001f3c4\U0001f3fe\u200d\u2640|\U0001f3c4\U0001f3ff\u200d\u2640|\U0001f6a3\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2642|\U0001f6a3\U0001f3fc\u200d\u2642|\U0001f6a3\U0001f3fd\u200d\u2642|\U0001f6a3\U0001f3fe\u200d\u2642|\U0001f6a3\U0001f3ff\u200d\u2642|\U0001f6a3\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2640|\U0001f6a3\U0001f3fc\u200d\u2640|\U0001f6a3\U0001f3fd\u200d\u2640|\U0001f6a3\U0001f3fe\u200d\u2640|\U0001f6a3\U0001f3ff\u200d\u2640|\U0001f3ca\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2642|\U00
01f3ca\U0001f3fc\u200d\u2642|\U0001f3ca\U0001f3fd\u200d\u2642|\U0001f3ca\U0001f3fe\u200d\u2642|\U0001f3ca\U0001f3ff\u200d\u2642|\U0001f3ca\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2640|\U0001f3ca\U0001f3fc\u200d\u2640|\U0001f3ca\U0001f3fd\u200d\u2640|\U0001f3ca\U0001f3fe\u200d\u2640|\U0001f3ca\U0001f3ff\u200d\u2640|\u26f9\u200d\u2642\ufe0f|\u26f9\ufe0f\u200d\u2642|\u26f9\U0001f3fb\u200d\u2642|\u26f9\U0001f3fc\u200d\u2642|\u26f9\U0001f3fd\u200d\u2642|\u26f9\U0001f3fe\u200d\u2642|\u26f9\U0001f3ff\u200d\u2642|\u26f9\u200d\u2640\ufe0f|\u26f9\ufe0f\u200d\u2640|\u26f9\U0001f3fb\u200d\u2640|\u26f9\U0001f3fc\u200d\u2640|\u26f9\U0001f3fd\u200d\u2640|\u26f9\U0001f3fe\u200d\u2640|\u26f9\U0001f3ff\u200d\u2640|\U0001f3cb\u200d\u2642\ufe0f|\U0001f3cb\ufe0f\u200d\u2642|\U0001f3cb\U0001f3fb\u200d\u2642|\U0001f3cb\U0001f3fc\u200d\u2642|\U0001f3cb\U0001f3fd\u200d\u2642|\U0001f3cb\U0001f3fe\u200d\u2642|\U0001f3cb\U0001f3ff\u200d\u2642|\U0001f3cb\u200d\u2640\ufe0f|\U0001f3cb\ufe0f\u200d\u2640|\U0001f3cb\U0001f3fb\u200d\u2640|\U0001f3cb\U0001f3fc\u200d\u2640|\U0001f3cb\U0001f3fd\u200d\u2640|\U0001f3cb\U0001f3fe\u200d\u2640|\U0001f3cb\U0001f3ff\u200d\u2640|\U0001f6b4\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2642|\U0001f6b4\U0001f3fc\u200d\u2642|\U0001f6b4\U0001f3fd\u200d\u2642|\U0001f6b4\U0001f3fe\u200d\u2642|\U0001f6b4\U0001f3ff\u200d\u2642|\U0001f6b4\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2640|\U0001f6b4\U0001f3fc\u200d\u2640|\U0001f6b4\U0001f3fd\u200d\u2640|\U0001f6b4\U0001f3fe\u200d\u2640|\U0001f6b4\U0001f3ff\u200d\u2640|\U0001f6b5\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2642|\U0001f6b5\U0001f3fc\u200d\u2642|\U0001f6b5\U0001f3fd\u200d\u2642|\U0001f6b5\U0001f3fe\u200d\u2642|\U0001f6b5\U0001f3ff\u200d\u2642|\U0001f6b5\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2640|\U0001f6b5\U0001f3fc\u200d\u2640|\U0001f6b5\U0001f3fd\u200d\u2640|\U0001f6b5\U0001f3fe\u200d\u2640|\U0001f6b5\U0001f3ff\u200d\u2640|\U0001f938\u200d\u2642\ufe0f|\U0001f938\U0001f3fb\u200d\u2642|\U0001f938\U0001f3fc\u200d\u2642|\U0001f938\U0001f3fd\u200d\u2642|\U0001f938\U0001f3fe\u200d\u2642|\U0001f938\U0001f3ff\u200d\u2642|\U0001f938\u200d\u2640\ufe0f|\U0001f938\U0001f3fb\u200d\u2640|\U0001f938\U0001f3fc\u200d\u2640|\U0001f938\U0001f3fd\u200d\u2640|\U0001f938\U0001f3fe\u200d\u2640|\U0001f938\U0001f3ff\u200d\u2640|\U0001f93c\u200d\u2642\ufe0f|\U0001f93c\u200d\u2640\ufe0f|\U0001f93d\u200d\u2642\ufe0f|\U0001f93d\U0001f3fb\u200d\u2642|\U0001f93d\U0001f3fc\u200d\u2642|\U0001f93d\U0001f3fd\u200d\u2642|\U0001f93d\U0001f3fe\u200d\u2642|\U0001f93d\U0001f3ff\u200d\u2642|\U0001f93d\u200d\u2640\ufe0f|\U0001f93d\U0001f3fb\u200d\u2640|\U0001f93d\U0001f3fc\u200d\u2640|\U0001f93d\U0001f3fd\u200d\u2640|\U0001f93d\U0001f3fe\u200d\u2640|\U0001f93d\U0001f3ff\u200d\u2640|\U0001f93e\u200d\u2642\ufe0f|\U0001f93e\U0001f3fb\u200d\u2642|\U0001f93e\U0001f3fc\u200d\u2642|\U0001f93e\U0001f3fd\u200d\u2642|\U0001f93e\U0001f3fe\u200d\u2642|\U0001f93e\U0001f3ff\u200d\u2642|\U0001f93e\u200d\u2640\ufe0f|\U0001f93e\U0001f3fb\u200d\u2640|\U0001f93e\U0001f3fc\u200d\u2640|\U0001f93e\U0001f3fd\u200d\u2640|\U0001f93e\U0001f3fe\u200d\u2640|\U0001f93e\U0001f3ff\u200d\u2640|\U0001f939\u200d\u2642\ufe0f|\U0001f939\U0001f3fb\u200d\u2642|\U0001f939\U0001f3fc\u200d\u2642|\U0001f939\U0001f3fd\u200d\u2642|\U0001f939\U0001f3fe\u200d\u2642|\U0001f939\U0001f3ff\u200d\u2642|\U0001f939\u200d\u2640\ufe0f|\U0001f939\U0001f3fb\u200d\u2640|\U0001f939\U0001f3fc\u200d\u2640|\U0001f939\U0001f3fd\u200d\u2640|\U0001f939\U0001f3fe\u200d\u2640|\U0001f939\U0001f3ff\u200d\u2640|\U0001f9d8\
u200d\u2642\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2642|\U0001f9d8\U0001f3fc\u200d\u2642|\U0001f9d8\U0001f3fd\u200d\u2642|\U0001f9d8\U0001f3fe\u200d\u2642|\U0001f9d8\U0001f3ff\u200d\u2642|\U0001f9d8\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2640|\U0001f9d8\U0001f3fc\u200d\u2640|\U0001f9d8\U0001f3fd\u200d\u2640|\U0001f9d8\U0001f3fe\u200d\u2640|\U0001f9d8\U0001f3ff\u200d\u2640|\U0001f3f3\ufe0f\u200d\U0001f308|\U0001f3f4\u200d\u2620\ufe0f|\U0001f441\u200d\U0001f5e8|\U0001f471\u200d\u2642|\U0001f468\u200d\U0001f9b0|\U0001f468\u200d\U0001f9b1|\U0001f468\u200d\U0001f9b3|\U0001f468\u200d\U0001f9b2|\U0001f471\u200d\u2640|\U0001f469\u200d\U0001f9b0|\U0001f469\u200d\U0001f9b1|\U0001f469\u200d\U0001f9b3|\U0001f469\u200d\U0001f9b2|\U0001f64d\u200d\u2642|\U0001f64d\u200d\u2640|\U0001f64e\u200d\u2642|\U0001f64e\u200d\u2640|\U0001f645\u200d\u2642|\U0001f645\u200d\u2640|\U0001f646\u200d\u2642|\U0001f646\u200d\u2640|\U0001f481\u200d\u2642|\U0001f481\u200d\u2640|\U0001f64b\u200d\u2642|\U0001f64b\u200d\u2640|\U0001f9cf\u200d\u2642|\U0001f9cf\u200d\u2640|\U0001f647\u200d\u2642|\U0001f647\u200d\u2640|\U0001f926\u200d\u2642|\U0001f926\u200d\u2640|\U0001f937\u200d\u2642|\U0001f937\u200d\u2640|\U0001f468\u200d\u2695|\U0001f469\u200d\u2695|\U0001f468\u200d\U0001f393|\U0001f469\u200d\U0001f393|\U0001f468\u200d\U0001f3eb|\U0001f469\u200d\U0001f3eb|\U0001f468\u200d\u2696|\U0001f469\u200d\u2696|\U0001f468\u200d\U0001f33e|\U0001f469\u200d\U0001f33e|\U0001f468\u200d\U0001f373|\U0001f469\u200d\U0001f373|\U0001f468\u200d\U0001f527|\U0001f469\u200d\U0001f527|\U0001f468\u200d\U0001f3ed|\U0001f469\u200d\U0001f3ed|\U0001f468\u200d\U0001f4bc|\U0001f469\u200d\U0001f4bc|\U0001f468\u200d\U0001f52c|\U0001f469\u200d\U0001f52c|\U0001f468\u200d\U0001f4bb|\U0001f469\u200d\U0001f4bb|\U0001f468\u200d\U0001f3a4|\U0001f469\u200d\U0001f3a4|\U0001f468\u200d\U0001f3a8|\U0001f469\u200d\U0001f3a8|\U0001f468\u200d\u2708|\U0001f469\u200d\u2708|\U0001f468\u200d\U0001f680|\U0001f469\u200d\U0001f680|\U0001f468\u200d\U0001f692|\U0001f469\u200d\U0001f692|\U0001f46e\u200d\u2642|\U0001f46e\u200d\u2640|\U0001f575\u200d\u2642|\U0001f575\u200d\u2640|\U0001f482\u200d\u2642|\U0001f482\u200d\u2640|\U0001f477\u200d\u2642|\U0001f477\u200d\u2640|\U0001f473\u200d\u2642|\U0001f473\u200d\u2640|\U0001f9b8\u200d\u2642|\U0001f9b8\u200d\u2640|\U0001f9b9\u200d\u2642|\U0001f9b9\u200d\u2640|\U0001f9d9\u200d\u2642|\U0001f9d9\u200d\u2640|\U0001f9da\u200d\u2642|\U0001f9da\u200d\u2640|\U0001f9db\u200d\u2642|\U0001f9db\u200d\u2640|\U0001f9dc\u200d\u2642|\U0001f9dc\u200d\u2640|\U0001f9dd\u200d\u2642|\U0001f9dd\u200d\u2640|\U0001f9de\u200d\u2642|\U0001f9de\u200d\u2640|\U0001f9df\u200d\u2642|\U0001f9df\u200d\u2640|\U0001f486\u200d\u2642|\U0001f486\u200d\u2640|\U0001f487\u200d\u2642|\U0001f487\u200d\u2640|\U0001f6b6\u200d\u2642|\U0001f6b6\u200d\u2640|\U0001f9cd\u200d\u2642|\U0001f9cd\u200d\u2640|\U0001f9ce\u200d\u2642|\U0001f9ce\u200d\u2640|\U0001f468\u200d\U0001f9af|\U0001f469\u200d\U0001f9af|\U0001f468\u200d\U0001f9bc|\U0001f469\u200d\U0001f9bc|\U0001f468\u200d\U0001f9bd|\U0001f469\u200d\U0001f9bd|\U0001f3c3\u200d\u2642|\U0001f3c3\u200d\u2640|\U0001f46f\u200d\u2642|\U0001f46f\u200d\u2640|\U0001f9d6\u200d\u2642|\U0001f9d6\u200d\u2640|\U0001f9d7\u200d\u2642|\U0001f9d7\u200d\u2640|\U0001f3cc\u200d\u2642|\U0001f3cc\u200d\u2640|\U0001f3c4\u200d\u2642|\U0001f3c4\u200d\u2640|\U0001f6a3\u200d\u2642|\U0001f6a3\u200d\u2640|\U0001f3ca\u200d\u2642|\U0001f3ca\u200d\u2640|\u26f9\u200d\u2642|\u26f9\u200d\u2640|\U0001f3cb\u200d\u2642|\U0001f3cb\u200d\u2640|\U0001f6b4\u200d\u2642|\U0001f6b4\u200d
\u2640|\U0001f6b5\u200d\u2642|\U0001f6b5\u200d\u2640|\U0001f938\u200d\u2642|\U0001f938\u200d\u2640|\U0001f93c\u200d\u2642|\U0001f93c\u200d\u2640|\U0001f93d\u200d\u2642|\U0001f93d\u200d\u2640|\U0001f93e\u200d\u2642|\U0001f93e\u200d\u2640|\U0001f939\u200d\u2642|\U0001f939\u200d\u2640|\U0001f9d8\u200d\u2642|\U0001f9d8\u200d\u2640|\U0001f468\u200d\U0001f466|\U0001f468\u200d\U0001f467|\U0001f469\u200d\U0001f466|\U0001f469\u200d\U0001f467|\U0001f415\u200d\U0001f9ba|\\#\ufe0f\u20e3|\\*\ufe0f\u20e3|0\ufe0f\u20e3|1\ufe0f\u20e3|2\ufe0f\u20e3|3\ufe0f\u20e3|4\ufe0f\u20e3|5\ufe0f\u20e3|6\ufe0f\u20e3|7\ufe0f\u20e3|8\ufe0f\u20e3|9\ufe0f\u20e3|\U0001f3f3\u200d\U0001f308|\U0001f3f4\u200d\u2620|\u263a\ufe0f|\u2639\ufe0f|\u2620\ufe0f|\u2763\ufe0f|\u2764\ufe0f|\U0001f573\ufe0f|\U0001f5e8\ufe0f|\U0001f5ef\ufe0f|\U0001f44b\U0001f3fb|\U0001f44b\U0001f3fc|\U0001f44b\U0001f3fd|\U0001f44b\U0001f3fe|\U0001f44b\U0001f3ff|\U0001f91a\U0001f3fb|\U0001f91a\U0001f3fc|\U0001f91a\U0001f3fd|\U0001f91a\U0001f3fe|\U0001f91a\U0001f3ff|\U0001f590\ufe0f|\U0001f590\U0001f3fb|\U0001f590\U0001f3fc|\U0001f590\U0001f3fd|\U0001f590\U0001f3fe|\U0001f590\U0001f3ff|\u270b\U0001f3fb|\u270b\U0001f3fc|\u270b\U0001f3fd|\u270b\U0001f3fe|\u270b\U0001f3ff|\U0001f596\U0001f3fb|\U0001f596\U0001f3fc|\U0001f596\U0001f3fd|\U0001f596\U0001f3fe|\U0001f596\U0001f3ff|\U0001f44c\U0001f3fb|\U0001f44c\U0001f3fc|\U0001f44c\U0001f3fd|\U0001f44c\U0001f3fe|\U0001f44c\U0001f3ff|\U0001f90f\U0001f3fb|\U0001f90f\U0001f3fc|\U0001f90f\U0001f3fd|\U0001f90f\U0001f3fe|\U0001f90f\U0001f3ff|\u270c\ufe0f|\u270c\U0001f3fb|\u270c\U0001f3fc|\u270c\U0001f3fd|\u270c\U0001f3fe|\u270c\U0001f3ff|\U0001f91e\U0001f3fb|\U0001f91e\U0001f3fc|\U0001f91e\U0001f3fd|\U0001f91e\U0001f3fe|\U0001f91e\U0001f3ff|\U0001f91f\U0001f3fb|\U0001f91f\U0001f3fc|\U0001f91f\U0001f3fd|\U0001f91f\U0001f3fe|\U0001f91f\U0001f3ff|\U0001f918\U0001f3fb|\U0001f918\U0001f3fc|\U0001f918\U0001f3fd|\U0001f918\U0001f3fe|\U0001f918\U0001f3ff|\U0001f919\U0001f3fb|\U0001f919\U0001f3fc|\U0001f919\U0001f3fd|\U0001f919\U0001f3fe|\U0001f919\U0001f3ff|\U0001f448\U0001f3fb|\U0001f448\U0001f3fc|\U0001f448\U0001f3fd|\U0001f448\U0001f3fe|\U0001f448\U0001f3ff|\U0001f449\U0001f3fb|\U0001f449\U0001f3fc|\U0001f449\U0001f3fd|\U0001f449\U0001f3fe|\U0001f449\U0001f3ff|\U0001f446\U0001f3fb|\U0001f446\U0001f3fc|\U0001f446\U0001f3fd|\U0001f446\U0001f3fe|\U0001f446\U0001f3ff|\U0001f595\U0001f3fb|\U0001f595\U0001f3fc|\U0001f595\U0001f3fd|\U0001f595\U0001f3fe|\U0001f595\U0001f3ff|\U0001f447\U0001f3fb|\U0001f447\U0001f3fc|\U0001f447\U0001f3fd|\U0001f447\U0001f3fe|\U0001f447\U0001f3ff|\u261d\ufe0f|\u261d\U0001f3fb|\u261d\U0001f3fc|\u261d\U0001f3fd|\u261d\U0001f3fe|\u261d\U0001f3ff|\U0001f44d\U0001f3fb|\U0001f44d\U0001f3fc|\U0001f44d\U0001f3fd|\U0001f44d\U0001f3fe|\U0001f44d\U0001f3ff|\U0001f44e\U0001f3fb|\U0001f44e\U0001f3fc|\U0001f44e\U0001f3fd|\U0001f44e\U0001f3fe|\U0001f44e\U0001f3ff|\u270a\U0001f3fb|\u270a\U0001f3fc|\u270a\U0001f3fd|\u270a\U0001f3fe|\u270a\U0001f3ff|\U0001f44a\U0001f3fb|\U0001f44a\U0001f3fc|\U0001f44a\U0001f3fd|\U0001f44a\U0001f3fe|\U0001f44a\U0001f3ff|\U0001f91b\U0001f3fb|\U0001f91b\U0001f3fc|\U0001f91b\U0001f3fd|\U0001f91b\U0001f3fe|\U0001f91b\U0001f3ff|\U0001f91c\U0001f3fb|\U0001f91c\U0001f3fc|\U0001f91c\U0001f3fd|\U0001f91c\U0001f3fe|\U0001f91c\U0001f3ff|\U0001f44f\U0001f3fb|\U0001f44f\U0001f3fc|\U0001f44f\U0001f3fd|\U0001f44f\U0001f3fe|\U0001f44f\U0001f3ff|\U0001f64c\U0001f3fb|\U0001f64c\U0001f3fc|\U0001f64c\U0001f3fd|\U0001f64c\U0001f3fe|\U0001f64c\U0001f3ff|\U0001f450\U0001f3fb|\U0001f450\U0001f3fc|\U0001f450\U0001f3
fd|\U0001f450\U0001f3fe|\U0001f450\U0001f3ff|\U0001f932\U0001f3fb|\U0001f932\U0001f3fc|\U0001f932\U0001f3fd|\U0001f932\U0001f3fe|\U0001f932\U0001f3ff|\U0001f64f\U0001f3fb|\U0001f64f\U0001f3fc|\U0001f64f\U0001f3fd|\U0001f64f\U0001f3fe|\U0001f64f\U0001f3ff|\u270d\ufe0f|\u270d\U0001f3fb|\u270d\U0001f3fc|\u270d\U0001f3fd|\u270d\U0001f3fe|\u270d\U0001f3ff|\U0001f485\U0001f3fb|\U0001f485\U0001f3fc|\U0001f485\U0001f3fd|\U0001f485\U0001f3fe|\U0001f485\U0001f3ff|\U0001f933\U0001f3fb|\U0001f933\U0001f3fc|\U0001f933\U0001f3fd|\U0001f933\U0001f3fe|\U0001f933\U0001f3ff|\U0001f4aa\U0001f3fb|\U0001f4aa\U0001f3fc|\U0001f4aa\U0001f3fd|\U0001f4aa\U0001f3fe|\U0001f4aa\U0001f3ff|\U0001f9b5\U0001f3fb|\U0001f9b5\U0001f3fc|\U0001f9b5\U0001f3fd|\U0001f9b5\U0001f3fe|\U0001f9b5\U0001f3ff|\U0001f9b6\U0001f3fb|\U0001f9b6\U0001f3fc|\U0001f9b6\U0001f3fd|\U0001f9b6\U0001f3fe|\U0001f9b6\U0001f3ff|\U0001f442\U0001f3fb|\U0001f442\U0001f3fc|\U0001f442\U0001f3fd|\U0001f442\U0001f3fe|\U0001f442\U0001f3ff|\U0001f9bb\U0001f3fb|\U0001f9bb\U0001f3fc|\U0001f9bb\U0001f3fd|\U0001f9bb\U0001f3fe|\U0001f9bb\U0001f3ff|\U0001f443\U0001f3fb|\U0001f443\U0001f3fc|\U0001f443\U0001f3fd|\U0001f443\U0001f3fe|\U0001f443\U0001f3ff|\U0001f441\ufe0f|\U0001f476\U0001f3fb|\U0001f476\U0001f3fc|\U0001f476\U0001f3fd|\U0001f476\U0001f3fe|\U0001f476\U0001f3ff|\U0001f9d2\U0001f3fb|\U0001f9d2\U0001f3fc|\U0001f9d2\U0001f3fd|\U0001f9d2\U0001f3fe|\U0001f9d2\U0001f3ff|\U0001f466\U0001f3fb|\U0001f466\U0001f3fc|\U0001f466\U0001f3fd|\U0001f466\U0001f3fe|\U0001f466\U0001f3ff|\U0001f467\U0001f3fb|\U0001f467\U0001f3fc|\U0001f467\U0001f3fd|\U0001f467\U0001f3fe|\U0001f467\U0001f3ff|\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3fe|\U0001f9d1\U0001f3ff|\U0001f471\U0001f3fb|\U0001f471\U0001f3fc|\U0001f471\U0001f3fd|\U0001f471\U0001f3fe|\U0001f471\U0001f3ff|\U0001f468\U0001f3fb|\U0001f468\U0001f3fc|\U0001f468\U0001f3fd|\U0001f468\U0001f3fe|\U0001f468\U0001f3ff|\U0001f9d4\U0001f3fb|\U0001f9d4\U0001f3fc|\U0001f9d4\U0001f3fd|\U0001f9d4\U0001f3fe|\U0001f9d4\U0001f3ff|\U0001f469\U0001f3fb|\U0001f469\U0001f3fc|\U0001f469\U0001f3fd|\U0001f469\U0001f3fe|\U0001f469\U0001f3ff|\U0001f9d3\U0001f3fb|\U0001f9d3\U0001f3fc|\U0001f9d3\U0001f3fd|\U0001f9d3\U0001f3fe|\U0001f9d3\U0001f3ff|\U0001f474\U0001f3fb|\U0001f474\U0001f3fc|\U0001f474\U0001f3fd|\U0001f474\U0001f3fe|\U0001f474\U0001f3ff|\U0001f475\U0001f3fb|\U0001f475\U0001f3fc|\U0001f475\U0001f3fd|\U0001f475\U0001f3fe|\U0001f475\U0001f3ff|\U0001f64d\U0001f3fb|\U0001f64d\U0001f3fc|\U0001f64d\U0001f3fd|\U0001f64d\U0001f3fe|\U0001f64d\U0001f3ff|\U0001f64e\U0001f3fb|\U0001f64e\U0001f3fc|\U0001f64e\U0001f3fd|\U0001f64e\U0001f3fe|\U0001f64e\U0001f3ff|\U0001f645\U0001f3fb|\U0001f645\U0001f3fc|\U0001f645\U0001f3fd|\U0001f645\U0001f3fe|\U0001f645\U0001f3ff|\U0001f646\U0001f3fb|\U0001f646\U0001f3fc|\U0001f646\U0001f3fd|\U0001f646\U0001f3fe|\U0001f646\U0001f3ff|\U0001f481\U0001f3fb|\U0001f481\U0001f3fc|\U0001f481\U0001f3fd|\U0001f481\U0001f3fe|\U0001f481\U0001f3ff|\U0001f64b\U0001f3fb|\U0001f64b\U0001f3fc|\U0001f64b\U0001f3fd|\U0001f64b\U0001f3fe|\U0001f64b\U0001f3ff|\U0001f9cf\U0001f3fb|\U0001f9cf\U0001f3fc|\U0001f9cf\U0001f3fd|\U0001f9cf\U0001f3fe|\U0001f9cf\U0001f3ff|\U0001f647\U0001f3fb|\U0001f647\U0001f3fc|\U0001f647\U0001f3fd|\U0001f647\U0001f3fe|\U0001f647\U0001f3ff|\U0001f926\U0001f3fb|\U0001f926\U0001f3fc|\U0001f926\U0001f3fd|\U0001f926\U0001f3fe|\U0001f926\U0001f3ff|\U0001f937\U0001f3fb|\U0001f937\U0001f3fc|\U0001f937\U0001f3fd|\U0001f937\U0001f3fe|\U0001f937\U0001f3ff|\U0001f46e\U0001f3fb|\U0001f46e\U00
01f3fc|\U0001f46e\U0001f3fd|\U0001f46e\U0001f3fe|\U0001f46e\U0001f3ff|\U0001f575\ufe0f|\U0001f575\U0001f3fb|\U0001f575\U0001f3fc|\U0001f575\U0001f3fd|\U0001f575\U0001f3fe|\U0001f575\U0001f3ff|\U0001f482\U0001f3fb|\U0001f482\U0001f3fc|\U0001f482\U0001f3fd|\U0001f482\U0001f3fe|\U0001f482\U0001f3ff|\U0001f477\U0001f3fb|\U0001f477\U0001f3fc|\U0001f477\U0001f3fd|\U0001f477\U0001f3fe|\U0001f477\U0001f3ff|\U0001f934\U0001f3fb|\U0001f934\U0001f3fc|\U0001f934\U0001f3fd|\U0001f934\U0001f3fe|\U0001f934\U0001f3ff|\U0001f478\U0001f3fb|\U0001f478\U0001f3fc|\U0001f478\U0001f3fd|\U0001f478\U0001f3fe|\U0001f478\U0001f3ff|\U0001f473\U0001f3fb|\U0001f473\U0001f3fc|\U0001f473\U0001f3fd|\U0001f473\U0001f3fe|\U0001f473\U0001f3ff|\U0001f472\U0001f3fb|\U0001f472\U0001f3fc|\U0001f472\U0001f3fd|\U0001f472\U0001f3fe|\U0001f472\U0001f3ff|\U0001f9d5\U0001f3fb|\U0001f9d5\U0001f3fc|\U0001f9d5\U0001f3fd|\U0001f9d5\U0001f3fe|\U0001f9d5\U0001f3ff|\U0001f935\U0001f3fb|\U0001f935\U0001f3fc|\U0001f935\U0001f3fd|\U0001f935\U0001f3fe|\U0001f935\U0001f3ff|\U0001f470\U0001f3fb|\U0001f470\U0001f3fc|\U0001f470\U0001f3fd|\U0001f470\U0001f3fe|\U0001f470\U0001f3ff|\U0001f930\U0001f3fb|\U0001f930\U0001f3fc|\U0001f930\U0001f3fd|\U0001f930\U0001f3fe|\U0001f930\U0001f3ff|\U0001f931\U0001f3fb|\U0001f931\U0001f3fc|\U0001f931\U0001f3fd|\U0001f931\U0001f3fe|\U0001f931\U0001f3ff|\U0001f47c\U0001f3fb|\U0001f47c\U0001f3fc|\U0001f47c\U0001f3fd|\U0001f47c\U0001f3fe|\U0001f47c\U0001f3ff|\U0001f385\U0001f3fb|\U0001f385\U0001f3fc|\U0001f385\U0001f3fd|\U0001f385\U0001f3fe|\U0001f385\U0001f3ff|\U0001f936\U0001f3fb|\U0001f936\U0001f3fc|\U0001f936\U0001f3fd|\U0001f936\U0001f3fe|\U0001f936\U0001f3ff|\U0001f9b8\U0001f3fb|\U0001f9b8\U0001f3fc|\U0001f9b8\U0001f3fd|\U0001f9b8\U0001f3fe|\U0001f9b8\U0001f3ff|\U0001f9b9\U0001f3fb|\U0001f9b9\U0001f3fc|\U0001f9b9\U0001f3fd|\U0001f9b9\U0001f3fe|\U0001f9b9\U0001f3ff|\U0001f9d9\U0001f3fb|\U0001f9d9\U0001f3fc|\U0001f9d9\U0001f3fd|\U0001f9d9\U0001f3fe|\U0001f9d9\U0001f3ff|\U0001f9da\U0001f3fb|\U0001f9da\U0001f3fc|\U0001f9da\U0001f3fd|\U0001f9da\U0001f3fe|\U0001f9da\U0001f3ff|\U0001f9db\U0001f3fb|\U0001f9db\U0001f3fc|\U0001f9db\U0001f3fd|\U0001f9db\U0001f3fe|\U0001f9db\U0001f3ff|\U0001f9dc\U0001f3fb|\U0001f9dc\U0001f3fc|\U0001f9dc\U0001f3fd|\U0001f9dc\U0001f3fe|\U0001f9dc\U0001f3ff|\U0001f9dd\U0001f3fb|\U0001f9dd\U0001f3fc|\U0001f9dd\U0001f3fd|\U0001f9dd\U0001f3fe|\U0001f9dd\U0001f3ff|\U0001f486\U0001f3fb|\U0001f486\U0001f3fc|\U0001f486\U0001f3fd|\U0001f486\U0001f3fe|\U0001f486\U0001f3ff|\U0001f487\U0001f3fb|\U0001f487\U0001f3fc|\U0001f487\U0001f3fd|\U0001f487\U0001f3fe|\U0001f487\U0001f3ff|\U0001f6b6\U0001f3fb|\U0001f6b6\U0001f3fc|\U0001f6b6\U0001f3fd|\U0001f6b6\U0001f3fe|\U0001f6b6\U0001f3ff|\U0001f9cd\U0001f3fb|\U0001f9cd\U0001f3fc|\U0001f9cd\U0001f3fd|\U0001f9cd\U0001f3fe|\U0001f9cd\U0001f3ff|\U0001f9ce\U0001f3fb|\U0001f9ce\U0001f3fc|\U0001f9ce\U0001f3fd|\U0001f9ce\U0001f3fe|\U0001f9ce\U0001f3ff|\U0001f3c3\U0001f3fb|\U0001f3c3\U0001f3fc|\U0001f3c3\U0001f3fd|\U0001f3c3\U0001f3fe|\U0001f3c3\U0001f3ff|\U0001f483\U0001f3fb|\U0001f483\U0001f3fc|\U0001f483\U0001f3fd|\U0001f483\U0001f3fe|\U0001f483\U0001f3ff|\U0001f57a\U0001f3fb|\U0001f57a\U0001f3fc|\U0001f57a\U0001f3fd|\U0001f57a\U0001f3fe|\U0001f57a\U0001f3ff|\U0001f574\ufe0f|\U0001f574\U0001f3fb|\U0001f574\U0001f3fc|\U0001f574\U0001f3fd|\U0001f574\U0001f3fe|\U0001f574\U0001f3ff|\U0001f9d6\U0001f3fb|\U0001f9d6\U0001f3fc|\U0001f9d6\U0001f3fd|\U0001f9d6\U0001f3fe|\U0001f9d6\U0001f3ff|\U0001f9d7\U0001f3fb|\U0001f9d7\U0001f3fc|\U0001f9d7\U0001f3fd|\U0001f9d7\U0001f3fe|\U0001f
9d7\U0001f3ff|\U0001f3c7\U0001f3fb|\U0001f3c7\U0001f3fc|\U0001f3c7\U0001f3fd|\U0001f3c7\U0001f3fe|\U0001f3c7\U0001f3ff|\u26f7\ufe0f|\U0001f3c2\U0001f3fb|\U0001f3c2\U0001f3fc|\U0001f3c2\U0001f3fd|\U0001f3c2\U0001f3fe|\U0001f3c2\U0001f3ff|\U0001f3cc\ufe0f|\U0001f3cc\U0001f3fb|\U0001f3cc\U0001f3fc|\U0001f3cc\U0001f3fd|\U0001f3cc\U0001f3fe|\U0001f3cc\U0001f3ff|\U0001f3c4\U0001f3fb|\U0001f3c4\U0001f3fc|\U0001f3c4\U0001f3fd|\U0001f3c4\U0001f3fe|\U0001f3c4\U0001f3ff|\U0001f6a3\U0001f3fb|\U0001f6a3\U0001f3fc|\U0001f6a3\U0001f3fd|\U0001f6a3\U0001f3fe|\U0001f6a3\U0001f3ff|\U0001f3ca\U0001f3fb|\U0001f3ca\U0001f3fc|\U0001f3ca\U0001f3fd|\U0001f3ca\U0001f3fe|\U0001f3ca\U0001f3ff|\u26f9\ufe0f|\u26f9\U0001f3fb|\u26f9\U0001f3fc|\u26f9\U0001f3fd|\u26f9\U0001f3fe|\u26f9\U0001f3ff|\U0001f3cb\ufe0f|\U0001f3cb\U0001f3fb|\U0001f3cb\U0001f3fc|\U0001f3cb\U0001f3fd|\U0001f3cb\U0001f3fe|\U0001f3cb\U0001f3ff|\U0001f6b4\U0001f3fb|\U0001f6b4\U0001f3fc|\U0001f6b4\U0001f3fd|\U0001f6b4\U0001f3fe|\U0001f6b4\U0001f3ff|\U0001f6b5\U0001f3fb|\U0001f6b5\U0001f3fc|\U0001f6b5\U0001f3fd|\U0001f6b5\U0001f3fe|\U0001f6b5\U0001f3ff|\U0001f938\U0001f3fb|\U0001f938\U0001f3fc|\U0001f938\U0001f3fd|\U0001f938\U0001f3fe|\U0001f938\U0001f3ff|\U0001f93d\U0001f3fb|\U0001f93d\U0001f3fc|\U0001f93d\U0001f3fd|\U0001f93d\U0001f3fe|\U0001f93d\U0001f3ff|\U0001f93e\U0001f3fb|\U0001f93e\U0001f3fc|\U0001f93e\U0001f3fd|\U0001f93e\U0001f3fe|\U0001f93e\U0001f3ff|\U0001f939\U0001f3fb|\U0001f939\U0001f3fc|\U0001f939\U0001f3fd|\U0001f939\U0001f3fe|\U0001f939\U0001f3ff|\U0001f9d8\U0001f3fb|\U0001f9d8\U0001f3fc|\U0001f9d8\U0001f3fd|\U0001f9d8\U0001f3fe|\U0001f9d8\U0001f3ff|\U0001f6c0\U0001f3fb|\U0001f6c0\U0001f3fc|\U0001f6c0\U0001f3fd|\U0001f6c0\U0001f3fe|\U0001f6c0\U0001f3ff|\U0001f6cc\U0001f3fb|\U0001f6cc\U0001f3fc|\U0001f6cc\U0001f3fd|\U0001f6cc\U0001f3fe|\U0001f6cc\U0001f3ff|\U0001f46d\U0001f3fb|\U0001f46d\U0001f3fc|\U0001f46d\U0001f3fd|\U0001f46d\U0001f3fe|\U0001f46d\U0001f3ff|\U0001f46b\U0001f3fb|\U0001f46b\U0001f3fc|\U0001f46b\U0001f3fd|\U0001f46b\U0001f3fe|\U0001f46b\U0001f3ff|\U0001f46c\U0001f3fb|\U0001f46c\U0001f3fc|\U0001f46c\U0001f3fd|\U0001f46c\U0001f3fe|\U0001f46c\U0001f3ff|\U0001f5e3\ufe0f|\U0001f43f\ufe0f|\U0001f54a\ufe0f|\U0001f577\ufe0f|\U0001f578\ufe0f|\U0001f3f5\ufe0f|\u2618\ufe0f|\U0001f336\ufe0f|\U0001f37d\ufe0f|\U0001f5fa\ufe0f|\U0001f3d4\ufe0f|\u26f0\ufe0f|\U0001f3d5\ufe0f|\U0001f3d6\ufe0f|\U0001f3dc\ufe0f|\U0001f3dd\ufe0f|\U0001f3de\ufe0f|\U0001f3df\ufe0f|\U0001f3db\ufe0f|\U0001f3d7\ufe0f|\U0001f3d8\ufe0f|\U0001f3da\ufe0f|\u26e9\ufe0f|\U0001f3d9\ufe0f|\u2668\ufe0f|\U0001f3ce\ufe0f|\U0001f3cd\ufe0f|\U0001f6e3\ufe0f|\U0001f6e4\ufe0f|\U0001f6e2\ufe0f|\U0001f6f3\ufe0f|\u26f4\ufe0f|\U0001f6e5\ufe0f|\u2708\ufe0f|\U0001f6e9\ufe0f|\U0001f6f0\ufe0f|\U0001f6ce\ufe0f|\u23f1\ufe0f|\u23f2\ufe0f|\U0001f570\ufe0f|\U0001f321\ufe0f|\u2600\ufe0f|\u2601\ufe0f|\u26c8\ufe0f|\U0001f324\ufe0f|\U0001f325\ufe0f|\U0001f326\ufe0f|\U0001f327\ufe0f|\U0001f328\ufe0f|\U0001f329\ufe0f|\U0001f32a\ufe0f|\U0001f32b\ufe0f|\U0001f32c\ufe0f|\u2602\ufe0f|\u26f1\ufe0f|\u2744\ufe0f|\u2603\ufe0f|\u2604\ufe0f|\U0001f397\ufe0f|\U0001f39f\ufe0f|\U0001f396\ufe0f|\u26f8\ufe0f|\U0001f579\ufe0f|\u2660\ufe0f|\u2665\ufe0f|\u2666\ufe0f|\u2663\ufe0f|\u265f\ufe0f|\U0001f5bc\ufe0f|\U0001f576\ufe0f|\U0001f6cd\ufe0f|\u26d1\ufe0f|\U0001f399\ufe0f|\U0001f39a\ufe0f|\U0001f39b\ufe0f|\u260e\ufe0f|\U0001f5a5\ufe0f|\U0001f5a8\ufe0f|\u2328\ufe0f|\U0001f5b1\ufe0f|\U0001f5b2\ufe0f|\U0001f39e\ufe0f|\U0001f4fd\ufe0f|\U0001f56f\ufe0f|\U0001f5de\ufe0f|\U0001f3f7\ufe0f|\u2709\ufe0f|\U0001f5f3\ufe0f|\u270f\uf
e0f|\u2712\ufe0f|\U0001f58b\ufe0f|\U0001f58a\ufe0f|\U0001f58c\ufe0f|\U0001f58d\ufe0f|\U0001f5c2\ufe0f|\U0001f5d2\ufe0f|\U0001f5d3\ufe0f|\U0001f587\ufe0f|\u2702\ufe0f|\U0001f5c3\ufe0f|\U0001f5c4\ufe0f|\U0001f5d1\ufe0f|\U0001f5dd\ufe0f|\u26cf\ufe0f|\u2692\ufe0f|\U0001f6e0\ufe0f|\U0001f5e1\ufe0f|\u2694\ufe0f|\U0001f6e1\ufe0f|\u2699\ufe0f|\U0001f5dc\ufe0f|\u2696\ufe0f|\u26d3\ufe0f|\u2697\ufe0f|\U0001f6cf\ufe0f|\U0001f6cb\ufe0f|\u26b0\ufe0f|\u26b1\ufe0f|\u26a0\ufe0f|\u2622\ufe0f|\u2623\ufe0f|\u2b06\ufe0f|\u2197\ufe0f|\u27a1\ufe0f|\u2198\ufe0f|\u2b07\ufe0f|\u2199\ufe0f|\u2b05\ufe0f|\u2196\ufe0f|\u2195\ufe0f|\u2194\ufe0f|\u21a9\ufe0f|\u21aa\ufe0f|\u2934\ufe0f|\u2935\ufe0f|\u269b\ufe0f|\U0001f549\ufe0f|\u2721\ufe0f|\u2638\ufe0f|\u262f\ufe0f|\u271d\ufe0f|\u2626\ufe0f|\u262a\ufe0f|\u262e\ufe0f|\u25b6\ufe0f|\u23ed\ufe0f|\u23ef\ufe0f|\u25c0\ufe0f|\u23ee\ufe0f|\u23f8\ufe0f|\u23f9\ufe0f|\u23fa\ufe0f|\u23cf\ufe0f|\u2640\ufe0f|\u2642\ufe0f|\u2695\ufe0f|\u267e\ufe0f|\u267b\ufe0f|\u269c\ufe0f|\u2611\ufe0f|\u2714\ufe0f|\u2716\ufe0f|\u303d\ufe0f|\u2733\ufe0f|\u2734\ufe0f|\u2747\ufe0f|\u203c\ufe0f|\u2049\ufe0f|\u3030\ufe0f|\xa9\ufe0f|\xae\ufe0f|\u2122\ufe0f|\\#\u20e3|\\*\u20e3|0\u20e3|1\u20e3|2\u20e3|3\u20e3|4\u20e3|5\u20e3|6\u20e3|7\u20e3|8\u20e3|9\u20e3|\U0001f170\ufe0f|\U0001f171\ufe0f|\u2139\ufe0f|\u24c2\ufe0f|\U0001f17e\ufe0f|\U0001f17f\ufe0f|\U0001f202\ufe0f|\U0001f237\ufe0f|\u3297\ufe0f|\u3299\ufe0f|\u25fc\ufe0f|\u25fb\ufe0f|\u25aa\ufe0f|\u25ab\ufe0f|\U0001f3f3\ufe0f|\U0001f1e6\U0001f1e8|\U0001f1e6\U0001f1e9|\U0001f1e6\U0001f1ea|\U0001f1e6\U0001f1eb|\U0001f1e6\U0001f1ec|\U0001f1e6\U0001f1ee|\U0001f1e6\U0001f1f1|\U0001f1e6\U0001f1f2|\U0001f1e6\U0001f1f4|\U0001f1e6\U0001f1f6|\U0001f1e6\U0001f1f7|\U0001f1e6\U0001f1f8|\U0001f1e6\U0001f1f9|\U0001f1e6\U0001f1fa|\U0001f1e6\U0001f1fc|\U0001f1e6\U0001f1fd|\U0001f1e6\U0001f1ff|\U0001f1e7\U0001f1e6|\U0001f1e7\U0001f1e7|\U0001f1e7\U0001f1e9|\U0001f1e7\U0001f1ea|\U0001f1e7\U0001f1eb|\U0001f1e7\U0001f1ec|\U0001f1e7\U0001f1ed|\U0001f1e7\U0001f1ee|\U0001f1e7\U0001f1ef|\U0001f1e7\U0001f1f1|\U0001f1e7\U0001f1f2|\U0001f1e7\U0001f1f3|\U0001f1e7\U0001f1f4|\U0001f1e7\U0001f1f6|\U0001f1e7\U0001f1f7|\U0001f1e7\U0001f1f8|\U0001f1e7\U0001f1f9|\U0001f1e7\U0001f1fb|\U0001f1e7\U0001f1fc|\U0001f1e7\U0001f1fe|\U0001f1e7\U0001f1ff|\U0001f1e8\U0001f1e6|\U0001f1e8\U0001f1e8|\U0001f1e8\U0001f1e9|\U0001f1e8\U0001f1eb|\U0001f1e8\U0001f1ec|\U0001f1e8\U0001f1ed|\U0001f1e8\U0001f1ee|\U0001f1e8\U0001f1f0|\U0001f1e8\U0001f1f1|\U0001f1e8\U0001f1f2|\U0001f1e8\U0001f1f3|\U0001f1e8\U0001f1f4|\U0001f1e8\U0001f1f5|\U0001f1e8\U0001f1f7|\U0001f1e8\U0001f1fa|\U0001f1e8\U0001f1fb|\U0001f1e8\U0001f1fc|\U0001f1e8\U0001f1fd|\U0001f1e8\U0001f1fe|\U0001f1e8\U0001f1ff|\U0001f1e9\U0001f1ea|\U0001f1e9\U0001f1ec|\U0001f1e9\U0001f1ef|\U0001f1e9\U0001f1f0|\U0001f1e9\U0001f1f2|\U0001f1e9\U0001f1f4|\U0001f1e9\U0001f1ff|\U0001f1ea\U0001f1e6|\U0001f1ea\U0001f1e8|\U0001f1ea\U0001f1ea|\U0001f1ea\U0001f1ec|\U0001f1ea\U0001f1ed|\U0001f1ea\U0001f1f7|\U0001f1ea\U0001f1f8|\U0001f1ea\U0001f1f9|\U0001f1ea\U0001f1fa|\U0001f1eb\U0001f1ee|\U0001f1eb\U0001f1ef|\U0001f1eb\U0001f1f0|\U0001f1eb\U0001f1f2|\U0001f1eb\U0001f1f4|\U0001f1eb\U0001f1f7|\U0001f1ec\U0001f1e6|\U0001f1ec\U0001f1e7|\U0001f1ec\U0001f1e9|\U0001f1ec\U0001f1ea|\U0001f1ec\U0001f1eb|\U0001f1ec\U0001f1ec|\U0001f1ec\U0001f1ed|\U0001f1ec\U0001f1ee|\U0001f1ec\U0001f1f1|\U0001f1ec\U0001f1f2|\U0001f1ec\U0001f1f3|\U0001f1ec\U0001f1f5|\U0001f1ec\U0001f1f6|\U0001f1ec\U0001f1f7|\U0001f1ec\U0001f1f8|\U0001f1ec\U0001f1f9|\U0001f1ec\U0001f1fa|\U0001f1ec\U0001f1fc|\U0001f1ec\U0001f1f
e|\U0001f1ed\U0001f1f0|\U0001f1ed\U0001f1f2|\U0001f1ed\U0001f1f3|\U0001f1ed\U0001f1f7|\U0001f1ed\U0001f1f9|\U0001f1ed\U0001f1fa|\U0001f1ee\U0001f1e8|\U0001f1ee\U0001f1e9|\U0001f1ee\U0001f1ea|\U0001f1ee\U0001f1f1|\U0001f1ee\U0001f1f2|\U0001f1ee\U0001f1f3|\U0001f1ee\U0001f1f4|\U0001f1ee\U0001f1f6|\U0001f1ee\U0001f1f7|\U0001f1ee\U0001f1f8|\U0001f1ee\U0001f1f9|\U0001f1ef\U0001f1ea|\U0001f1ef\U0001f1f2|\U0001f1ef\U0001f1f4|\U0001f1ef\U0001f1f5|\U0001f1f0\U0001f1ea|\U0001f1f0\U0001f1ec|\U0001f1f0\U0001f1ed|\U0001f1f0\U0001f1ee|\U0001f1f0\U0001f1f2|\U0001f1f0\U0001f1f3|\U0001f1f0\U0001f1f5|\U0001f1f0\U0001f1f7|\U0001f1f0\U0001f1fc|\U0001f1f0\U0001f1fe|\U0001f1f0\U0001f1ff|\U0001f1f1\U0001f1e6|\U0001f1f1\U0001f1e7|\U0001f1f1\U0001f1e8|\U0001f1f1\U0001f1ee|\U0001f1f1\U0001f1f0|\U0001f1f1\U0001f1f7|\U0001f1f1\U0001f1f8|\U0001f1f1\U0001f1f9|\U0001f1f1\U0001f1fa|\U0001f1f1\U0001f1fb|\U0001f1f1\U0001f1fe|\U0001f1f2\U0001f1e6|\U0001f1f2\U0001f1e8|\U0001f1f2\U0001f1e9|\U0001f1f2\U0001f1ea|\U0001f1f2\U0001f1eb|\U0001f1f2\U0001f1ec|\U0001f1f2\U0001f1ed|\U0001f1f2\U0001f1f0|\U0001f1f2\U0001f1f1|\U0001f1f2\U0001f1f2|\U0001f1f2\U0001f1f3|\U0001f1f2\U0001f1f4|\U0001f1f2\U0001f1f5|\U0001f1f2\U0001f1f6|\U0001f1f2\U0001f1f7|\U0001f1f2\U0001f1f8|\U0001f1f2\U0001f1f9|\U0001f1f2\U0001f1fa|\U0001f1f2\U0001f1fb|\U0001f1f2\U0001f1fc|\U0001f1f2\U0001f1fd|\U0001f1f2\U0001f1fe|\U0001f1f2\U0001f1ff|\U0001f1f3\U0001f1e6|\U0001f1f3\U0001f1e8|\U0001f1f3\U0001f1ea|\U0001f1f3\U0001f1eb|\U0001f1f3\U0001f1ec|\U0001f1f3\U0001f1ee|\U0001f1f3\U0001f1f1|\U0001f1f3\U0001f1f4|\U0001f1f3\U0001f1f5|\U0001f1f3\U0001f1f7|\U0001f1f3\U0001f1fa|\U0001f1f3\U0001f1ff|\U0001f1f4\U0001f1f2|\U0001f1f5\U0001f1e6|\U0001f1f5\U0001f1ea|\U0001f1f5\U0001f1eb|\U0001f1f5\U0001f1ec|\U0001f1f5\U0001f1ed|\U0001f1f5\U0001f1f0|\U0001f1f5\U0001f1f1|\U0001f1f5\U0001f1f2|\U0001f1f5\U0001f1f3|\U0001f1f5\U0001f1f7|\U0001f1f5\U0001f1f8|\U0001f1f5\U0001f1f9|\U0001f1f5\U0001f1fc|\U0001f1f5\U0001f1fe|\U0001f1f6\U0001f1e6|\U0001f1f7\U0001f1ea|\U0001f1f7\U0001f1f4|\U0001f1f7\U0001f1f8|\U0001f1f7\U0001f1fa|\U0001f1f7\U0001f1fc|\U0001f1f8\U0001f1e6|\U0001f1f8\U0001f1e7|\U0001f1f8\U0001f1e8|\U0001f1f8\U0001f1e9|\U0001f1f8\U0001f1ea|\U0001f1f8\U0001f1ec|\U0001f1f8\U0001f1ed|\U0001f1f8\U0001f1ee|\U0001f1f8\U0001f1ef|\U0001f1f8\U0001f1f0|\U0001f1f8\U0001f1f1|\U0001f1f8\U0001f1f2|\U0001f1f8\U0001f1f3|\U0001f1f8\U0001f1f4|\U0001f1f8\U0001f1f7|\U0001f1f8\U0001f1f8|\U0001f1f8\U0001f1f9|\U0001f1f8\U0001f1fb|\U0001f1f8\U0001f1fd|\U0001f1f8\U0001f1fe|\U0001f1f8\U0001f1ff|\U0001f1f9\U0001f1e6|\U0001f1f9\U0001f1e8|\U0001f1f9\U0001f1e9|\U0001f1f9\U0001f1eb|\U0001f1f9\U0001f1ec|\U0001f1f9\U0001f1ed|\U0001f1f9\U0001f1ef|\U0001f1f9\U0001f1f0|\U0001f1f9\U0001f1f1|\U0001f1f9\U0001f1f2|\U0001f1f9\U0001f1f3|\U0001f1f9\U0001f1f4|\U0001f1f9\U0001f1f7|\U0001f1f9\U0001f1f9|\U0001f1f9\U0001f1fb|\U0001f1f9\U0001f1fc|\U0001f1f9\U0001f1ff|\U0001f1fa\U0001f1e6|\U0001f1fa\U0001f1ec|\U0001f1fa\U0001f1f2|\U0001f1fa\U0001f1f3|\U0001f1fa\U0001f1f8|\U0001f1fa\U0001f1fe|\U0001f1fa\U0001f1ff|\U0001f1fb\U0001f1e6|\U0001f1fb\U0001f1e8|\U0001f1fb\U0001f1ea|\U0001f1fb\U0001f1ec|\U0001f1fb\U0001f1ee|\U0001f1fb\U0001f1f3|\U0001f1fb\U0001f1fa|\U0001f1fc\U0001f1eb|\U0001f1fc\U0001f1f8|\U0001f1fd\U0001f1f0|\U0001f1fe\U0001f1ea|\U0001f1fe\U0001f1f9|\U0001f1ff\U0001f1e6|\U0001f1ff\U0001f1f2|\U0001f1ff\U0001f1fc|\U0001f600|\U0001f603|\U0001f604|\U0001f601|\U0001f606|\U0001f605|\U0001f923|\U0001f602|\U0001f642|\U0001f643|\U0001f609|\U0001f60a|\U0001f607|\U0001f970|\U0001f60d|\U0001f929|\U0001f618|\U0001f617|\u263a|\U0001f61
a|\U0001f619|\U0001f60b|\U0001f61b|\U0001f61c|\U0001f92a|\U0001f61d|\U0001f911|\U0001f917|\U0001f92d|\U0001f92b|\U0001f914|\U0001f910|\U0001f928|\U0001f610|\U0001f611|\U0001f636|\U0001f60f|\U0001f612|\U0001f644|\U0001f62c|\U0001f925|\U0001f60c|\U0001f614|\U0001f62a|\U0001f924|\U0001f634|\U0001f637|\U0001f912|\U0001f915|\U0001f922|\U0001f92e|\U0001f927|\U0001f975|\U0001f976|\U0001f974|\U0001f635|\U0001f92f|\U0001f920|\U0001f973|\U0001f60e|\U0001f913|\U0001f9d0|\U0001f615|\U0001f61f|\U0001f641|\u2639|\U0001f62e|\U0001f62f|\U0001f632|\U0001f633|\U0001f97a|\U0001f626|\U0001f627|\U0001f628|\U0001f630|\U0001f625|\U0001f622|\U0001f62d|\U0001f631|\U0001f616|\U0001f623|\U0001f61e|\U0001f613|\U0001f629|\U0001f62b|\U0001f971|\U0001f624|\U0001f621|\U0001f620|\U0001f92c|\U0001f608|\U0001f47f|\U0001f480|\u2620|\U0001f4a9|\U0001f921|\U0001f479|\U0001f47a|\U0001f47b|\U0001f47d|\U0001f47e|\U0001f916|\U0001f63a|\U0001f638|\U0001f639|\U0001f63b|\U0001f63c|\U0001f63d|\U0001f640|\U0001f63f|\U0001f63e|\U0001f648|\U0001f649|\U0001f64a|\U0001f48b|\U0001f48c|\U0001f498|\U0001f49d|\U0001f496|\U0001f497|\U0001f493|\U0001f49e|\U0001f495|\U0001f49f|\u2763|\U0001f494|\u2764|\U0001f9e1|\U0001f49b|\U0001f49a|\U0001f499|\U0001f49c|\U0001f90e|\U0001f5a4|\U0001f90d|\U0001f4af|\U0001f4a2|\U0001f4a5|\U0001f4ab|\U0001f4a6|\U0001f4a8|\U0001f573|\U0001f4a3|\U0001f4ac|\U0001f5e8|\U0001f5ef|\U0001f4ad|\U0001f4a4|\U0001f44b|\U0001f91a|\U0001f590|\u270b|\U0001f596|\U0001f44c|\U0001f90f|\u270c|\U0001f91e|\U0001f91f|\U0001f918|\U0001f919|\U0001f448|\U0001f449|\U0001f446|\U0001f595|\U0001f447|\u261d|\U0001f44d|\U0001f44e|\u270a|\U0001f44a|\U0001f91b|\U0001f91c|\U0001f44f|\U0001f64c|\U0001f450|\U0001f932|\U0001f91d|\U0001f64f|\u270d|\U0001f485|\U0001f933|\U0001f4aa|\U0001f9be|\U0001f9bf|\U0001f9b5|\U0001f9b6|\U0001f442|\U0001f9bb|\U0001f443|\U0001f9e0|\U0001f9b7|\U0001f9b4|\U0001f440|\U0001f441|\U0001f445|\U0001f444|\U0001f476|\U0001f9d2|\U0001f466|\U0001f467|\U0001f9d1|\U0001f471|\U0001f468|\U0001f9d4|\U0001f469|\U0001f9d3|\U0001f474|\U0001f475|\U0001f64d|\U0001f64e|\U0001f645|\U0001f646|\U0001f481|\U0001f64b|\U0001f9cf|\U0001f647|\U0001f926|\U0001f937|\U0001f46e|\U0001f575|\U0001f482|\U0001f477|\U0001f934|\U0001f478|\U0001f473|\U0001f472|\U0001f9d5|\U0001f935|\U0001f470|\U0001f930|\U0001f931|\U0001f47c|\U0001f385|\U0001f936|\U0001f9b8|\U0001f9b9|\U0001f9d9|\U0001f9da|\U0001f9db|\U0001f9dc|\U0001f9dd|\U0001f9de|\U0001f9df|\U0001f486|\U0001f487|\U0001f6b6|\U0001f9cd|\U0001f9ce|\U0001f3c3|\U0001f483|\U0001f57a|\U0001f574|\U0001f46f|\U0001f9d6|\U0001f9d7|\U0001f93a|\U0001f3c7|\u26f7|\U0001f3c2|\U0001f3cc|\U0001f3c4|\U0001f6a3|\U0001f3ca|\u26f9|\U0001f3cb|\U0001f6b4|\U0001f6b5|\U0001f938|\U0001f93c|\U0001f93d|\U0001f93e|\U0001f939|\U0001f9d8|\U0001f6c0|\U0001f6cc|\U0001f46d|\U0001f46b|\U0001f46c|\U0001f48f|\U0001f491|\U0001f46a|\U0001f5e3|\U0001f464|\U0001f465|\U0001f463|\U0001f3fb|\U0001f3fc|\U0001f3fd|\U0001f3fe|\U0001f3ff|\U0001f9b0|\U0001f9b1|\U0001f9b3|\U0001f9b2|\U0001f435|\U0001f412|\U0001f98d|\U0001f9a7|\U0001f436|\U0001f415|\U0001f9ae|\U0001f429|\U0001f43a|\U0001f98a|\U0001f99d|\U0001f431|\U0001f408|\U0001f981|\U0001f42f|\U0001f405|\U0001f406|\U0001f434|\U0001f40e|\U0001f984|\U0001f993|\U0001f98c|\U0001f42e|\U0001f402|\U0001f403|\U0001f404|\U0001f437|\U0001f416|\U0001f417|\U0001f43d|\U0001f40f|\U0001f411|\U0001f410|\U0001f42a|\U0001f42b|\U0001f999|\U0001f992|\U0001f418|\U0001f98f|\U0001f99b|\U0001f42d|\U0001f401|\U0001f400|\U0001f439|\U0001f430|\U0001f407|\U0001f43f|\U0001f994|\U0001f987|\U0001f43b|\U0001f428|\U0001f43c|\U0001f9a5|
\U0001f9a6|\U0001f9a8|\U0001f998|\U0001f9a1|\U0001f43e|\U0001f983|\U0001f414|\U0001f413|\U0001f423|\U0001f424|\U0001f425|\U0001f426|\U0001f427|\U0001f54a|\U0001f985|\U0001f986|\U0001f9a2|\U0001f989|\U0001f9a9|\U0001f99a|\U0001f99c|\U0001f438|\U0001f40a|\U0001f422|\U0001f98e|\U0001f40d|\U0001f432|\U0001f409|\U0001f995|\U0001f996|\U0001f433|\U0001f40b|\U0001f42c|\U0001f41f|\U0001f420|\U0001f421|\U0001f988|\U0001f419|\U0001f41a|\U0001f40c|\U0001f98b|\U0001f41b|\U0001f41c|\U0001f41d|\U0001f41e|\U0001f997|\U0001f577|\U0001f578|\U0001f982|\U0001f99f|\U0001f9a0|\U0001f490|\U0001f338|\U0001f4ae|\U0001f3f5|\U0001f339|\U0001f940|\U0001f33a|\U0001f33b|\U0001f33c|\U0001f337|\U0001f331|\U0001f332|\U0001f333|\U0001f334|\U0001f335|\U0001f33e|\U0001f33f|\u2618|\U0001f340|\U0001f341|\U0001f342|\U0001f343|\U0001f347|\U0001f348|\U0001f349|\U0001f34a|\U0001f34b|\U0001f34c|\U0001f34d|\U0001f96d|\U0001f34e|\U0001f34f|\U0001f350|\U0001f351|\U0001f352|\U0001f353|\U0001f95d|\U0001f345|\U0001f965|\U0001f951|\U0001f346|\U0001f954|\U0001f955|\U0001f33d|\U0001f336|\U0001f952|\U0001f96c|\U0001f966|\U0001f9c4|\U0001f9c5|\U0001f344|\U0001f95c|\U0001f330|\U0001f35e|\U0001f950|\U0001f956|\U0001f968|\U0001f96f|\U0001f95e|\U0001f9c7|\U0001f9c0|\U0001f356|\U0001f357|\U0001f969|\U0001f953|\U0001f354|\U0001f35f|\U0001f355|\U0001f32d|\U0001f96a|\U0001f32e|\U0001f32f|\U0001f959|\U0001f9c6|\U0001f95a|\U0001f373|\U0001f958|\U0001f372|\U0001f963|\U0001f957|\U0001f37f|\U0001f9c8|\U0001f9c2|\U0001f96b|\U0001f371|\U0001f358|\U0001f359|\U0001f35a|\U0001f35b|\U0001f35c|\U0001f35d|\U0001f360|\U0001f362|\U0001f363|\U0001f364|\U0001f365|\U0001f96e|\U0001f361|\U0001f95f|\U0001f960|\U0001f961|\U0001f980|\U0001f99e|\U0001f990|\U0001f991|\U0001f9aa|\U0001f366|\U0001f367|\U0001f368|\U0001f369|\U0001f36a|\U0001f382|\U0001f370|\U0001f9c1|\U0001f967|\U0001f36b|\U0001f36c|\U0001f36d|\U0001f36e|\U0001f36f|\U0001f37c|\U0001f95b|\u2615|\U0001f375|\U0001f376|\U0001f37e|\U0001f377|\U0001f378|\U0001f379|\U0001f37a|\U0001f37b|\U0001f942|\U0001f943|\U0001f964|\U0001f9c3|\U0001f9c9|\U0001f9ca|\U0001f962|\U0001f37d|\U0001f374|\U0001f944|\U0001f52a|\U0001f3fa|\U0001f30d|\U0001f30e|\U0001f30f|\U0001f310|\U0001f5fa|\U0001f5fe|\U0001f9ed|\U0001f3d4|\u26f0|\U0001f30b|\U0001f5fb|\U0001f3d5|\U0001f3d6|\U0001f3dc|\U0001f3dd|\U0001f3de|\U0001f3df|\U0001f3db|\U0001f3d7|\U0001f9f1|\U0001f3d8|\U0001f3da|\U0001f3e0|\U0001f3e1|\U0001f3e2|\U0001f3e3|\U0001f3e4|\U0001f3e5|\U0001f3e6|\U0001f3e8|\U0001f3e9|\U0001f3ea|\U0001f3eb|\U0001f3ec|\U0001f3ed|\U0001f3ef|\U0001f3f0|\U0001f492|\U0001f5fc|\U0001f5fd|\u26ea|\U0001f54c|\U0001f6d5|\U0001f54d|\u26e9|\U0001f54b|\u26f2|\u26fa|\U0001f301|\U0001f303|\U0001f3d9|\U0001f304|\U0001f305|\U0001f306|\U0001f307|\U0001f309|\u2668|\U0001f3a0|\U0001f3a1|\U0001f3a2|\U0001f488|\U0001f3aa|\U0001f682|\U0001f683|\U0001f684|\U0001f685|\U0001f686|\U0001f687|\U0001f688|\U0001f689|\U0001f68a|\U0001f69d|\U0001f69e|\U0001f68b|\U0001f68c|\U0001f68d|\U0001f68e|\U0001f690|\U0001f691|\U0001f692|\U0001f693|\U0001f694|\U0001f695|\U0001f696|\U0001f697|\U0001f698|\U0001f699|\U0001f69a|\U0001f69b|\U0001f69c|\U0001f3ce|\U0001f3cd|\U0001f6f5|\U0001f9bd|\U0001f9bc|\U0001f6fa|\U0001f6b2|\U0001f6f4|\U0001f6f9|\U0001f68f|\U0001f6e3|\U0001f6e4|\U0001f6e2|\u26fd|\U0001f6a8|\U0001f6a5|\U0001f6a6|\U0001f6d1|\U0001f6a7|\u2693|\u26f5|\U0001f6f6|\U0001f6a4|\U0001f6f3|\u26f4|\U0001f6e5|\U0001f6a2|\u2708|\U0001f6e9|\U0001f6eb|\U0001f6ec|\U0001fa82|\U0001f4ba|\U0001f681|\U0001f69f|\U0001f6a0|\U0001f6a1|\U0001f6f0|\U0001f680|\U0001f6f8|\U0001f6ce|\U0001f9f3|\u231b|\u23f3|\u231a|
\u23f0|\u23f1|\u23f2|\U0001f570|\U0001f55b|\U0001f567|\U0001f550|\U0001f55c|\U0001f551|\U0001f55d|\U0001f552|\U0001f55e|\U0001f553|\U0001f55f|\U0001f554|\U0001f560|\U0001f555|\U0001f561|\U0001f556|\U0001f562|\U0001f557|\U0001f563|\U0001f558|\U0001f564|\U0001f559|\U0001f565|\U0001f55a|\U0001f566|\U0001f311|\U0001f312|\U0001f313|\U0001f314|\U0001f315|\U0001f316|\U0001f317|\U0001f318|\U0001f319|\U0001f31a|\U0001f31b|\U0001f31c|\U0001f321|\u2600|\U0001f31d|\U0001f31e|\U0001fa90|\u2b50|\U0001f31f|\U0001f320|\U0001f30c|\u2601|\u26c5|\u26c8|\U0001f324|\U0001f325|\U0001f326|\U0001f327|\U0001f328|\U0001f329|\U0001f32a|\U0001f32b|\U0001f32c|\U0001f300|\U0001f308|\U0001f302|\u2602|\u2614|\u26f1|\u26a1|\u2744|\u2603|\u26c4|\u2604|\U0001f525|\U0001f4a7|\U0001f30a|\U0001f383|\U0001f384|\U0001f386|\U0001f387|\U0001f9e8|\u2728|\U0001f388|\U0001f389|\U0001f38a|\U0001f38b|\U0001f38d|\U0001f38e|\U0001f38f|\U0001f390|\U0001f391|\U0001f9e7|\U0001f380|\U0001f381|\U0001f397|\U0001f39f|\U0001f3ab|\U0001f396|\U0001f3c6|\U0001f3c5|\U0001f947|\U0001f948|\U0001f949|\u26bd|\u26be|\U0001f94e|\U0001f3c0|\U0001f3d0|\U0001f3c8|\U0001f3c9|\U0001f3be|\U0001f94f|\U0001f3b3|\U0001f3cf|\U0001f3d1|\U0001f3d2|\U0001f94d|\U0001f3d3|\U0001f3f8|\U0001f94a|\U0001f94b|\U0001f945|\u26f3|\u26f8|\U0001f3a3|\U0001f93f|\U0001f3bd|\U0001f3bf|\U0001f6f7|\U0001f94c|\U0001f3af|\U0001fa80|\U0001fa81|\U0001f3b1|\U0001f52e|\U0001f9ff|\U0001f3ae|\U0001f579|\U0001f3b0|\U0001f3b2|\U0001f9e9|\U0001f9f8|\u2660|\u2665|\u2666|\u2663|\u265f|\U0001f0cf|\U0001f004|\U0001f3b4|\U0001f3ad|\U0001f5bc|\U0001f3a8|\U0001f9f5|\U0001f9f6|\U0001f453|\U0001f576|\U0001f97d|\U0001f97c|\U0001f9ba|\U0001f454|\U0001f455|\U0001f456|\U0001f9e3|\U0001f9e4|\U0001f9e5|\U0001f9e6|\U0001f457|\U0001f458|\U0001f97b|\U0001fa71|\U0001fa72|\U0001fa73|\U0001f459|\U0001f45a|\U0001f45b|\U0001f45c|\U0001f45d|\U0001f6cd|\U0001f392|\U0001f45e|\U0001f45f|\U0001f97e|\U0001f97f|\U0001f460|\U0001f461|\U0001fa70|\U0001f462|\U0001f451|\U0001f452|\U0001f3a9|\U0001f393|\U0001f9e2|\u26d1|\U0001f4ff|\U0001f484|\U0001f48d|\U0001f48e|\U0001f507|\U0001f508|\U0001f509|\U0001f50a|\U0001f4e2|\U0001f4e3|\U0001f4ef|\U0001f514|\U0001f515|\U0001f3bc|\U0001f3b5|\U0001f3b6|\U0001f399|\U0001f39a|\U0001f39b|\U0001f3a4|\U0001f3a7|\U0001f4fb|\U0001f3b7|\U0001f3b8|\U0001f3b9|\U0001f3ba|\U0001f3bb|\U0001fa95|\U0001f941|\U0001f4f1|\U0001f4f2|\u260e|\U0001f4de|\U0001f4df|\U0001f4e0|\U0001f50b|\U0001f50c|\U0001f4bb|\U0001f5a5|\U0001f5a8|\u2328|\U0001f5b1|\U0001f5b2|\U0001f4bd|\U0001f4be|\U0001f4bf|\U0001f4c0|\U0001f9ee|\U0001f3a5|\U0001f39e|\U0001f4fd|\U0001f3ac|\U0001f4fa|\U0001f4f7|\U0001f4f8|\U0001f4f9|\U0001f4fc|\U0001f50d|\U0001f50e|\U0001f56f|\U0001f4a1|\U0001f526|\U0001f3ee|\U0001fa94|\U0001f4d4|\U0001f4d5|\U0001f4d6|\U0001f4d7|\U0001f4d8|\U0001f4d9|\U0001f4da|\U0001f4d3|\U0001f4d2|\U0001f4c3|\U0001f4dc|\U0001f4c4|\U0001f4f0|\U0001f5de|\U0001f4d1|\U0001f516|\U0001f3f7|\U0001f4b0|\U0001f4b4|\U0001f4b5|\U0001f4b6|\U0001f4b7|\U0001f4b8|\U0001f4b3|\U0001f9fe|\U0001f4b9|\U0001f4b1|\U0001f4b2|\u2709|\U0001f4e7|\U0001f4e8|\U0001f4e9|\U0001f4e4|\U0001f4e5|\U0001f4e6|\U0001f4eb|\U0001f4ea|\U0001f4ec|\U0001f4ed|\U0001f4ee|\U0001f5f3|\u270f|\u2712|\U0001f58b|\U0001f58a|\U0001f58c|\U0001f58d|\U0001f4dd|\U0001f4bc|\U0001f4c1|\U0001f4c2|\U0001f5c2|\U0001f4c5|\U0001f4c6|\U0001f5d2|\U0001f5d3|\U0001f4c7|\U0001f4c8|\U0001f4c9|\U0001f4ca|\U0001f4cb|\U0001f4cc|\U0001f4cd|\U0001f4ce|\U0001f587|\U0001f4cf|\U0001f4d0|\u2702|\U0001f5c3|\U0001f5c4|\U0001f5d1|\U0001f512|\U0001f513|\U0001f50f|\U0001f510|\U0001f511|\U0001f5dd|\U0001f528|\U
0001fa93|\u26cf|\u2692|\U0001f6e0|\U0001f5e1|\u2694|\U0001f52b|\U0001f3f9|\U0001f6e1|\U0001f527|\U0001f529|\u2699|\U0001f5dc|\u2696|\U0001f9af|\U0001f517|\u26d3|\U0001f9f0|\U0001f9f2|\u2697|\U0001f9ea|\U0001f9eb|\U0001f9ec|\U0001f52c|\U0001f52d|\U0001f4e1|\U0001f489|\U0001fa78|\U0001f48a|\U0001fa79|\U0001fa7a|\U0001f6aa|\U0001f6cf|\U0001f6cb|\U0001fa91|\U0001f6bd|\U0001f6bf|\U0001f6c1|\U0001fa92|\U0001f9f4|\U0001f9f7|\U0001f9f9|\U0001f9fa|\U0001f9fb|\U0001f9fc|\U0001f9fd|\U0001f9ef|\U0001f6d2|\U0001f6ac|\u26b0|\u26b1|\U0001f5ff|\U0001f3e7|\U0001f6ae|\U0001f6b0|\u267f|\U0001f6b9|\U0001f6ba|\U0001f6bb|\U0001f6bc|\U0001f6be|\U0001f6c2|\U0001f6c3|\U0001f6c4|\U0001f6c5|\u26a0|\U0001f6b8|\u26d4|\U0001f6ab|\U0001f6b3|\U0001f6ad|\U0001f6af|\U0001f6b1|\U0001f6b7|\U0001f4f5|\U0001f51e|\u2622|\u2623|\u2b06|\u2197|\u27a1|\u2198|\u2b07|\u2199|\u2b05|\u2196|\u2195|\u2194|\u21a9|\u21aa|\u2934|\u2935|\U0001f503|\U0001f504|\U0001f519|\U0001f51a|\U0001f51b|\U0001f51c|\U0001f51d|\U0001f6d0|\u269b|\U0001f549|\u2721|\u2638|\u262f|\u271d|\u2626|\u262a|\u262e|\U0001f54e|\U0001f52f|\u2648|\u2649|\u264a|\u264b|\u264c|\u264d|\u264e|\u264f|\u2650|\u2651|\u2652|\u2653|\u26ce|\U0001f500|\U0001f501|\U0001f502|\u25b6|\u23e9|\u23ed|\u23ef|\u25c0|\u23ea|\u23ee|\U0001f53c|\u23eb|\U0001f53d|\u23ec|\u23f8|\u23f9|\u23fa|\u23cf|\U0001f3a6|\U0001f505|\U0001f506|\U0001f4f6|\U0001f4f3|\U0001f4f4|\u2640|\u2642|\u2695|\u267e|\u267b|\u269c|\U0001f531|\U0001f4db|\U0001f530|\u2b55|\u2705|\u2611|\u2714|\u2716|\u274c|\u274e|\u2795|\u2796|\u2797|\u27b0|\u27bf|\u303d|\u2733|\u2734|\u2747|\u203c|\u2049|\u2753|\u2754|\u2755|\u2757|\u3030|\xa9|\xae|\u2122|\U0001f51f|\U0001f520|\U0001f521|\U0001f522|\U0001f523|\U0001f524|\U0001f170|\U0001f18e|\U0001f171|\U0001f191|\U0001f192|\U0001f193|\u2139|\U0001f194|\u24c2|\U0001f195|\U0001f196|\U0001f17e|\U0001f197|\U0001f17f|\U0001f198|\U0001f199|\U0001f19a|\U0001f201|\U0001f202|\U0001f237|\U0001f236|\U0001f22f|\U0001f250|\U0001f239|\U0001f21a|\U0001f232|\U0001f251|\U0001f238|\U0001f234|\U0001f233|\u3297|\u3299|\U0001f23a|\U0001f235|\U0001f534|\U0001f7e0|\U0001f7e1|\U0001f7e2|\U0001f535|\U0001f7e3|\U0001f7e4|\u26ab|\u26aa|\U0001f7e5|\U0001f7e7|\U0001f7e8|\U0001f7e9|\U0001f7e6|\U0001f7ea|\U0001f7eb|\u2b1b|\u2b1c|\u25fc|\u25fb|\u25fe|\u25fd|\u25aa|\u25ab|\U0001f536|\U0001f537|\U0001f538|\U0001f539|\U0001f53a|\U0001f53b|\U0001f4a0|\U0001f518|\U0001f533|\U0001f532|\U0001f3c1|\U0001f6a9|\U0001f38c|\U0001f3f4|\U0001f3f3')) -> str:
"""Return the string obtained by replacing all emojis in 'text' by the replacement 'repl'.
Args:
            text (str): input text whose emojis are to be replaced
            repl (str): replacement string substituted for each emoji match
Reference:
akkez/emoji.py: Python emoji regexp / python emoji detection
https://gist.github.com/akkez/99ceeae2f13c9d8d9be7df0279e2c438
"""
text = regex.sub(repl, text)
return text
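    # Illustrative usage of the emoji replacement above (a minimal sketch; the
    # sample string and its expected result are assumptions, not from the source):
    #   Normalizer()._emoji_normalize('good \U0001f600 job', '[EMOJI]')
    #   -> 'good [EMOJI] job'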
def _email_normalize(self, text: str, repl: str, regex=re.compile(r'[a-zA-Z0-9.!#$%&\'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9.]+')) -> str:
"""Return the string obtained by replacing all email addresses in 'text' by the replacement 'repl'.
Args:
            text (str): input text whose email addresses are to be replaced
            repl (str): replacement string substituted for each email address
"""
text = regex.sub(repl, text)
return text
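    # Illustrative usage of the email replacement above (a minimal sketch; the
    # sample address is made up):
    #   Normalizer()._email_normalize('write to jane.doe@example.com', '[EMAIL]')
    #   -> 'write to [EMAIL]'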
def _tel_normalize(self, text: str, repl: str, regex=re.compile(r'[()+\d.\-]*[ ]?\d{2,4}[-. ]+\d{3,4}[-. ]+\d{3,4}')) -> str:
"""Return the string obtained by replacing all phone numbers in 'text' by the replacement 'repl'.
Args:
            text (str): input text whose phone numbers are to be replaced
            repl (str): replacement string substituted for each phone number
"""
text = regex.sub(repl, text)
return text
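
# A minimal end-to-end sketch of the pipeline above (not part of the original
# source): Normalizer.normalize applies each enabled replacement in the order
# registered by _init_normalize (URL, tag, emoji, email, tel). The sample text
# and the expected result in the comment are illustrative assumptions.
if __name__ == '__main__':
    normalizer = Normalizer()
    sample = ('Visit https://example.com <b>now</b> \U0001f600 '
              'or mail jane.doe@example.com / 010-1234-5678')
    print(normalizer.normalize(sample))
    # -> 'Visit [URL] [TAG]now[TAG] [EMOJI] or mail [EMAIL] / [TEL]'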
| avg_line_length: 946.587629
| max_line_length: 87,778
| alphanum_fraction: 0.831418
|
import re
class Normalizer:
def __init__(self, url_repl='[URL]', tag_repl='[TAG]', emoji_repl='[EMOJI]', email_repl='[EMAIL]', tel_repl='[TEL]'):
self.url_repl = url_repl
self.tag_repl = tag_repl
self.emoji_repl = emoji_repl
self.email_repl = email_repl
self.tel_repl = tel_repl
self._normalize = []
self._init_normalize()
def normalize(self, text: str) -> str:
for normalize_fn, repl in self._normalize:
text = normalize_fn(text, repl)
return text
def _init_normalize(self) -> None:
if self.url_repl is not None:
self._normalize.append((self._url_normalize, self.url_repl))
if self.tag_repl is not None:
self._normalize.append((self._tag_normalize, self.tag_repl))
if self.emoji_repl is not None:
self._normalize.append((self._emoji_normalize, self.emoji_repl))
if self.email_repl is not None:
self._normalize.append((self._email_normalize, self.email_repl))
if self.tel_repl is not None:
self._normalize.append((self._tel_normalize, self.tel_repl))
def _url_normalize(self, text: str, repl: str, regex=re.compile(r'(https?|ftp|www)\S+')) -> str:
text = regex.sub(repl, text)
return text
def _tag_normalize(self, text: str, repl: str, regex=re.compile(r'<[^>]*>')) -> str:
text = regex.sub(repl, text)
return text
def _emoji_normalize(self, text: str, repl: str, regex=re.compile(r'\U0001f469\u200d\u2764\ufe0f\u200d\U0001f48b\u200d\U0001f468|\U0001f468\u200d\u2764\ufe0f\u200d\U0001f48b\u200d\U0001f468|\U0001f469\u200d\u2764\ufe0f\u200d\U0001f48b\u200d\U0001f469|\U0001f9d1\U0001f3fb\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fc\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fc\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fd\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fd\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fd\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fe|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fe|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3ff|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f469\U0001f3fc|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f469\U0001f3fc|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f469\U0001f3fd|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fc|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fd|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fe|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f468\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f468\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f468\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3ff\u200d\U
0001f91d\u200d\U0001f468\U0001f3fc|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\u200d\u2764\u200d\U0001f48b\u200d\U0001f468|\U0001f468\u200d\u2764\u200d\U0001f48b\u200d\U0001f468|\U0001f469\u200d\u2764\u200d\U0001f48b\u200d\U0001f469|\U0001f468\u200d\U0001f469\u200d\U0001f467\u200d\U0001f466|\U0001f468\u200d\U0001f469\u200d\U0001f466\u200d\U0001f466|\U0001f468\u200d\U0001f469\u200d\U0001f467\u200d\U0001f467|\U0001f468\u200d\U0001f468\u200d\U0001f467\u200d\U0001f466|\U0001f468\u200d\U0001f468\u200d\U0001f466\u200d\U0001f466|\U0001f468\u200d\U0001f468\u200d\U0001f467\u200d\U0001f467|\U0001f469\u200d\U0001f469\u200d\U0001f467\u200d\U0001f466|\U0001f469\u200d\U0001f469\u200d\U0001f466\u200d\U0001f466|\U0001f469\u200d\U0001f469\u200d\U0001f467\u200d\U0001f467|\U0001f3f4\U000e0067\U000e0062\U000e0065\U000e006e\U000e0067\U000e007f|\U0001f3f4\U000e0067\U000e0062\U000e0073\U000e0063\U000e0074\U000e007f|\U0001f3f4\U000e0067\U000e0062\U000e0077\U000e006c\U000e0073\U000e007f|\U0001f469\u200d\u2764\ufe0f\u200d\U0001f468|\U0001f468\u200d\u2764\ufe0f\u200d\U0001f468|\U0001f469\u200d\u2764\ufe0f\u200d\U0001f469|\U0001f441\ufe0f\u200d\U0001f5e8\ufe0f|\U0001f471\U0001f3fb\u200d\u2642\ufe0f|\U0001f471\U0001f3fc\u200d\u2642\ufe0f|\U0001f471\U0001f3fd\u200d\u2642\ufe0f|\U0001f471\U0001f3fe\u200d\u2642\ufe0f|\U0001f471\U0001f3ff\u200d\u2642\ufe0f|\U0001f471\U0001f3fb\u200d\u2640\ufe0f|\U0001f471\U0001f3fc\u200d\u2640\ufe0f|\U0001f471\U0001f3fd\u200d\u2640\ufe0f|\U0001f471\U0001f3fe\u200d\u2640\ufe0f|\U0001f471\U0001f3ff\u200d\u2640\ufe0f|\U0001f64d\U0001f3fb\u200d\u2642\ufe0f|\U0001f64d\U0001f3fc\u200d\u2642\ufe0f|\U0001f64d\U0001f3fd\u200d\u2642\ufe0f|\U0001f64d\U0001f3fe\u200d\u2642\ufe0f|\U0001f64d\U0001f3ff\u200d\u2642\ufe0f|\U0001f64d\U0001f3fb\u200d\u2640\ufe0f|\U0001f64d\U0001f3fc\u200d\u2640\ufe0f|\U0001f64d\U0001f3fd\u200d\u2640\ufe0f|\U0001f64d\U0001f3fe\u200d\u2640\ufe0f|\U0001f64d\U0001f3ff\u200d\u2640\ufe0f|\U0001f64e\U0001f3fb\u200d\u2642\ufe0f|\U0001f64e\U0001f3fc\u200d\u2642\ufe0f|\U0001f64e\U0001f3fd\u200d\u2642\ufe0f|\U0001f64e\U0001f3fe\u200d\u2642\ufe0f|\U0001f64e\U0001f3ff\u200d\u2642\ufe0f|\U0001f64e\U0001f3fb\u200d\u2640\ufe0f|\U0001f64e\U0001f3fc\u200d\u2640\ufe0f|\U0001f64e\U0001f3fd\u200d\u2640\ufe0f|\U0001f64e\U0001f3fe\u200d\u2640\ufe0f|\U0001f64e\U0001f3ff\u200d\u2640\ufe0f|\U0001f645\U0001f3fb\u200d\u2642\ufe0f|\U0001f645\U0001f3fc\u200d\u2642\ufe0f|\U0001f645\U0001f3fd\u200d\u2642\ufe0f|\U0001f645\U0001f3fe\u200d\u2642\ufe0f|\U0001f645\U0001f3ff\u200d\u2642\ufe0f|\U0001f645\U0001f3fb\u200d\u2640\ufe0f|\U0001f645\U0001f3fc\u200d\u2640\ufe0f|\U0001f645\U0001f3fd\u200d\u2640\ufe0f|\U0001f645\U0001f3fe\u200d\u2640\ufe0f|\U0001f645\U0001f3ff\u200d\u2640\ufe0f|\U0001f646\U0001f3fb\u200d\u2642\ufe0f|\U0001f646\U0001f3fc\u200d\u2642\ufe0f|\U0001f646\U0001f3fd\u200d\u2642\ufe0f|\U0001f646\U0001f3fe\u200d\u2642\ufe0f|\U0001f646\U0001f3ff\u200d\u2642\ufe0f|\U0001f646\U0001f3fb\u200d\u2640\ufe0f|\U0001f646\U0001f3fc\u200d\u2640\ufe0f|\U0001f646\U0001f3fd\u200d\u2640\ufe0f|\U0001f646\U0001f3fe\u200d\u2640\ufe0f|\U0001f646\U0001f3ff\u200d\u2640\ufe0f|\U0001f481\U0001f3fb\u200d\u2642\ufe0f|\U0001f481\U0001f3fc\u200d\u2642\ufe0f|\U0001f481\U0001f3fd\u200d\u2642\ufe0f|\U0001f481\U0001f3fe\u200d\u2642\ufe0f|\U0001f481\U0001f3ff\u200d\u2642\ufe0f|\U0001f481\U0001f3fb\u200d\u2640\ufe0f|\U0001f481\U0001f3fc\u200d\u2640\ufe0f|\U0001f481\U0001f3fd\u200d\u2640\ufe0f|\U0001f481\U0001f3fe\u200d\u2640\ufe0f|\U0001f481
\U0001f3ff\u200d\u2640\ufe0f|\U0001f64b\U0001f3fb\u200d\u2642\ufe0f|\U0001f64b\U0001f3fc\u200d\u2642\ufe0f|\U0001f64b\U0001f3fd\u200d\u2642\ufe0f|\U0001f64b\U0001f3fe\u200d\u2642\ufe0f|\U0001f64b\U0001f3ff\u200d\u2642\ufe0f|\U0001f64b\U0001f3fb\u200d\u2640\ufe0f|\U0001f64b\U0001f3fc\u200d\u2640\ufe0f|\U0001f64b\U0001f3fd\u200d\u2640\ufe0f|\U0001f64b\U0001f3fe\u200d\u2640\ufe0f|\U0001f64b\U0001f3ff\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fc\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fd\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fe\u200d\u2642\ufe0f|\U0001f9cf\U0001f3ff\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fc\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fd\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fe\u200d\u2640\ufe0f|\U0001f9cf\U0001f3ff\u200d\u2640\ufe0f|\U0001f647\U0001f3fb\u200d\u2642\ufe0f|\U0001f647\U0001f3fc\u200d\u2642\ufe0f|\U0001f647\U0001f3fd\u200d\u2642\ufe0f|\U0001f647\U0001f3fe\u200d\u2642\ufe0f|\U0001f647\U0001f3ff\u200d\u2642\ufe0f|\U0001f647\U0001f3fb\u200d\u2640\ufe0f|\U0001f647\U0001f3fc\u200d\u2640\ufe0f|\U0001f647\U0001f3fd\u200d\u2640\ufe0f|\U0001f647\U0001f3fe\u200d\u2640\ufe0f|\U0001f647\U0001f3ff\u200d\u2640\ufe0f|\U0001f926\U0001f3fb\u200d\u2642\ufe0f|\U0001f926\U0001f3fc\u200d\u2642\ufe0f|\U0001f926\U0001f3fd\u200d\u2642\ufe0f|\U0001f926\U0001f3fe\u200d\u2642\ufe0f|\U0001f926\U0001f3ff\u200d\u2642\ufe0f|\U0001f926\U0001f3fb\u200d\u2640\ufe0f|\U0001f926\U0001f3fc\u200d\u2640\ufe0f|\U0001f926\U0001f3fd\u200d\u2640\ufe0f|\U0001f926\U0001f3fe\u200d\u2640\ufe0f|\U0001f926\U0001f3ff\u200d\u2640\ufe0f|\U0001f937\U0001f3fb\u200d\u2642\ufe0f|\U0001f937\U0001f3fc\u200d\u2642\ufe0f|\U0001f937\U0001f3fd\u200d\u2642\ufe0f|\U0001f937\U0001f3fe\u200d\u2642\ufe0f|\U0001f937\U0001f3ff\u200d\u2642\ufe0f|\U0001f937\U0001f3fb\u200d\u2640\ufe0f|\U0001f937\U0001f3fc\u200d\u2640\ufe0f|\U0001f937\U0001f3fd\u200d\u2640\ufe0f|\U0001f937\U0001f3fe\u200d\u2640\ufe0f|\U0001f937\U0001f3ff\u200d\u2640\ufe0f|\U0001f468\U0001f3fb\u200d\u2695\ufe0f|\U0001f468\U0001f3fc\u200d\u2695\ufe0f|\U0001f468\U0001f3fd\u200d\u2695\ufe0f|\U0001f468\U0001f3fe\u200d\u2695\ufe0f|\U0001f468\U0001f3ff\u200d\u2695\ufe0f|\U0001f469\U0001f3fb\u200d\u2695\ufe0f|\U0001f469\U0001f3fc\u200d\u2695\ufe0f|\U0001f469\U0001f3fd\u200d\u2695\ufe0f|\U0001f469\U0001f3fe\u200d\u2695\ufe0f|\U0001f469\U0001f3ff\u200d\u2695\ufe0f|\U0001f468\U0001f3fb\u200d\u2696\ufe0f|\U0001f468\U0001f3fc\u200d\u2696\ufe0f|\U0001f468\U0001f3fd\u200d\u2696\ufe0f|\U0001f468\U0001f3fe\u200d\u2696\ufe0f|\U0001f468\U0001f3ff\u200d\u2696\ufe0f|\U0001f469\U0001f3fb\u200d\u2696\ufe0f|\U0001f469\U0001f3fc\u200d\u2696\ufe0f|\U0001f469\U0001f3fd\u200d\u2696\ufe0f|\U0001f469\U0001f3fe\u200d\u2696\ufe0f|\U0001f469\U0001f3ff\u200d\u2696\ufe0f|\U0001f468\U0001f3fb\u200d\u2708\ufe0f|\U0001f468\U0001f3fc\u200d\u2708\ufe0f|\U0001f468\U0001f3fd\u200d\u2708\ufe0f|\U0001f468\U0001f3fe\u200d\u2708\ufe0f|\U0001f468\U0001f3ff\u200d\u2708\ufe0f|\U0001f469\U0001f3fb\u200d\u2708\ufe0f|\U0001f469\U0001f3fc\u200d\u2708\ufe0f|\U0001f469\U0001f3fd\u200d\u2708\ufe0f|\U0001f469\U0001f3fe\u200d\u2708\ufe0f|\U0001f469\U0001f3ff\u200d\u2708\ufe0f|\U0001f46e\U0001f3fb\u200d\u2642\ufe0f|\U0001f46e\U0001f3fc\u200d\u2642\ufe0f|\U0001f46e\U0001f3fd\u200d\u2642\ufe0f|\U0001f46e\U0001f3fe\u200d\u2642\ufe0f|\U0001f46e\U0001f3ff\u200d\u2642\ufe0f|\U0001f46e\U0001f3fb\u200d\u2640\ufe0f|\U0001f46e\U0001f3fc\u200d\u2640\ufe0f|\U0001f46e\U0001f3fd\u200d\u2640\ufe0f|\U0001f46e\U0001f3fe\u200d\u2640\ufe0f|\U0001f46e\U0001f3ff\u200d\u2640\ufe0f|\U0001f575\ufe0f
\u200d\u2642\ufe0f|\U0001f575\U0001f3fb\u200d\u2642\ufe0f|\U0001f575\U0001f3fc\u200d\u2642\ufe0f|\U0001f575\U0001f3fd\u200d\u2642\ufe0f|\U0001f575\U0001f3fe\u200d\u2642\ufe0f|\U0001f575\U0001f3ff\u200d\u2642\ufe0f|\U0001f575\ufe0f\u200d\u2640\ufe0f|\U0001f575\U0001f3fb\u200d\u2640\ufe0f|\U0001f575\U0001f3fc\u200d\u2640\ufe0f|\U0001f575\U0001f3fd\u200d\u2640\ufe0f|\U0001f575\U0001f3fe\u200d\u2640\ufe0f|\U0001f575\U0001f3ff\u200d\u2640\ufe0f|\U0001f482\U0001f3fb\u200d\u2642\ufe0f|\U0001f482\U0001f3fc\u200d\u2642\ufe0f|\U0001f482\U0001f3fd\u200d\u2642\ufe0f|\U0001f482\U0001f3fe\u200d\u2642\ufe0f|\U0001f482\U0001f3ff\u200d\u2642\ufe0f|\U0001f482\U0001f3fb\u200d\u2640\ufe0f|\U0001f482\U0001f3fc\u200d\u2640\ufe0f|\U0001f482\U0001f3fd\u200d\u2640\ufe0f|\U0001f482\U0001f3fe\u200d\u2640\ufe0f|\U0001f482\U0001f3ff\u200d\u2640\ufe0f|\U0001f477\U0001f3fb\u200d\u2642\ufe0f|\U0001f477\U0001f3fc\u200d\u2642\ufe0f|\U0001f477\U0001f3fd\u200d\u2642\ufe0f|\U0001f477\U0001f3fe\u200d\u2642\ufe0f|\U0001f477\U0001f3ff\u200d\u2642\ufe0f|\U0001f477\U0001f3fb\u200d\u2640\ufe0f|\U0001f477\U0001f3fc\u200d\u2640\ufe0f|\U0001f477\U0001f3fd\u200d\u2640\ufe0f|\U0001f477\U0001f3fe\u200d\u2640\ufe0f|\U0001f477\U0001f3ff\u200d\u2640\ufe0f|\U0001f473\U0001f3fb\u200d\u2642\ufe0f|\U0001f473\U0001f3fc\u200d\u2642\ufe0f|\U0001f473\U0001f3fd\u200d\u2642\ufe0f|\U0001f473\U0001f3fe\u200d\u2642\ufe0f|\U0001f473\U0001f3ff\u200d\u2642\ufe0f|\U0001f473\U0001f3fb\u200d\u2640\ufe0f|\U0001f473\U0001f3fc\u200d\u2640\ufe0f|\U0001f473\U0001f3fd\u200d\u2640\ufe0f|\U0001f473\U0001f3fe\u200d\u2640\ufe0f|\U0001f473\U0001f3ff\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fc\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fd\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fe\u200d\u2642\ufe0f|\U0001f9b8\U0001f3ff\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fc\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fd\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fe\u200d\u2640\ufe0f|\U0001f9b8\U0001f3ff\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fc\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fd\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fe\u200d\u2642\ufe0f|\U0001f9b9\U0001f3ff\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fc\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fd\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fe\u200d\u2640\ufe0f|\U0001f9b9\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d9\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d9\U0001f3ff\u200d\u2640\ufe0f|\U0001f9da\U0001f3fb\u200d\u2642\ufe0f|\U0001f9da\U0001f3fc\u200d\u2642\ufe0f|\U0001f9da\U0001f3fd\u200d\u2642\ufe0f|\U0001f9da\U0001f3fe\u200d\u2642\ufe0f|\U0001f9da\U0001f3ff\u200d\u2642\ufe0f|\U0001f9da\U0001f3fb\u200d\u2640\ufe0f|\U0001f9da\U0001f3fc\u200d\u2640\ufe0f|\U0001f9da\U0001f3fd\u200d\u2640\ufe0f|\U0001f9da\U0001f3fe\u200d\u2640\ufe0f|\U0001f9da\U0001f3ff\u200d\u2640\ufe0f|\U0001f9db\U0001f3fb\u200d\u2642\ufe0f|\U0001f9db\U0001f3fc\u200d\u2642\ufe0f|\U0001f9db\U0001f3fd\u200d\u2642\ufe0f|\U0001f9db\U0001f3fe\u200d\u2642\ufe0f|\U0001f9db\U0001f3ff\u200d\u2642\ufe0f|\U0001f9db\U0001f3fb\u200d\u2640\ufe0f|\U0001f9db\U0001f3fc\u200d\u2640\ufe0f|\U0001f9db\U0001f3fd\u200d\u2640\ufe0f|\U0001f9db\U0001f3fe\u200d\u2640\ufe0f|\U0001f9db\U0001f3ff\u200d\u26
40\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fc\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fd\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fe\u200d\u2642\ufe0f|\U0001f9dc\U0001f3ff\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fc\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fd\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fe\u200d\u2640\ufe0f|\U0001f9dc\U0001f3ff\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fc\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fd\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fe\u200d\u2642\ufe0f|\U0001f9dd\U0001f3ff\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fc\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fd\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fe\u200d\u2640\ufe0f|\U0001f9dd\U0001f3ff\u200d\u2640\ufe0f|\U0001f486\U0001f3fb\u200d\u2642\ufe0f|\U0001f486\U0001f3fc\u200d\u2642\ufe0f|\U0001f486\U0001f3fd\u200d\u2642\ufe0f|\U0001f486\U0001f3fe\u200d\u2642\ufe0f|\U0001f486\U0001f3ff\u200d\u2642\ufe0f|\U0001f486\U0001f3fb\u200d\u2640\ufe0f|\U0001f486\U0001f3fc\u200d\u2640\ufe0f|\U0001f486\U0001f3fd\u200d\u2640\ufe0f|\U0001f486\U0001f3fe\u200d\u2640\ufe0f|\U0001f486\U0001f3ff\u200d\u2640\ufe0f|\U0001f487\U0001f3fb\u200d\u2642\ufe0f|\U0001f487\U0001f3fc\u200d\u2642\ufe0f|\U0001f487\U0001f3fd\u200d\u2642\ufe0f|\U0001f487\U0001f3fe\u200d\u2642\ufe0f|\U0001f487\U0001f3ff\u200d\u2642\ufe0f|\U0001f487\U0001f3fb\u200d\u2640\ufe0f|\U0001f487\U0001f3fc\u200d\u2640\ufe0f|\U0001f487\U0001f3fd\u200d\u2640\ufe0f|\U0001f487\U0001f3fe\u200d\u2640\ufe0f|\U0001f487\U0001f3ff\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fc\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fd\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fe\u200d\u2642\ufe0f|\U0001f6b6\U0001f3ff\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fc\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fd\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fe\u200d\u2640\ufe0f|\U0001f6b6\U0001f3ff\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fc\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fd\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fe\u200d\u2642\ufe0f|\U0001f9cd\U0001f3ff\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fc\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fd\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fe\u200d\u2640\ufe0f|\U0001f9cd\U0001f3ff\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fc\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fd\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fe\u200d\u2642\ufe0f|\U0001f9ce\U0001f3ff\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fc\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fd\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fe\u200d\u2640\ufe0f|\U0001f9ce\U0001f3ff\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fc\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fd\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fe\u200d\u2642\ufe0f|\U0001f3c3\U0001f3ff\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fc\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fd\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fe\u200d\u2640\ufe0f|\U0001f3c3\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d6\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d6\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2642\ufe
0f|\U0001f9d7\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d7\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d7\U0001f3ff\u200d\u2640\ufe0f|\U0001f3cc\ufe0f\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fb\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fc\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fd\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fe\u200d\u2642\ufe0f|\U0001f3cc\U0001f3ff\u200d\u2642\ufe0f|\U0001f3cc\ufe0f\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fb\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fc\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fd\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fe\u200d\u2640\ufe0f|\U0001f3cc\U0001f3ff\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fc\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fd\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fe\u200d\u2642\ufe0f|\U0001f3c4\U0001f3ff\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fc\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fd\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fe\u200d\u2640\ufe0f|\U0001f3c4\U0001f3ff\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fc\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fd\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fe\u200d\u2642\ufe0f|\U0001f6a3\U0001f3ff\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fc\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fd\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fe\u200d\u2640\ufe0f|\U0001f6a3\U0001f3ff\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fc\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fd\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fe\u200d\u2642\ufe0f|\U0001f3ca\U0001f3ff\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fc\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fd\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fe\u200d\u2640\ufe0f|\U0001f3ca\U0001f3ff\u200d\u2640\ufe0f|\u26f9\ufe0f\u200d\u2642\ufe0f|\u26f9\U0001f3fb\u200d\u2642\ufe0f|\u26f9\U0001f3fc\u200d\u2642\ufe0f|\u26f9\U0001f3fd\u200d\u2642\ufe0f|\u26f9\U0001f3fe\u200d\u2642\ufe0f|\u26f9\U0001f3ff\u200d\u2642\ufe0f|\u26f9\ufe0f\u200d\u2640\ufe0f|\u26f9\U0001f3fb\u200d\u2640\ufe0f|\u26f9\U0001f3fc\u200d\u2640\ufe0f|\u26f9\U0001f3fd\u200d\u2640\ufe0f|\u26f9\U0001f3fe\u200d\u2640\ufe0f|\u26f9\U0001f3ff\u200d\u2640\ufe0f|\U0001f3cb\ufe0f\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fb\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fc\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fd\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fe\u200d\u2642\ufe0f|\U0001f3cb\U0001f3ff\u200d\u2642\ufe0f|\U0001f3cb\ufe0f\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fb\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fc\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fd\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fe\u200d\u2640\ufe0f|\U0001f3cb\U0001f3ff\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fc\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fd\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fe\u200d\u2642\ufe0f|\U0001f6b4\U0001f3ff\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fc\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fd\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fe\u200d\u2640\ufe0f|\U0001f6b4\U0001f3ff\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fc\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fd\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fe\u200d\u2642\ufe0f|\U0001f6b5\U0001f3ff\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fc\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fd\u200d\u2640\ufe
0f|\U0001f6b5\U0001f3fe\u200d\u2640\ufe0f|\U0001f6b5\U0001f3ff\u200d\u2640\ufe0f|\U0001f938\U0001f3fb\u200d\u2642\ufe0f|\U0001f938\U0001f3fc\u200d\u2642\ufe0f|\U0001f938\U0001f3fd\u200d\u2642\ufe0f|\U0001f938\U0001f3fe\u200d\u2642\ufe0f|\U0001f938\U0001f3ff\u200d\u2642\ufe0f|\U0001f938\U0001f3fb\u200d\u2640\ufe0f|\U0001f938\U0001f3fc\u200d\u2640\ufe0f|\U0001f938\U0001f3fd\u200d\u2640\ufe0f|\U0001f938\U0001f3fe\u200d\u2640\ufe0f|\U0001f938\U0001f3ff\u200d\u2640\ufe0f|\U0001f93d\U0001f3fb\u200d\u2642\ufe0f|\U0001f93d\U0001f3fc\u200d\u2642\ufe0f|\U0001f93d\U0001f3fd\u200d\u2642\ufe0f|\U0001f93d\U0001f3fe\u200d\u2642\ufe0f|\U0001f93d\U0001f3ff\u200d\u2642\ufe0f|\U0001f93d\U0001f3fb\u200d\u2640\ufe0f|\U0001f93d\U0001f3fc\u200d\u2640\ufe0f|\U0001f93d\U0001f3fd\u200d\u2640\ufe0f|\U0001f93d\U0001f3fe\u200d\u2640\ufe0f|\U0001f93d\U0001f3ff\u200d\u2640\ufe0f|\U0001f93e\U0001f3fb\u200d\u2642\ufe0f|\U0001f93e\U0001f3fc\u200d\u2642\ufe0f|\U0001f93e\U0001f3fd\u200d\u2642\ufe0f|\U0001f93e\U0001f3fe\u200d\u2642\ufe0f|\U0001f93e\U0001f3ff\u200d\u2642\ufe0f|\U0001f93e\U0001f3fb\u200d\u2640\ufe0f|\U0001f93e\U0001f3fc\u200d\u2640\ufe0f|\U0001f93e\U0001f3fd\u200d\u2640\ufe0f|\U0001f93e\U0001f3fe\u200d\u2640\ufe0f|\U0001f93e\U0001f3ff\u200d\u2640\ufe0f|\U0001f939\U0001f3fb\u200d\u2642\ufe0f|\U0001f939\U0001f3fc\u200d\u2642\ufe0f|\U0001f939\U0001f3fd\u200d\u2642\ufe0f|\U0001f939\U0001f3fe\u200d\u2642\ufe0f|\U0001f939\U0001f3ff\u200d\u2642\ufe0f|\U0001f939\U0001f3fb\u200d\u2640\ufe0f|\U0001f939\U0001f3fc\u200d\u2640\ufe0f|\U0001f939\U0001f3fd\u200d\u2640\ufe0f|\U0001f939\U0001f3fe\u200d\u2640\ufe0f|\U0001f939\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d8\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d8\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d1\u200d\U0001f91d\u200d\U0001f9d1|\U0001f469\u200d\u2764\u200d\U0001f468|\U0001f468\u200d\u2764\u200d\U0001f468|\U0001f469\u200d\u2764\u200d\U0001f469|\U0001f468\u200d\U0001f469\u200d\U0001f466|\U0001f468\u200d\U0001f469\u200d\U0001f467|\U0001f468\u200d\U0001f468\u200d\U0001f466|\U0001f468\u200d\U0001f468\u200d\U0001f467|\U0001f469\u200d\U0001f469\u200d\U0001f466|\U0001f469\u200d\U0001f469\u200d\U0001f467|\U0001f468\u200d\U0001f466\u200d\U0001f466|\U0001f468\u200d\U0001f467\u200d\U0001f466|\U0001f468\u200d\U0001f467\u200d\U0001f467|\U0001f469\u200d\U0001f466\u200d\U0001f466|\U0001f469\u200d\U0001f467\u200d\U0001f466|\U0001f469\u200d\U0001f467\u200d\U0001f467|\U0001f441\u200d\U0001f5e8\ufe0f|\U0001f441\ufe0f\u200d\U0001f5e8|\U0001f471\u200d\u2642\ufe0f|\U0001f471\U0001f3fb\u200d\u2642|\U0001f471\U0001f3fc\u200d\u2642|\U0001f471\U0001f3fd\u200d\u2642|\U0001f471\U0001f3fe\u200d\u2642|\U0001f471\U0001f3ff\u200d\u2642|\U0001f468\U0001f3fb\u200d\U0001f9b0|\U0001f468\U0001f3fc\u200d\U0001f9b0|\U0001f468\U0001f3fd\u200d\U0001f9b0|\U0001f468\U0001f3fe\u200d\U0001f9b0|\U0001f468\U0001f3ff\u200d\U0001f9b0|\U0001f468\U0001f3fb\u200d\U0001f9b1|\U0001f468\U0001f3fc\u200d\U0001f9b1|\U0001f468\U0001f3fd\u200d\U0001f9b1|\U0001f468\U0001f3fe\u200d\U0001f9b1|\U0001f468\U0001f3ff\u200d\U0001f9b1|\U0001f468\U0001f3fb\u200d\U0001f9b3|\U0001f468\U0001f3fc\u200d\U0001f9b3|\U0001f468\U0001f3fd\u200d\U0001f9b3|\U0001f468\U0001f3fe\u200d\U0001f9b3|\U0001f468\U0001f3ff\u200d\U0001f9b3|\U0001f468\U0001f3fb\u200d\U0001f
9b2|\U0001f468\U0001f3fc\u200d\U0001f9b2|\U0001f468\U0001f3fd\u200d\U0001f9b2|\U0001f468\U0001f3fe\u200d\U0001f9b2|\U0001f468\U0001f3ff\u200d\U0001f9b2|\U0001f471\u200d\u2640\ufe0f|\U0001f471\U0001f3fb\u200d\u2640|\U0001f471\U0001f3fc\u200d\u2640|\U0001f471\U0001f3fd\u200d\u2640|\U0001f471\U0001f3fe\u200d\u2640|\U0001f471\U0001f3ff\u200d\u2640|\U0001f469\U0001f3fb\u200d\U0001f9b0|\U0001f469\U0001f3fc\u200d\U0001f9b0|\U0001f469\U0001f3fd\u200d\U0001f9b0|\U0001f469\U0001f3fe\u200d\U0001f9b0|\U0001f469\U0001f3ff\u200d\U0001f9b0|\U0001f469\U0001f3fb\u200d\U0001f9b1|\U0001f469\U0001f3fc\u200d\U0001f9b1|\U0001f469\U0001f3fd\u200d\U0001f9b1|\U0001f469\U0001f3fe\u200d\U0001f9b1|\U0001f469\U0001f3ff\u200d\U0001f9b1|\U0001f469\U0001f3fb\u200d\U0001f9b3|\U0001f469\U0001f3fc\u200d\U0001f9b3|\U0001f469\U0001f3fd\u200d\U0001f9b3|\U0001f469\U0001f3fe\u200d\U0001f9b3|\U0001f469\U0001f3ff\u200d\U0001f9b3|\U0001f469\U0001f3fb\u200d\U0001f9b2|\U0001f469\U0001f3fc\u200d\U0001f9b2|\U0001f469\U0001f3fd\u200d\U0001f9b2|\U0001f469\U0001f3fe\u200d\U0001f9b2|\U0001f469\U0001f3ff\u200d\U0001f9b2|\U0001f64d\u200d\u2642\ufe0f|\U0001f64d\U0001f3fb\u200d\u2642|\U0001f64d\U0001f3fc\u200d\u2642|\U0001f64d\U0001f3fd\u200d\u2642|\U0001f64d\U0001f3fe\u200d\u2642|\U0001f64d\U0001f3ff\u200d\u2642|\U0001f64d\u200d\u2640\ufe0f|\U0001f64d\U0001f3fb\u200d\u2640|\U0001f64d\U0001f3fc\u200d\u2640|\U0001f64d\U0001f3fd\u200d\u2640|\U0001f64d\U0001f3fe\u200d\u2640|\U0001f64d\U0001f3ff\u200d\u2640|\U0001f64e\u200d\u2642\ufe0f|\U0001f64e\U0001f3fb\u200d\u2642|\U0001f64e\U0001f3fc\u200d\u2642|\U0001f64e\U0001f3fd\u200d\u2642|\U0001f64e\U0001f3fe\u200d\u2642|\U0001f64e\U0001f3ff\u200d\u2642|\U0001f64e\u200d\u2640\ufe0f|\U0001f64e\U0001f3fb\u200d\u2640|\U0001f64e\U0001f3fc\u200d\u2640|\U0001f64e\U0001f3fd\u200d\u2640|\U0001f64e\U0001f3fe\u200d\u2640|\U0001f64e\U0001f3ff\u200d\u2640|\U0001f645\u200d\u2642\ufe0f|\U0001f645\U0001f3fb\u200d\u2642|\U0001f645\U0001f3fc\u200d\u2642|\U0001f645\U0001f3fd\u200d\u2642|\U0001f645\U0001f3fe\u200d\u2642|\U0001f645\U0001f3ff\u200d\u2642|\U0001f645\u200d\u2640\ufe0f|\U0001f645\U0001f3fb\u200d\u2640|\U0001f645\U0001f3fc\u200d\u2640|\U0001f645\U0001f3fd\u200d\u2640|\U0001f645\U0001f3fe\u200d\u2640|\U0001f645\U0001f3ff\u200d\u2640|\U0001f646\u200d\u2642\ufe0f|\U0001f646\U0001f3fb\u200d\u2642|\U0001f646\U0001f3fc\u200d\u2642|\U0001f646\U0001f3fd\u200d\u2642|\U0001f646\U0001f3fe\u200d\u2642|\U0001f646\U0001f3ff\u200d\u2642|\U0001f646\u200d\u2640\ufe0f|\U0001f646\U0001f3fb\u200d\u2640|\U0001f646\U0001f3fc\u200d\u2640|\U0001f646\U0001f3fd\u200d\u2640|\U0001f646\U0001f3fe\u200d\u2640|\U0001f646\U0001f3ff\u200d\u2640|\U0001f481\u200d\u2642\ufe0f|\U0001f481\U0001f3fb\u200d\u2642|\U0001f481\U0001f3fc\u200d\u2642|\U0001f481\U0001f3fd\u200d\u2642|\U0001f481\U0001f3fe\u200d\u2642|\U0001f481\U0001f3ff\u200d\u2642|\U0001f481\u200d\u2640\ufe0f|\U0001f481\U0001f3fb\u200d\u2640|\U0001f481\U0001f3fc\u200d\u2640|\U0001f481\U0001f3fd\u200d\u2640|\U0001f481\U0001f3fe\u200d\u2640|\U0001f481\U0001f3ff\u200d\u2640|\U0001f64b\u200d\u2642\ufe0f|\U0001f64b\U0001f3fb\u200d\u2642|\U0001f64b\U0001f3fc\u200d\u2642|\U0001f64b\U0001f3fd\u200d\u2642|\U0001f64b\U0001f3fe\u200d\u2642|\U0001f64b\U0001f3ff\u200d\u2642|\U0001f64b\u200d\u2640\ufe0f|\U0001f64b\U0001f3fb\u200d\u2640|\U0001f64b\U0001f3fc\u200d\u2640|\U0001f64b\U0001f3fd\u200d\u2640|\U0001f64b\U0001f3fe\u200d\u2640|\U0001f64b\U0001f3ff\u200d\u2640|\U0001f9cf\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2642|\U0001f9cf\U0001f3fc\u200d\u2642|\U0001f9cf\U0001f3fd\u200d\u2642|\U0001f9cf\U0
001f3fe\u200d\u2642|\U0001f9cf\U0001f3ff\u200d\u2642|\U0001f9cf\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2640|\U0001f9cf\U0001f3fc\u200d\u2640|\U0001f9cf\U0001f3fd\u200d\u2640|\U0001f9cf\U0001f3fe\u200d\u2640|\U0001f9cf\U0001f3ff\u200d\u2640|\U0001f647\u200d\u2642\ufe0f|\U0001f647\U0001f3fb\u200d\u2642|\U0001f647\U0001f3fc\u200d\u2642|\U0001f647\U0001f3fd\u200d\u2642|\U0001f647\U0001f3fe\u200d\u2642|\U0001f647\U0001f3ff\u200d\u2642|\U0001f647\u200d\u2640\ufe0f|\U0001f647\U0001f3fb\u200d\u2640|\U0001f647\U0001f3fc\u200d\u2640|\U0001f647\U0001f3fd\u200d\u2640|\U0001f647\U0001f3fe\u200d\u2640|\U0001f647\U0001f3ff\u200d\u2640|\U0001f926\u200d\u2642\ufe0f|\U0001f926\U0001f3fb\u200d\u2642|\U0001f926\U0001f3fc\u200d\u2642|\U0001f926\U0001f3fd\u200d\u2642|\U0001f926\U0001f3fe\u200d\u2642|\U0001f926\U0001f3ff\u200d\u2642|\U0001f926\u200d\u2640\ufe0f|\U0001f926\U0001f3fb\u200d\u2640|\U0001f926\U0001f3fc\u200d\u2640|\U0001f926\U0001f3fd\u200d\u2640|\U0001f926\U0001f3fe\u200d\u2640|\U0001f926\U0001f3ff\u200d\u2640|\U0001f937\u200d\u2642\ufe0f|\U0001f937\U0001f3fb\u200d\u2642|\U0001f937\U0001f3fc\u200d\u2642|\U0001f937\U0001f3fd\u200d\u2642|\U0001f937\U0001f3fe\u200d\u2642|\U0001f937\U0001f3ff\u200d\u2642|\U0001f937\u200d\u2640\ufe0f|\U0001f937\U0001f3fb\u200d\u2640|\U0001f937\U0001f3fc\u200d\u2640|\U0001f937\U0001f3fd\u200d\u2640|\U0001f937\U0001f3fe\u200d\u2640|\U0001f937\U0001f3ff\u200d\u2640|\U0001f468\u200d\u2695\ufe0f|\U0001f468\U0001f3fb\u200d\u2695|\U0001f468\U0001f3fc\u200d\u2695|\U0001f468\U0001f3fd\u200d\u2695|\U0001f468\U0001f3fe\u200d\u2695|\U0001f468\U0001f3ff\u200d\u2695|\U0001f469\u200d\u2695\ufe0f|\U0001f469\U0001f3fb\u200d\u2695|\U0001f469\U0001f3fc\u200d\u2695|\U0001f469\U0001f3fd\u200d\u2695|\U0001f469\U0001f3fe\u200d\u2695|\U0001f469\U0001f3ff\u200d\u2695|\U0001f468\U0001f3fb\u200d\U0001f393|\U0001f468\U0001f3fc\u200d\U0001f393|\U0001f468\U0001f3fd\u200d\U0001f393|\U0001f468\U0001f3fe\u200d\U0001f393|\U0001f468\U0001f3ff\u200d\U0001f393|\U0001f469\U0001f3fb\u200d\U0001f393|\U0001f469\U0001f3fc\u200d\U0001f393|\U0001f469\U0001f3fd\u200d\U0001f393|\U0001f469\U0001f3fe\u200d\U0001f393|\U0001f469\U0001f3ff\u200d\U0001f393|\U0001f468\U0001f3fb\u200d\U0001f3eb|\U0001f468\U0001f3fc\u200d\U0001f3eb|\U0001f468\U0001f3fd\u200d\U0001f3eb|\U0001f468\U0001f3fe\u200d\U0001f3eb|\U0001f468\U0001f3ff\u200d\U0001f3eb|\U0001f469\U0001f3fb\u200d\U0001f3eb|\U0001f469\U0001f3fc\u200d\U0001f3eb|\U0001f469\U0001f3fd\u200d\U0001f3eb|\U0001f469\U0001f3fe\u200d\U0001f3eb|\U0001f469\U0001f3ff\u200d\U0001f3eb|\U0001f468\u200d\u2696\ufe0f|\U0001f468\U0001f3fb\u200d\u2696|\U0001f468\U0001f3fc\u200d\u2696|\U0001f468\U0001f3fd\u200d\u2696|\U0001f468\U0001f3fe\u200d\u2696|\U0001f468\U0001f3ff\u200d\u2696|\U0001f469\u200d\u2696\ufe0f|\U0001f469\U0001f3fb\u200d\u2696|\U0001f469\U0001f3fc\u200d\u2696|\U0001f469\U0001f3fd\u200d\u2696|\U0001f469\U0001f3fe\u200d\u2696|\U0001f469\U0001f3ff\u200d\u2696|\U0001f468\U0001f3fb\u200d\U0001f33e|\U0001f468\U0001f3fc\u200d\U0001f33e|\U0001f468\U0001f3fd\u200d\U0001f33e|\U0001f468\U0001f3fe\u200d\U0001f33e|\U0001f468\U0001f3ff\u200d\U0001f33e|\U0001f469\U0001f3fb\u200d\U0001f33e|\U0001f469\U0001f3fc\u200d\U0001f33e|\U0001f469\U0001f3fd\u200d\U0001f33e|\U0001f469\U0001f3fe\u200d\U0001f33e|\U0001f469\U0001f3ff\u200d\U0001f33e|\U0001f468\U0001f3fb\u200d\U0001f373|\U0001f468\U0001f3fc\u200d\U0001f373|\U0001f468\U0001f3fd\u200d\U0001f373|\U0001f468\U0001f3fe\u200d\U0001f373|\U0001f468\U0001f3ff\u200d\U0001f373|\U0001f469\U0001f3fb\u200d\U0001f373|\U0001f469\U0001f3fc\u200d\U0001f373
|\U0001f469\U0001f3fd\u200d\U0001f373|\U0001f469\U0001f3fe\u200d\U0001f373|\U0001f469\U0001f3ff\u200d\U0001f373|\U0001f468\U0001f3fb\u200d\U0001f527|\U0001f468\U0001f3fc\u200d\U0001f527|\U0001f468\U0001f3fd\u200d\U0001f527|\U0001f468\U0001f3fe\u200d\U0001f527|\U0001f468\U0001f3ff\u200d\U0001f527|\U0001f469\U0001f3fb\u200d\U0001f527|\U0001f469\U0001f3fc\u200d\U0001f527|\U0001f469\U0001f3fd\u200d\U0001f527|\U0001f469\U0001f3fe\u200d\U0001f527|\U0001f469\U0001f3ff\u200d\U0001f527|\U0001f468\U0001f3fb\u200d\U0001f3ed|\U0001f468\U0001f3fc\u200d\U0001f3ed|\U0001f468\U0001f3fd\u200d\U0001f3ed|\U0001f468\U0001f3fe\u200d\U0001f3ed|\U0001f468\U0001f3ff\u200d\U0001f3ed|\U0001f469\U0001f3fb\u200d\U0001f3ed|\U0001f469\U0001f3fc\u200d\U0001f3ed|\U0001f469\U0001f3fd\u200d\U0001f3ed|\U0001f469\U0001f3fe\u200d\U0001f3ed|\U0001f469\U0001f3ff\u200d\U0001f3ed|\U0001f468\U0001f3fb\u200d\U0001f4bc|\U0001f468\U0001f3fc\u200d\U0001f4bc|\U0001f468\U0001f3fd\u200d\U0001f4bc|\U0001f468\U0001f3fe\u200d\U0001f4bc|\U0001f468\U0001f3ff\u200d\U0001f4bc|\U0001f469\U0001f3fb\u200d\U0001f4bc|\U0001f469\U0001f3fc\u200d\U0001f4bc|\U0001f469\U0001f3fd\u200d\U0001f4bc|\U0001f469\U0001f3fe\u200d\U0001f4bc|\U0001f469\U0001f3ff\u200d\U0001f4bc|\U0001f468\U0001f3fb\u200d\U0001f52c|\U0001f468\U0001f3fc\u200d\U0001f52c|\U0001f468\U0001f3fd\u200d\U0001f52c|\U0001f468\U0001f3fe\u200d\U0001f52c|\U0001f468\U0001f3ff\u200d\U0001f52c|\U0001f469\U0001f3fb\u200d\U0001f52c|\U0001f469\U0001f3fc\u200d\U0001f52c|\U0001f469\U0001f3fd\u200d\U0001f52c|\U0001f469\U0001f3fe\u200d\U0001f52c|\U0001f469\U0001f3ff\u200d\U0001f52c|\U0001f468\U0001f3fb\u200d\U0001f4bb|\U0001f468\U0001f3fc\u200d\U0001f4bb|\U0001f468\U0001f3fd\u200d\U0001f4bb|\U0001f468\U0001f3fe\u200d\U0001f4bb|\U0001f468\U0001f3ff\u200d\U0001f4bb|\U0001f469\U0001f3fb\u200d\U0001f4bb|\U0001f469\U0001f3fc\u200d\U0001f4bb|\U0001f469\U0001f3fd\u200d\U0001f4bb|\U0001f469\U0001f3fe\u200d\U0001f4bb|\U0001f469\U0001f3ff\u200d\U0001f4bb|\U0001f468\U0001f3fb\u200d\U0001f3a4|\U0001f468\U0001f3fc\u200d\U0001f3a4|\U0001f468\U0001f3fd\u200d\U0001f3a4|\U0001f468\U0001f3fe\u200d\U0001f3a4|\U0001f468\U0001f3ff\u200d\U0001f3a4|\U0001f469\U0001f3fb\u200d\U0001f3a4|\U0001f469\U0001f3fc\u200d\U0001f3a4|\U0001f469\U0001f3fd\u200d\U0001f3a4|\U0001f469\U0001f3fe\u200d\U0001f3a4|\U0001f469\U0001f3ff\u200d\U0001f3a4|\U0001f468\U0001f3fb\u200d\U0001f3a8|\U0001f468\U0001f3fc\u200d\U0001f3a8|\U0001f468\U0001f3fd\u200d\U0001f3a8|\U0001f468\U0001f3fe\u200d\U0001f3a8|\U0001f468\U0001f3ff\u200d\U0001f3a8|\U0001f469\U0001f3fb\u200d\U0001f3a8|\U0001f469\U0001f3fc\u200d\U0001f3a8|\U0001f469\U0001f3fd\u200d\U0001f3a8|\U0001f469\U0001f3fe\u200d\U0001f3a8|\U0001f469\U0001f3ff\u200d\U0001f3a8|\U0001f468\u200d\u2708\ufe0f|\U0001f468\U0001f3fb\u200d\u2708|\U0001f468\U0001f3fc\u200d\u2708|\U0001f468\U0001f3fd\u200d\u2708|\U0001f468\U0001f3fe\u200d\u2708|\U0001f468\U0001f3ff\u200d\u2708|\U0001f469\u200d\u2708\ufe0f|\U0001f469\U0001f3fb\u200d\u2708|\U0001f469\U0001f3fc\u200d\u2708|\U0001f469\U0001f3fd\u200d\u2708|\U0001f469\U0001f3fe\u200d\u2708|\U0001f469\U0001f3ff\u200d\u2708|\U0001f468\U0001f3fb\u200d\U0001f680|\U0001f468\U0001f3fc\u200d\U0001f680|\U0001f468\U0001f3fd\u200d\U0001f680|\U0001f468\U0001f3fe\u200d\U0001f680|\U0001f468\U0001f3ff\u200d\U0001f680|\U0001f469\U0001f3fb\u200d\U0001f680|\U0001f469\U0001f3fc\u200d\U0001f680|\U0001f469\U0001f3fd\u200d\U0001f680|\U0001f469\U0001f3fe\u200d\U0001f680|\U0001f469\U0001f3ff\u200d\U0001f680|\U0001f468\U0001f3fb\u200d\U0001f692|\U0001f468\U0001f3fc\u200d\U0001f692|\U0001f468\U0001f3fd\
u200d\U0001f692|\U0001f468\U0001f3fe\u200d\U0001f692|\U0001f468\U0001f3ff\u200d\U0001f692|\U0001f469\U0001f3fb\u200d\U0001f692|\U0001f469\U0001f3fc\u200d\U0001f692|\U0001f469\U0001f3fd\u200d\U0001f692|\U0001f469\U0001f3fe\u200d\U0001f692|\U0001f469\U0001f3ff\u200d\U0001f692|\U0001f46e\u200d\u2642\ufe0f|\U0001f46e\U0001f3fb\u200d\u2642|\U0001f46e\U0001f3fc\u200d\u2642|\U0001f46e\U0001f3fd\u200d\u2642|\U0001f46e\U0001f3fe\u200d\u2642|\U0001f46e\U0001f3ff\u200d\u2642|\U0001f46e\u200d\u2640\ufe0f|\U0001f46e\U0001f3fb\u200d\u2640|\U0001f46e\U0001f3fc\u200d\u2640|\U0001f46e\U0001f3fd\u200d\u2640|\U0001f46e\U0001f3fe\u200d\u2640|\U0001f46e\U0001f3ff\u200d\u2640|\U0001f575\u200d\u2642\ufe0f|\U0001f575\ufe0f\u200d\u2642|\U0001f575\U0001f3fb\u200d\u2642|\U0001f575\U0001f3fc\u200d\u2642|\U0001f575\U0001f3fd\u200d\u2642|\U0001f575\U0001f3fe\u200d\u2642|\U0001f575\U0001f3ff\u200d\u2642|\U0001f575\u200d\u2640\ufe0f|\U0001f575\ufe0f\u200d\u2640|\U0001f575\U0001f3fb\u200d\u2640|\U0001f575\U0001f3fc\u200d\u2640|\U0001f575\U0001f3fd\u200d\u2640|\U0001f575\U0001f3fe\u200d\u2640|\U0001f575\U0001f3ff\u200d\u2640|\U0001f482\u200d\u2642\ufe0f|\U0001f482\U0001f3fb\u200d\u2642|\U0001f482\U0001f3fc\u200d\u2642|\U0001f482\U0001f3fd\u200d\u2642|\U0001f482\U0001f3fe\u200d\u2642|\U0001f482\U0001f3ff\u200d\u2642|\U0001f482\u200d\u2640\ufe0f|\U0001f482\U0001f3fb\u200d\u2640|\U0001f482\U0001f3fc\u200d\u2640|\U0001f482\U0001f3fd\u200d\u2640|\U0001f482\U0001f3fe\u200d\u2640|\U0001f482\U0001f3ff\u200d\u2640|\U0001f477\u200d\u2642\ufe0f|\U0001f477\U0001f3fb\u200d\u2642|\U0001f477\U0001f3fc\u200d\u2642|\U0001f477\U0001f3fd\u200d\u2642|\U0001f477\U0001f3fe\u200d\u2642|\U0001f477\U0001f3ff\u200d\u2642|\U0001f477\u200d\u2640\ufe0f|\U0001f477\U0001f3fb\u200d\u2640|\U0001f477\U0001f3fc\u200d\u2640|\U0001f477\U0001f3fd\u200d\u2640|\U0001f477\U0001f3fe\u200d\u2640|\U0001f477\U0001f3ff\u200d\u2640|\U0001f473\u200d\u2642\ufe0f|\U0001f473\U0001f3fb\u200d\u2642|\U0001f473\U0001f3fc\u200d\u2642|\U0001f473\U0001f3fd\u200d\u2642|\U0001f473\U0001f3fe\u200d\u2642|\U0001f473\U0001f3ff\u200d\u2642|\U0001f473\u200d\u2640\ufe0f|\U0001f473\U0001f3fb\u200d\u2640|\U0001f473\U0001f3fc\u200d\u2640|\U0001f473\U0001f3fd\u200d\u2640|\U0001f473\U0001f3fe\u200d\u2640|\U0001f473\U0001f3ff\u200d\u2640|\U0001f9b8\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2642|\U0001f9b8\U0001f3fc\u200d\u2642|\U0001f9b8\U0001f3fd\u200d\u2642|\U0001f9b8\U0001f3fe\u200d\u2642|\U0001f9b8\U0001f3ff\u200d\u2642|\U0001f9b8\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2640|\U0001f9b8\U0001f3fc\u200d\u2640|\U0001f9b8\U0001f3fd\u200d\u2640|\U0001f9b8\U0001f3fe\u200d\u2640|\U0001f9b8\U0001f3ff\u200d\u2640|\U0001f9b9\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2642|\U0001f9b9\U0001f3fc\u200d\u2642|\U0001f9b9\U0001f3fd\u200d\u2642|\U0001f9b9\U0001f3fe\u200d\u2642|\U0001f9b9\U0001f3ff\u200d\u2642|\U0001f9b9\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2640|\U0001f9b9\U0001f3fc\u200d\u2640|\U0001f9b9\U0001f3fd\u200d\u2640|\U0001f9b9\U0001f3fe\u200d\u2640|\U0001f9b9\U0001f3ff\u200d\u2640|\U0001f9d9\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2642|\U0001f9d9\U0001f3fc\u200d\u2642|\U0001f9d9\U0001f3fd\u200d\u2642|\U0001f9d9\U0001f3fe\u200d\u2642|\U0001f9d9\U0001f3ff\u200d\u2642|\U0001f9d9\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2640|\U0001f9d9\U0001f3fc\u200d\u2640|\U0001f9d9\U0001f3fd\u200d\u2640|\U0001f9d9\U0001f3fe\u200d\u2640|\U0001f9d9\U0001f3ff\u200d\u2640|\U0001f9da\u200d\u2642\ufe0f|\U0001f9da\U0001f3fb\u200d\u2642|\U0001f9da\U0001f3fc\u200d\u2642|\U0001f9da\U0001f3fd\u2
00d\u2642|\U0001f9da\U0001f3fe\u200d\u2642|\U0001f9da\U0001f3ff\u200d\u2642|\U0001f9da\u200d\u2640\ufe0f|\U0001f9da\U0001f3fb\u200d\u2640|\U0001f9da\U0001f3fc\u200d\u2640|\U0001f9da\U0001f3fd\u200d\u2640|\U0001f9da\U0001f3fe\u200d\u2640|\U0001f9da\U0001f3ff\u200d\u2640|\U0001f9db\u200d\u2642\ufe0f|\U0001f9db\U0001f3fb\u200d\u2642|\U0001f9db\U0001f3fc\u200d\u2642|\U0001f9db\U0001f3fd\u200d\u2642|\U0001f9db\U0001f3fe\u200d\u2642|\U0001f9db\U0001f3ff\u200d\u2642|\U0001f9db\u200d\u2640\ufe0f|\U0001f9db\U0001f3fb\u200d\u2640|\U0001f9db\U0001f3fc\u200d\u2640|\U0001f9db\U0001f3fd\u200d\u2640|\U0001f9db\U0001f3fe\u200d\u2640|\U0001f9db\U0001f3ff\u200d\u2640|\U0001f9dc\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2642|\U0001f9dc\U0001f3fc\u200d\u2642|\U0001f9dc\U0001f3fd\u200d\u2642|\U0001f9dc\U0001f3fe\u200d\u2642|\U0001f9dc\U0001f3ff\u200d\u2642|\U0001f9dc\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2640|\U0001f9dc\U0001f3fc\u200d\u2640|\U0001f9dc\U0001f3fd\u200d\u2640|\U0001f9dc\U0001f3fe\u200d\u2640|\U0001f9dc\U0001f3ff\u200d\u2640|\U0001f9dd\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2642|\U0001f9dd\U0001f3fc\u200d\u2642|\U0001f9dd\U0001f3fd\u200d\u2642|\U0001f9dd\U0001f3fe\u200d\u2642|\U0001f9dd\U0001f3ff\u200d\u2642|\U0001f9dd\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2640|\U0001f9dd\U0001f3fc\u200d\u2640|\U0001f9dd\U0001f3fd\u200d\u2640|\U0001f9dd\U0001f3fe\u200d\u2640|\U0001f9dd\U0001f3ff\u200d\u2640|\U0001f9de\u200d\u2642\ufe0f|\U0001f9de\u200d\u2640\ufe0f|\U0001f9df\u200d\u2642\ufe0f|\U0001f9df\u200d\u2640\ufe0f|\U0001f486\u200d\u2642\ufe0f|\U0001f486\U0001f3fb\u200d\u2642|\U0001f486\U0001f3fc\u200d\u2642|\U0001f486\U0001f3fd\u200d\u2642|\U0001f486\U0001f3fe\u200d\u2642|\U0001f486\U0001f3ff\u200d\u2642|\U0001f486\u200d\u2640\ufe0f|\U0001f486\U0001f3fb\u200d\u2640|\U0001f486\U0001f3fc\u200d\u2640|\U0001f486\U0001f3fd\u200d\u2640|\U0001f486\U0001f3fe\u200d\u2640|\U0001f486\U0001f3ff\u200d\u2640|\U0001f487\u200d\u2642\ufe0f|\U0001f487\U0001f3fb\u200d\u2642|\U0001f487\U0001f3fc\u200d\u2642|\U0001f487\U0001f3fd\u200d\u2642|\U0001f487\U0001f3fe\u200d\u2642|\U0001f487\U0001f3ff\u200d\u2642|\U0001f487\u200d\u2640\ufe0f|\U0001f487\U0001f3fb\u200d\u2640|\U0001f487\U0001f3fc\u200d\u2640|\U0001f487\U0001f3fd\u200d\u2640|\U0001f487\U0001f3fe\u200d\u2640|\U0001f487\U0001f3ff\u200d\u2640|\U0001f6b6\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2642|\U0001f6b6\U0001f3fc\u200d\u2642|\U0001f6b6\U0001f3fd\u200d\u2642|\U0001f6b6\U0001f3fe\u200d\u2642|\U0001f6b6\U0001f3ff\u200d\u2642|\U0001f6b6\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2640|\U0001f6b6\U0001f3fc\u200d\u2640|\U0001f6b6\U0001f3fd\u200d\u2640|\U0001f6b6\U0001f3fe\u200d\u2640|\U0001f6b6\U0001f3ff\u200d\u2640|\U0001f9cd\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2642|\U0001f9cd\U0001f3fc\u200d\u2642|\U0001f9cd\U0001f3fd\u200d\u2642|\U0001f9cd\U0001f3fe\u200d\u2642|\U0001f9cd\U0001f3ff\u200d\u2642|\U0001f9cd\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2640|\U0001f9cd\U0001f3fc\u200d\u2640|\U0001f9cd\U0001f3fd\u200d\u2640|\U0001f9cd\U0001f3fe\u200d\u2640|\U0001f9cd\U0001f3ff\u200d\u2640|\U0001f9ce\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2642|\U0001f9ce\U0001f3fc\u200d\u2642|\U0001f9ce\U0001f3fd\u200d\u2642|\U0001f9ce\U0001f3fe\u200d\u2642|\U0001f9ce\U0001f3ff\u200d\u2642|\U0001f9ce\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2640|\U0001f9ce\U0001f3fc\u200d\u2640|\U0001f9ce\U0001f3fd\u200d\u2640|\U0001f9ce\U0001f3fe\u200d\u2640|\U0001f9ce\U0001f3ff\u200d\u2640|\U0001f468\U0001f3fb\u200d\U0001f9af|\U0001f468\U0001f3fc\u200d\U
0001f9af|\U0001f468\U0001f3fd\u200d\U0001f9af|\U0001f468\U0001f3fe\u200d\U0001f9af|\U0001f468\U0001f3ff\u200d\U0001f9af|\U0001f469\U0001f3fb\u200d\U0001f9af|\U0001f469\U0001f3fc\u200d\U0001f9af|\U0001f469\U0001f3fd\u200d\U0001f9af|\U0001f469\U0001f3fe\u200d\U0001f9af|\U0001f469\U0001f3ff\u200d\U0001f9af|\U0001f468\U0001f3fb\u200d\U0001f9bc|\U0001f468\U0001f3fc\u200d\U0001f9bc|\U0001f468\U0001f3fd\u200d\U0001f9bc|\U0001f468\U0001f3fe\u200d\U0001f9bc|\U0001f468\U0001f3ff\u200d\U0001f9bc|\U0001f469\U0001f3fb\u200d\U0001f9bc|\U0001f469\U0001f3fc\u200d\U0001f9bc|\U0001f469\U0001f3fd\u200d\U0001f9bc|\U0001f469\U0001f3fe\u200d\U0001f9bc|\U0001f469\U0001f3ff\u200d\U0001f9bc|\U0001f468\U0001f3fb\u200d\U0001f9bd|\U0001f468\U0001f3fc\u200d\U0001f9bd|\U0001f468\U0001f3fd\u200d\U0001f9bd|\U0001f468\U0001f3fe\u200d\U0001f9bd|\U0001f468\U0001f3ff\u200d\U0001f9bd|\U0001f469\U0001f3fb\u200d\U0001f9bd|\U0001f469\U0001f3fc\u200d\U0001f9bd|\U0001f469\U0001f3fd\u200d\U0001f9bd|\U0001f469\U0001f3fe\u200d\U0001f9bd|\U0001f469\U0001f3ff\u200d\U0001f9bd|\U0001f3c3\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2642|\U0001f3c3\U0001f3fc\u200d\u2642|\U0001f3c3\U0001f3fd\u200d\u2642|\U0001f3c3\U0001f3fe\u200d\u2642|\U0001f3c3\U0001f3ff\u200d\u2642|\U0001f3c3\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2640|\U0001f3c3\U0001f3fc\u200d\u2640|\U0001f3c3\U0001f3fd\u200d\u2640|\U0001f3c3\U0001f3fe\u200d\u2640|\U0001f3c3\U0001f3ff\u200d\u2640|\U0001f46f\u200d\u2642\ufe0f|\U0001f46f\u200d\u2640\ufe0f|\U0001f9d6\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2642|\U0001f9d6\U0001f3fc\u200d\u2642|\U0001f9d6\U0001f3fd\u200d\u2642|\U0001f9d6\U0001f3fe\u200d\u2642|\U0001f9d6\U0001f3ff\u200d\u2642|\U0001f9d6\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2640|\U0001f9d6\U0001f3fc\u200d\u2640|\U0001f9d6\U0001f3fd\u200d\u2640|\U0001f9d6\U0001f3fe\u200d\u2640|\U0001f9d6\U0001f3ff\u200d\u2640|\U0001f9d7\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2642|\U0001f9d7\U0001f3fc\u200d\u2642|\U0001f9d7\U0001f3fd\u200d\u2642|\U0001f9d7\U0001f3fe\u200d\u2642|\U0001f9d7\U0001f3ff\u200d\u2642|\U0001f9d7\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2640|\U0001f9d7\U0001f3fc\u200d\u2640|\U0001f9d7\U0001f3fd\u200d\u2640|\U0001f9d7\U0001f3fe\u200d\u2640|\U0001f9d7\U0001f3ff\u200d\u2640|\U0001f3cc\u200d\u2642\ufe0f|\U0001f3cc\ufe0f\u200d\u2642|\U0001f3cc\U0001f3fb\u200d\u2642|\U0001f3cc\U0001f3fc\u200d\u2642|\U0001f3cc\U0001f3fd\u200d\u2642|\U0001f3cc\U0001f3fe\u200d\u2642|\U0001f3cc\U0001f3ff\u200d\u2642|\U0001f3cc\u200d\u2640\ufe0f|\U0001f3cc\ufe0f\u200d\u2640|\U0001f3cc\U0001f3fb\u200d\u2640|\U0001f3cc\U0001f3fc\u200d\u2640|\U0001f3cc\U0001f3fd\u200d\u2640|\U0001f3cc\U0001f3fe\u200d\u2640|\U0001f3cc\U0001f3ff\u200d\u2640|\U0001f3c4\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2642|\U0001f3c4\U0001f3fc\u200d\u2642|\U0001f3c4\U0001f3fd\u200d\u2642|\U0001f3c4\U0001f3fe\u200d\u2642|\U0001f3c4\U0001f3ff\u200d\u2642|\U0001f3c4\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2640|\U0001f3c4\U0001f3fc\u200d\u2640|\U0001f3c4\U0001f3fd\u200d\u2640|\U0001f3c4\U0001f3fe\u200d\u2640|\U0001f3c4\U0001f3ff\u200d\u2640|\U0001f6a3\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2642|\U0001f6a3\U0001f3fc\u200d\u2642|\U0001f6a3\U0001f3fd\u200d\u2642|\U0001f6a3\U0001f3fe\u200d\u2642|\U0001f6a3\U0001f3ff\u200d\u2642|\U0001f6a3\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2640|\U0001f6a3\U0001f3fc\u200d\u2640|\U0001f6a3\U0001f3fd\u200d\u2640|\U0001f6a3\U0001f3fe\u200d\u2640|\U0001f6a3\U0001f3ff\u200d\u2640|\U0001f3ca\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2642|\U00
01f3ca\U0001f3fc\u200d\u2642|\U0001f3ca\U0001f3fd\u200d\u2642|\U0001f3ca\U0001f3fe\u200d\u2642|\U0001f3ca\U0001f3ff\u200d\u2642|\U0001f3ca\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2640|\U0001f3ca\U0001f3fc\u200d\u2640|\U0001f3ca\U0001f3fd\u200d\u2640|\U0001f3ca\U0001f3fe\u200d\u2640|\U0001f3ca\U0001f3ff\u200d\u2640|\u26f9\u200d\u2642\ufe0f|\u26f9\ufe0f\u200d\u2642|\u26f9\U0001f3fb\u200d\u2642|\u26f9\U0001f3fc\u200d\u2642|\u26f9\U0001f3fd\u200d\u2642|\u26f9\U0001f3fe\u200d\u2642|\u26f9\U0001f3ff\u200d\u2642|\u26f9\u200d\u2640\ufe0f|\u26f9\ufe0f\u200d\u2640|\u26f9\U0001f3fb\u200d\u2640|\u26f9\U0001f3fc\u200d\u2640|\u26f9\U0001f3fd\u200d\u2640|\u26f9\U0001f3fe\u200d\u2640|\u26f9\U0001f3ff\u200d\u2640|\U0001f3cb\u200d\u2642\ufe0f|\U0001f3cb\ufe0f\u200d\u2642|\U0001f3cb\U0001f3fb\u200d\u2642|\U0001f3cb\U0001f3fc\u200d\u2642|\U0001f3cb\U0001f3fd\u200d\u2642|\U0001f3cb\U0001f3fe\u200d\u2642|\U0001f3cb\U0001f3ff\u200d\u2642|\U0001f3cb\u200d\u2640\ufe0f|\U0001f3cb\ufe0f\u200d\u2640|\U0001f3cb\U0001f3fb\u200d\u2640|\U0001f3cb\U0001f3fc\u200d\u2640|\U0001f3cb\U0001f3fd\u200d\u2640|\U0001f3cb\U0001f3fe\u200d\u2640|\U0001f3cb\U0001f3ff\u200d\u2640|\U0001f6b4\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2642|\U0001f6b4\U0001f3fc\u200d\u2642|\U0001f6b4\U0001f3fd\u200d\u2642|\U0001f6b4\U0001f3fe\u200d\u2642|\U0001f6b4\U0001f3ff\u200d\u2642|\U0001f6b4\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2640|\U0001f6b4\U0001f3fc\u200d\u2640|\U0001f6b4\U0001f3fd\u200d\u2640|\U0001f6b4\U0001f3fe\u200d\u2640|\U0001f6b4\U0001f3ff\u200d\u2640|\U0001f6b5\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2642|\U0001f6b5\U0001f3fc\u200d\u2642|\U0001f6b5\U0001f3fd\u200d\u2642|\U0001f6b5\U0001f3fe\u200d\u2642|\U0001f6b5\U0001f3ff\u200d\u2642|\U0001f6b5\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2640|\U0001f6b5\U0001f3fc\u200d\u2640|\U0001f6b5\U0001f3fd\u200d\u2640|\U0001f6b5\U0001f3fe\u200d\u2640|\U0001f6b5\U0001f3ff\u200d\u2640|\U0001f938\u200d\u2642\ufe0f|\U0001f938\U0001f3fb\u200d\u2642|\U0001f938\U0001f3fc\u200d\u2642|\U0001f938\U0001f3fd\u200d\u2642|\U0001f938\U0001f3fe\u200d\u2642|\U0001f938\U0001f3ff\u200d\u2642|\U0001f938\u200d\u2640\ufe0f|\U0001f938\U0001f3fb\u200d\u2640|\U0001f938\U0001f3fc\u200d\u2640|\U0001f938\U0001f3fd\u200d\u2640|\U0001f938\U0001f3fe\u200d\u2640|\U0001f938\U0001f3ff\u200d\u2640|\U0001f93c\u200d\u2642\ufe0f|\U0001f93c\u200d\u2640\ufe0f|\U0001f93d\u200d\u2642\ufe0f|\U0001f93d\U0001f3fb\u200d\u2642|\U0001f93d\U0001f3fc\u200d\u2642|\U0001f93d\U0001f3fd\u200d\u2642|\U0001f93d\U0001f3fe\u200d\u2642|\U0001f93d\U0001f3ff\u200d\u2642|\U0001f93d\u200d\u2640\ufe0f|\U0001f93d\U0001f3fb\u200d\u2640|\U0001f93d\U0001f3fc\u200d\u2640|\U0001f93d\U0001f3fd\u200d\u2640|\U0001f93d\U0001f3fe\u200d\u2640|\U0001f93d\U0001f3ff\u200d\u2640|\U0001f93e\u200d\u2642\ufe0f|\U0001f93e\U0001f3fb\u200d\u2642|\U0001f93e\U0001f3fc\u200d\u2642|\U0001f93e\U0001f3fd\u200d\u2642|\U0001f93e\U0001f3fe\u200d\u2642|\U0001f93e\U0001f3ff\u200d\u2642|\U0001f93e\u200d\u2640\ufe0f|\U0001f93e\U0001f3fb\u200d\u2640|\U0001f93e\U0001f3fc\u200d\u2640|\U0001f93e\U0001f3fd\u200d\u2640|\U0001f93e\U0001f3fe\u200d\u2640|\U0001f93e\U0001f3ff\u200d\u2640|\U0001f939\u200d\u2642\ufe0f|\U0001f939\U0001f3fb\u200d\u2642|\U0001f939\U0001f3fc\u200d\u2642|\U0001f939\U0001f3fd\u200d\u2642|\U0001f939\U0001f3fe\u200d\u2642|\U0001f939\U0001f3ff\u200d\u2642|\U0001f939\u200d\u2640\ufe0f|\U0001f939\U0001f3fb\u200d\u2640|\U0001f939\U0001f3fc\u200d\u2640|\U0001f939\U0001f3fd\u200d\u2640|\U0001f939\U0001f3fe\u200d\u2640|\U0001f939\U0001f3ff\u200d\u2640|\U0001f9d8\
u200d\u2642\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2642|\U0001f9d8\U0001f3fc\u200d\u2642|\U0001f9d8\U0001f3fd\u200d\u2642|\U0001f9d8\U0001f3fe\u200d\u2642|\U0001f9d8\U0001f3ff\u200d\u2642|\U0001f9d8\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2640|\U0001f9d8\U0001f3fc\u200d\u2640|\U0001f9d8\U0001f3fd\u200d\u2640|\U0001f9d8\U0001f3fe\u200d\u2640|\U0001f9d8\U0001f3ff\u200d\u2640|\U0001f3f3\ufe0f\u200d\U0001f308|\U0001f3f4\u200d\u2620\ufe0f|\U0001f441\u200d\U0001f5e8|\U0001f471\u200d\u2642|\U0001f468\u200d\U0001f9b0|\U0001f468\u200d\U0001f9b1|\U0001f468\u200d\U0001f9b3|\U0001f468\u200d\U0001f9b2|\U0001f471\u200d\u2640|\U0001f469\u200d\U0001f9b0|\U0001f469\u200d\U0001f9b1|\U0001f469\u200d\U0001f9b3|\U0001f469\u200d\U0001f9b2|\U0001f64d\u200d\u2642|\U0001f64d\u200d\u2640|\U0001f64e\u200d\u2642|\U0001f64e\u200d\u2640|\U0001f645\u200d\u2642|\U0001f645\u200d\u2640|\U0001f646\u200d\u2642|\U0001f646\u200d\u2640|\U0001f481\u200d\u2642|\U0001f481\u200d\u2640|\U0001f64b\u200d\u2642|\U0001f64b\u200d\u2640|\U0001f9cf\u200d\u2642|\U0001f9cf\u200d\u2640|\U0001f647\u200d\u2642|\U0001f647\u200d\u2640|\U0001f926\u200d\u2642|\U0001f926\u200d\u2640|\U0001f937\u200d\u2642|\U0001f937\u200d\u2640|\U0001f468\u200d\u2695|\U0001f469\u200d\u2695|\U0001f468\u200d\U0001f393|\U0001f469\u200d\U0001f393|\U0001f468\u200d\U0001f3eb|\U0001f469\u200d\U0001f3eb|\U0001f468\u200d\u2696|\U0001f469\u200d\u2696|\U0001f468\u200d\U0001f33e|\U0001f469\u200d\U0001f33e|\U0001f468\u200d\U0001f373|\U0001f469\u200d\U0001f373|\U0001f468\u200d\U0001f527|\U0001f469\u200d\U0001f527|\U0001f468\u200d\U0001f3ed|\U0001f469\u200d\U0001f3ed|\U0001f468\u200d\U0001f4bc|\U0001f469\u200d\U0001f4bc|\U0001f468\u200d\U0001f52c|\U0001f469\u200d\U0001f52c|\U0001f468\u200d\U0001f4bb|\U0001f469\u200d\U0001f4bb|\U0001f468\u200d\U0001f3a4|\U0001f469\u200d\U0001f3a4|\U0001f468\u200d\U0001f3a8|\U0001f469\u200d\U0001f3a8|\U0001f468\u200d\u2708|\U0001f469\u200d\u2708|\U0001f468\u200d\U0001f680|\U0001f469\u200d\U0001f680|\U0001f468\u200d\U0001f692|\U0001f469\u200d\U0001f692|\U0001f46e\u200d\u2642|\U0001f46e\u200d\u2640|\U0001f575\u200d\u2642|\U0001f575\u200d\u2640|\U0001f482\u200d\u2642|\U0001f482\u200d\u2640|\U0001f477\u200d\u2642|\U0001f477\u200d\u2640|\U0001f473\u200d\u2642|\U0001f473\u200d\u2640|\U0001f9b8\u200d\u2642|\U0001f9b8\u200d\u2640|\U0001f9b9\u200d\u2642|\U0001f9b9\u200d\u2640|\U0001f9d9\u200d\u2642|\U0001f9d9\u200d\u2640|\U0001f9da\u200d\u2642|\U0001f9da\u200d\u2640|\U0001f9db\u200d\u2642|\U0001f9db\u200d\u2640|\U0001f9dc\u200d\u2642|\U0001f9dc\u200d\u2640|\U0001f9dd\u200d\u2642|\U0001f9dd\u200d\u2640|\U0001f9de\u200d\u2642|\U0001f9de\u200d\u2640|\U0001f9df\u200d\u2642|\U0001f9df\u200d\u2640|\U0001f486\u200d\u2642|\U0001f486\u200d\u2640|\U0001f487\u200d\u2642|\U0001f487\u200d\u2640|\U0001f6b6\u200d\u2642|\U0001f6b6\u200d\u2640|\U0001f9cd\u200d\u2642|\U0001f9cd\u200d\u2640|\U0001f9ce\u200d\u2642|\U0001f9ce\u200d\u2640|\U0001f468\u200d\U0001f9af|\U0001f469\u200d\U0001f9af|\U0001f468\u200d\U0001f9bc|\U0001f469\u200d\U0001f9bc|\U0001f468\u200d\U0001f9bd|\U0001f469\u200d\U0001f9bd|\U0001f3c3\u200d\u2642|\U0001f3c3\u200d\u2640|\U0001f46f\u200d\u2642|\U0001f46f\u200d\u2640|\U0001f9d6\u200d\u2642|\U0001f9d6\u200d\u2640|\U0001f9d7\u200d\u2642|\U0001f9d7\u200d\u2640|\U0001f3cc\u200d\u2642|\U0001f3cc\u200d\u2640|\U0001f3c4\u200d\u2642|\U0001f3c4\u200d\u2640|\U0001f6a3\u200d\u2642|\U0001f6a3\u200d\u2640|\U0001f3ca\u200d\u2642|\U0001f3ca\u200d\u2640|\u26f9\u200d\u2642|\u26f9\u200d\u2640|\U0001f3cb\u200d\u2642|\U0001f3cb\u200d\u2640|\U0001f6b4\u200d\u2642|\U0001f6b4\u200d
\u2640|\U0001f6b5\u200d\u2642|\U0001f6b5\u200d\u2640|\U0001f938\u200d\u2642|\U0001f938\u200d\u2640|\U0001f93c\u200d\u2642|\U0001f93c\u200d\u2640|\U0001f93d\u200d\u2642|\U0001f93d\u200d\u2640|\U0001f93e\u200d\u2642|\U0001f93e\u200d\u2640|\U0001f939\u200d\u2642|\U0001f939\u200d\u2640|\U0001f9d8\u200d\u2642|\U0001f9d8\u200d\u2640|\U0001f468\u200d\U0001f466|\U0001f468\u200d\U0001f467|\U0001f469\u200d\U0001f466|\U0001f469\u200d\U0001f467|\U0001f415\u200d\U0001f9ba|\\#\ufe0f\u20e3|\\*\ufe0f\u20e3|0\ufe0f\u20e3|1\ufe0f\u20e3|2\ufe0f\u20e3|3\ufe0f\u20e3|4\ufe0f\u20e3|5\ufe0f\u20e3|6\ufe0f\u20e3|7\ufe0f\u20e3|8\ufe0f\u20e3|9\ufe0f\u20e3|\U0001f3f3\u200d\U0001f308|\U0001f3f4\u200d\u2620|\u263a\ufe0f|\u2639\ufe0f|\u2620\ufe0f|\u2763\ufe0f|\u2764\ufe0f|\U0001f573\ufe0f|\U0001f5e8\ufe0f|\U0001f5ef\ufe0f|\U0001f44b\U0001f3fb|\U0001f44b\U0001f3fc|\U0001f44b\U0001f3fd|\U0001f44b\U0001f3fe|\U0001f44b\U0001f3ff|\U0001f91a\U0001f3fb|\U0001f91a\U0001f3fc|\U0001f91a\U0001f3fd|\U0001f91a\U0001f3fe|\U0001f91a\U0001f3ff|\U0001f590\ufe0f|\U0001f590\U0001f3fb|\U0001f590\U0001f3fc|\U0001f590\U0001f3fd|\U0001f590\U0001f3fe|\U0001f590\U0001f3ff|\u270b\U0001f3fb|\u270b\U0001f3fc|\u270b\U0001f3fd|\u270b\U0001f3fe|\u270b\U0001f3ff|\U0001f596\U0001f3fb|\U0001f596\U0001f3fc|\U0001f596\U0001f3fd|\U0001f596\U0001f3fe|\U0001f596\U0001f3ff|\U0001f44c\U0001f3fb|\U0001f44c\U0001f3fc|\U0001f44c\U0001f3fd|\U0001f44c\U0001f3fe|\U0001f44c\U0001f3ff|\U0001f90f\U0001f3fb|\U0001f90f\U0001f3fc|\U0001f90f\U0001f3fd|\U0001f90f\U0001f3fe|\U0001f90f\U0001f3ff|\u270c\ufe0f|\u270c\U0001f3fb|\u270c\U0001f3fc|\u270c\U0001f3fd|\u270c\U0001f3fe|\u270c\U0001f3ff|\U0001f91e\U0001f3fb|\U0001f91e\U0001f3fc|\U0001f91e\U0001f3fd|\U0001f91e\U0001f3fe|\U0001f91e\U0001f3ff|\U0001f91f\U0001f3fb|\U0001f91f\U0001f3fc|\U0001f91f\U0001f3fd|\U0001f91f\U0001f3fe|\U0001f91f\U0001f3ff|\U0001f918\U0001f3fb|\U0001f918\U0001f3fc|\U0001f918\U0001f3fd|\U0001f918\U0001f3fe|\U0001f918\U0001f3ff|\U0001f919\U0001f3fb|\U0001f919\U0001f3fc|\U0001f919\U0001f3fd|\U0001f919\U0001f3fe|\U0001f919\U0001f3ff|\U0001f448\U0001f3fb|\U0001f448\U0001f3fc|\U0001f448\U0001f3fd|\U0001f448\U0001f3fe|\U0001f448\U0001f3ff|\U0001f449\U0001f3fb|\U0001f449\U0001f3fc|\U0001f449\U0001f3fd|\U0001f449\U0001f3fe|\U0001f449\U0001f3ff|\U0001f446\U0001f3fb|\U0001f446\U0001f3fc|\U0001f446\U0001f3fd|\U0001f446\U0001f3fe|\U0001f446\U0001f3ff|\U0001f595\U0001f3fb|\U0001f595\U0001f3fc|\U0001f595\U0001f3fd|\U0001f595\U0001f3fe|\U0001f595\U0001f3ff|\U0001f447\U0001f3fb|\U0001f447\U0001f3fc|\U0001f447\U0001f3fd|\U0001f447\U0001f3fe|\U0001f447\U0001f3ff|\u261d\ufe0f|\u261d\U0001f3fb|\u261d\U0001f3fc|\u261d\U0001f3fd|\u261d\U0001f3fe|\u261d\U0001f3ff|\U0001f44d\U0001f3fb|\U0001f44d\U0001f3fc|\U0001f44d\U0001f3fd|\U0001f44d\U0001f3fe|\U0001f44d\U0001f3ff|\U0001f44e\U0001f3fb|\U0001f44e\U0001f3fc|\U0001f44e\U0001f3fd|\U0001f44e\U0001f3fe|\U0001f44e\U0001f3ff|\u270a\U0001f3fb|\u270a\U0001f3fc|\u270a\U0001f3fd|\u270a\U0001f3fe|\u270a\U0001f3ff|\U0001f44a\U0001f3fb|\U0001f44a\U0001f3fc|\U0001f44a\U0001f3fd|\U0001f44a\U0001f3fe|\U0001f44a\U0001f3ff|\U0001f91b\U0001f3fb|\U0001f91b\U0001f3fc|\U0001f91b\U0001f3fd|\U0001f91b\U0001f3fe|\U0001f91b\U0001f3ff|\U0001f91c\U0001f3fb|\U0001f91c\U0001f3fc|\U0001f91c\U0001f3fd|\U0001f91c\U0001f3fe|\U0001f91c\U0001f3ff|\U0001f44f\U0001f3fb|\U0001f44f\U0001f3fc|\U0001f44f\U0001f3fd|\U0001f44f\U0001f3fe|\U0001f44f\U0001f3ff|\U0001f64c\U0001f3fb|\U0001f64c\U0001f3fc|\U0001f64c\U0001f3fd|\U0001f64c\U0001f3fe|\U0001f64c\U0001f3ff|\U0001f450\U0001f3fb|\U0001f450\U0001f3fc|\U0001f450\U0001f3
fd|\U0001f450\U0001f3fe|\U0001f450\U0001f3ff|\U0001f932\U0001f3fb|\U0001f932\U0001f3fc|\U0001f932\U0001f3fd|\U0001f932\U0001f3fe|\U0001f932\U0001f3ff|\U0001f64f\U0001f3fb|\U0001f64f\U0001f3fc|\U0001f64f\U0001f3fd|\U0001f64f\U0001f3fe|\U0001f64f\U0001f3ff|\u270d\ufe0f|\u270d\U0001f3fb|\u270d\U0001f3fc|\u270d\U0001f3fd|\u270d\U0001f3fe|\u270d\U0001f3ff|\U0001f485\U0001f3fb|\U0001f485\U0001f3fc|\U0001f485\U0001f3fd|\U0001f485\U0001f3fe|\U0001f485\U0001f3ff|\U0001f933\U0001f3fb|\U0001f933\U0001f3fc|\U0001f933\U0001f3fd|\U0001f933\U0001f3fe|\U0001f933\U0001f3ff|\U0001f4aa\U0001f3fb|\U0001f4aa\U0001f3fc|\U0001f4aa\U0001f3fd|\U0001f4aa\U0001f3fe|\U0001f4aa\U0001f3ff|\U0001f9b5\U0001f3fb|\U0001f9b5\U0001f3fc|\U0001f9b5\U0001f3fd|\U0001f9b5\U0001f3fe|\U0001f9b5\U0001f3ff|\U0001f9b6\U0001f3fb|\U0001f9b6\U0001f3fc|\U0001f9b6\U0001f3fd|\U0001f9b6\U0001f3fe|\U0001f9b6\U0001f3ff|\U0001f442\U0001f3fb|\U0001f442\U0001f3fc|\U0001f442\U0001f3fd|\U0001f442\U0001f3fe|\U0001f442\U0001f3ff|\U0001f9bb\U0001f3fb|\U0001f9bb\U0001f3fc|\U0001f9bb\U0001f3fd|\U0001f9bb\U0001f3fe|\U0001f9bb\U0001f3ff|\U0001f443\U0001f3fb|\U0001f443\U0001f3fc|\U0001f443\U0001f3fd|\U0001f443\U0001f3fe|\U0001f443\U0001f3ff|\U0001f441\ufe0f|\U0001f476\U0001f3fb|\U0001f476\U0001f3fc|\U0001f476\U0001f3fd|\U0001f476\U0001f3fe|\U0001f476\U0001f3ff|\U0001f9d2\U0001f3fb|\U0001f9d2\U0001f3fc|\U0001f9d2\U0001f3fd|\U0001f9d2\U0001f3fe|\U0001f9d2\U0001f3ff|\U0001f466\U0001f3fb|\U0001f466\U0001f3fc|\U0001f466\U0001f3fd|\U0001f466\U0001f3fe|\U0001f466\U0001f3ff|\U0001f467\U0001f3fb|\U0001f467\U0001f3fc|\U0001f467\U0001f3fd|\U0001f467\U0001f3fe|\U0001f467\U0001f3ff|\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3fe|\U0001f9d1\U0001f3ff|\U0001f471\U0001f3fb|\U0001f471\U0001f3fc|\U0001f471\U0001f3fd|\U0001f471\U0001f3fe|\U0001f471\U0001f3ff|\U0001f468\U0001f3fb|\U0001f468\U0001f3fc|\U0001f468\U0001f3fd|\U0001f468\U0001f3fe|\U0001f468\U0001f3ff|\U0001f9d4\U0001f3fb|\U0001f9d4\U0001f3fc|\U0001f9d4\U0001f3fd|\U0001f9d4\U0001f3fe|\U0001f9d4\U0001f3ff|\U0001f469\U0001f3fb|\U0001f469\U0001f3fc|\U0001f469\U0001f3fd|\U0001f469\U0001f3fe|\U0001f469\U0001f3ff|\U0001f9d3\U0001f3fb|\U0001f9d3\U0001f3fc|\U0001f9d3\U0001f3fd|\U0001f9d3\U0001f3fe|\U0001f9d3\U0001f3ff|\U0001f474\U0001f3fb|\U0001f474\U0001f3fc|\U0001f474\U0001f3fd|\U0001f474\U0001f3fe|\U0001f474\U0001f3ff|\U0001f475\U0001f3fb|\U0001f475\U0001f3fc|\U0001f475\U0001f3fd|\U0001f475\U0001f3fe|\U0001f475\U0001f3ff|\U0001f64d\U0001f3fb|\U0001f64d\U0001f3fc|\U0001f64d\U0001f3fd|\U0001f64d\U0001f3fe|\U0001f64d\U0001f3ff|\U0001f64e\U0001f3fb|\U0001f64e\U0001f3fc|\U0001f64e\U0001f3fd|\U0001f64e\U0001f3fe|\U0001f64e\U0001f3ff|\U0001f645\U0001f3fb|\U0001f645\U0001f3fc|\U0001f645\U0001f3fd|\U0001f645\U0001f3fe|\U0001f645\U0001f3ff|\U0001f646\U0001f3fb|\U0001f646\U0001f3fc|\U0001f646\U0001f3fd|\U0001f646\U0001f3fe|\U0001f646\U0001f3ff|\U0001f481\U0001f3fb|\U0001f481\U0001f3fc|\U0001f481\U0001f3fd|\U0001f481\U0001f3fe|\U0001f481\U0001f3ff|\U0001f64b\U0001f3fb|\U0001f64b\U0001f3fc|\U0001f64b\U0001f3fd|\U0001f64b\U0001f3fe|\U0001f64b\U0001f3ff|\U0001f9cf\U0001f3fb|\U0001f9cf\U0001f3fc|\U0001f9cf\U0001f3fd|\U0001f9cf\U0001f3fe|\U0001f9cf\U0001f3ff|\U0001f647\U0001f3fb|\U0001f647\U0001f3fc|\U0001f647\U0001f3fd|\U0001f647\U0001f3fe|\U0001f647\U0001f3ff|\U0001f926\U0001f3fb|\U0001f926\U0001f3fc|\U0001f926\U0001f3fd|\U0001f926\U0001f3fe|\U0001f926\U0001f3ff|\U0001f937\U0001f3fb|\U0001f937\U0001f3fc|\U0001f937\U0001f3fd|\U0001f937\U0001f3fe|\U0001f937\U0001f3ff|\U0001f46e\U0001f3fb|\U0001f46e\U00
01f3fc|\U0001f46e\U0001f3fd|\U0001f46e\U0001f3fe|\U0001f46e\U0001f3ff|\U0001f575\ufe0f|\U0001f575\U0001f3fb|\U0001f575\U0001f3fc|\U0001f575\U0001f3fd|\U0001f575\U0001f3fe|\U0001f575\U0001f3ff|\U0001f482\U0001f3fb|\U0001f482\U0001f3fc|\U0001f482\U0001f3fd|\U0001f482\U0001f3fe|\U0001f482\U0001f3ff|\U0001f477\U0001f3fb|\U0001f477\U0001f3fc|\U0001f477\U0001f3fd|\U0001f477\U0001f3fe|\U0001f477\U0001f3ff|\U0001f934\U0001f3fb|\U0001f934\U0001f3fc|\U0001f934\U0001f3fd|\U0001f934\U0001f3fe|\U0001f934\U0001f3ff|\U0001f478\U0001f3fb|\U0001f478\U0001f3fc|\U0001f478\U0001f3fd|\U0001f478\U0001f3fe|\U0001f478\U0001f3ff|\U0001f473\U0001f3fb|\U0001f473\U0001f3fc|\U0001f473\U0001f3fd|\U0001f473\U0001f3fe|\U0001f473\U0001f3ff|\U0001f472\U0001f3fb|\U0001f472\U0001f3fc|\U0001f472\U0001f3fd|\U0001f472\U0001f3fe|\U0001f472\U0001f3ff|\U0001f9d5\U0001f3fb|\U0001f9d5\U0001f3fc|\U0001f9d5\U0001f3fd|\U0001f9d5\U0001f3fe|\U0001f9d5\U0001f3ff|\U0001f935\U0001f3fb|\U0001f935\U0001f3fc|\U0001f935\U0001f3fd|\U0001f935\U0001f3fe|\U0001f935\U0001f3ff|\U0001f470\U0001f3fb|\U0001f470\U0001f3fc|\U0001f470\U0001f3fd|\U0001f470\U0001f3fe|\U0001f470\U0001f3ff|\U0001f930\U0001f3fb|\U0001f930\U0001f3fc|\U0001f930\U0001f3fd|\U0001f930\U0001f3fe|\U0001f930\U0001f3ff|\U0001f931\U0001f3fb|\U0001f931\U0001f3fc|\U0001f931\U0001f3fd|\U0001f931\U0001f3fe|\U0001f931\U0001f3ff|\U0001f47c\U0001f3fb|\U0001f47c\U0001f3fc|\U0001f47c\U0001f3fd|\U0001f47c\U0001f3fe|\U0001f47c\U0001f3ff|\U0001f385\U0001f3fb|\U0001f385\U0001f3fc|\U0001f385\U0001f3fd|\U0001f385\U0001f3fe|\U0001f385\U0001f3ff|\U0001f936\U0001f3fb|\U0001f936\U0001f3fc|\U0001f936\U0001f3fd|\U0001f936\U0001f3fe|\U0001f936\U0001f3ff|\U0001f9b8\U0001f3fb|\U0001f9b8\U0001f3fc|\U0001f9b8\U0001f3fd|\U0001f9b8\U0001f3fe|\U0001f9b8\U0001f3ff|\U0001f9b9\U0001f3fb|\U0001f9b9\U0001f3fc|\U0001f9b9\U0001f3fd|\U0001f9b9\U0001f3fe|\U0001f9b9\U0001f3ff|\U0001f9d9\U0001f3fb|\U0001f9d9\U0001f3fc|\U0001f9d9\U0001f3fd|\U0001f9d9\U0001f3fe|\U0001f9d9\U0001f3ff|\U0001f9da\U0001f3fb|\U0001f9da\U0001f3fc|\U0001f9da\U0001f3fd|\U0001f9da\U0001f3fe|\U0001f9da\U0001f3ff|\U0001f9db\U0001f3fb|\U0001f9db\U0001f3fc|\U0001f9db\U0001f3fd|\U0001f9db\U0001f3fe|\U0001f9db\U0001f3ff|\U0001f9dc\U0001f3fb|\U0001f9dc\U0001f3fc|\U0001f9dc\U0001f3fd|\U0001f9dc\U0001f3fe|\U0001f9dc\U0001f3ff|\U0001f9dd\U0001f3fb|\U0001f9dd\U0001f3fc|\U0001f9dd\U0001f3fd|\U0001f9dd\U0001f3fe|\U0001f9dd\U0001f3ff|\U0001f486\U0001f3fb|\U0001f486\U0001f3fc|\U0001f486\U0001f3fd|\U0001f486\U0001f3fe|\U0001f486\U0001f3ff|\U0001f487\U0001f3fb|\U0001f487\U0001f3fc|\U0001f487\U0001f3fd|\U0001f487\U0001f3fe|\U0001f487\U0001f3ff|\U0001f6b6\U0001f3fb|\U0001f6b6\U0001f3fc|\U0001f6b6\U0001f3fd|\U0001f6b6\U0001f3fe|\U0001f6b6\U0001f3ff|\U0001f9cd\U0001f3fb|\U0001f9cd\U0001f3fc|\U0001f9cd\U0001f3fd|\U0001f9cd\U0001f3fe|\U0001f9cd\U0001f3ff|\U0001f9ce\U0001f3fb|\U0001f9ce\U0001f3fc|\U0001f9ce\U0001f3fd|\U0001f9ce\U0001f3fe|\U0001f9ce\U0001f3ff|\U0001f3c3\U0001f3fb|\U0001f3c3\U0001f3fc|\U0001f3c3\U0001f3fd|\U0001f3c3\U0001f3fe|\U0001f3c3\U0001f3ff|\U0001f483\U0001f3fb|\U0001f483\U0001f3fc|\U0001f483\U0001f3fd|\U0001f483\U0001f3fe|\U0001f483\U0001f3ff|\U0001f57a\U0001f3fb|\U0001f57a\U0001f3fc|\U0001f57a\U0001f3fd|\U0001f57a\U0001f3fe|\U0001f57a\U0001f3ff|\U0001f574\ufe0f|\U0001f574\U0001f3fb|\U0001f574\U0001f3fc|\U0001f574\U0001f3fd|\U0001f574\U0001f3fe|\U0001f574\U0001f3ff|\U0001f9d6\U0001f3fb|\U0001f9d6\U0001f3fc|\U0001f9d6\U0001f3fd|\U0001f9d6\U0001f3fe|\U0001f9d6\U0001f3ff|\U0001f9d7\U0001f3fb|\U0001f9d7\U0001f3fc|\U0001f9d7\U0001f3fd|\U0001f9d7\U0001f3fe|\U0001f
9d7\U0001f3ff|\U0001f3c7\U0001f3fb|\U0001f3c7\U0001f3fc|\U0001f3c7\U0001f3fd|\U0001f3c7\U0001f3fe|\U0001f3c7\U0001f3ff|\u26f7\ufe0f|\U0001f3c2\U0001f3fb|\U0001f3c2\U0001f3fc|\U0001f3c2\U0001f3fd|\U0001f3c2\U0001f3fe|\U0001f3c2\U0001f3ff|\U0001f3cc\ufe0f|\U0001f3cc\U0001f3fb|\U0001f3cc\U0001f3fc|\U0001f3cc\U0001f3fd|\U0001f3cc\U0001f3fe|\U0001f3cc\U0001f3ff|\U0001f3c4\U0001f3fb|\U0001f3c4\U0001f3fc|\U0001f3c4\U0001f3fd|\U0001f3c4\U0001f3fe|\U0001f3c4\U0001f3ff|\U0001f6a3\U0001f3fb|\U0001f6a3\U0001f3fc|\U0001f6a3\U0001f3fd|\U0001f6a3\U0001f3fe|\U0001f6a3\U0001f3ff|\U0001f3ca\U0001f3fb|\U0001f3ca\U0001f3fc|\U0001f3ca\U0001f3fd|\U0001f3ca\U0001f3fe|\U0001f3ca\U0001f3ff|\u26f9\ufe0f|\u26f9\U0001f3fb|\u26f9\U0001f3fc|\u26f9\U0001f3fd|\u26f9\U0001f3fe|\u26f9\U0001f3ff|\U0001f3cb\ufe0f|\U0001f3cb\U0001f3fb|\U0001f3cb\U0001f3fc|\U0001f3cb\U0001f3fd|\U0001f3cb\U0001f3fe|\U0001f3cb\U0001f3ff|\U0001f6b4\U0001f3fb|\U0001f6b4\U0001f3fc|\U0001f6b4\U0001f3fd|\U0001f6b4\U0001f3fe|\U0001f6b4\U0001f3ff|\U0001f6b5\U0001f3fb|\U0001f6b5\U0001f3fc|\U0001f6b5\U0001f3fd|\U0001f6b5\U0001f3fe|\U0001f6b5\U0001f3ff|\U0001f938\U0001f3fb|\U0001f938\U0001f3fc|\U0001f938\U0001f3fd|\U0001f938\U0001f3fe|\U0001f938\U0001f3ff|\U0001f93d\U0001f3fb|\U0001f93d\U0001f3fc|\U0001f93d\U0001f3fd|\U0001f93d\U0001f3fe|\U0001f93d\U0001f3ff|\U0001f93e\U0001f3fb|\U0001f93e\U0001f3fc|\U0001f93e\U0001f3fd|\U0001f93e\U0001f3fe|\U0001f93e\U0001f3ff|\U0001f939\U0001f3fb|\U0001f939\U0001f3fc|\U0001f939\U0001f3fd|\U0001f939\U0001f3fe|\U0001f939\U0001f3ff|\U0001f9d8\U0001f3fb|\U0001f9d8\U0001f3fc|\U0001f9d8\U0001f3fd|\U0001f9d8\U0001f3fe|\U0001f9d8\U0001f3ff|\U0001f6c0\U0001f3fb|\U0001f6c0\U0001f3fc|\U0001f6c0\U0001f3fd|\U0001f6c0\U0001f3fe|\U0001f6c0\U0001f3ff|\U0001f6cc\U0001f3fb|\U0001f6cc\U0001f3fc|\U0001f6cc\U0001f3fd|\U0001f6cc\U0001f3fe|\U0001f6cc\U0001f3ff|\U0001f46d\U0001f3fb|\U0001f46d\U0001f3fc|\U0001f46d\U0001f3fd|\U0001f46d\U0001f3fe|\U0001f46d\U0001f3ff|\U0001f46b\U0001f3fb|\U0001f46b\U0001f3fc|\U0001f46b\U0001f3fd|\U0001f46b\U0001f3fe|\U0001f46b\U0001f3ff|\U0001f46c\U0001f3fb|\U0001f46c\U0001f3fc|\U0001f46c\U0001f3fd|\U0001f46c\U0001f3fe|\U0001f46c\U0001f3ff|\U0001f5e3\ufe0f|\U0001f43f\ufe0f|\U0001f54a\ufe0f|\U0001f577\ufe0f|\U0001f578\ufe0f|\U0001f3f5\ufe0f|\u2618\ufe0f|\U0001f336\ufe0f|\U0001f37d\ufe0f|\U0001f5fa\ufe0f|\U0001f3d4\ufe0f|\u26f0\ufe0f|\U0001f3d5\ufe0f|\U0001f3d6\ufe0f|\U0001f3dc\ufe0f|\U0001f3dd\ufe0f|\U0001f3de\ufe0f|\U0001f3df\ufe0f|\U0001f3db\ufe0f|\U0001f3d7\ufe0f|\U0001f3d8\ufe0f|\U0001f3da\ufe0f|\u26e9\ufe0f|\U0001f3d9\ufe0f|\u2668\ufe0f|\U0001f3ce\ufe0f|\U0001f3cd\ufe0f|\U0001f6e3\ufe0f|\U0001f6e4\ufe0f|\U0001f6e2\ufe0f|\U0001f6f3\ufe0f|\u26f4\ufe0f|\U0001f6e5\ufe0f|\u2708\ufe0f|\U0001f6e9\ufe0f|\U0001f6f0\ufe0f|\U0001f6ce\ufe0f|\u23f1\ufe0f|\u23f2\ufe0f|\U0001f570\ufe0f|\U0001f321\ufe0f|\u2600\ufe0f|\u2601\ufe0f|\u26c8\ufe0f|\U0001f324\ufe0f|\U0001f325\ufe0f|\U0001f326\ufe0f|\U0001f327\ufe0f|\U0001f328\ufe0f|\U0001f329\ufe0f|\U0001f32a\ufe0f|\U0001f32b\ufe0f|\U0001f32c\ufe0f|\u2602\ufe0f|\u26f1\ufe0f|\u2744\ufe0f|\u2603\ufe0f|\u2604\ufe0f|\U0001f397\ufe0f|\U0001f39f\ufe0f|\U0001f396\ufe0f|\u26f8\ufe0f|\U0001f579\ufe0f|\u2660\ufe0f|\u2665\ufe0f|\u2666\ufe0f|\u2663\ufe0f|\u265f\ufe0f|\U0001f5bc\ufe0f|\U0001f576\ufe0f|\U0001f6cd\ufe0f|\u26d1\ufe0f|\U0001f399\ufe0f|\U0001f39a\ufe0f|\U0001f39b\ufe0f|\u260e\ufe0f|\U0001f5a5\ufe0f|\U0001f5a8\ufe0f|\u2328\ufe0f|\U0001f5b1\ufe0f|\U0001f5b2\ufe0f|\U0001f39e\ufe0f|\U0001f4fd\ufe0f|\U0001f56f\ufe0f|\U0001f5de\ufe0f|\U0001f3f7\ufe0f|\u2709\ufe0f|\U0001f5f3\ufe0f|\u270f\uf
e0f|\u2712\ufe0f|\U0001f58b\ufe0f|\U0001f58a\ufe0f|\U0001f58c\ufe0f|\U0001f58d\ufe0f|\U0001f5c2\ufe0f|\U0001f5d2\ufe0f|\U0001f5d3\ufe0f|\U0001f587\ufe0f|\u2702\ufe0f|\U0001f5c3\ufe0f|\U0001f5c4\ufe0f|\U0001f5d1\ufe0f|\U0001f5dd\ufe0f|\u26cf\ufe0f|\u2692\ufe0f|\U0001f6e0\ufe0f|\U0001f5e1\ufe0f|\u2694\ufe0f|\U0001f6e1\ufe0f|\u2699\ufe0f|\U0001f5dc\ufe0f|\u2696\ufe0f|\u26d3\ufe0f|\u2697\ufe0f|\U0001f6cf\ufe0f|\U0001f6cb\ufe0f|\u26b0\ufe0f|\u26b1\ufe0f|\u26a0\ufe0f|\u2622\ufe0f|\u2623\ufe0f|\u2b06\ufe0f|\u2197\ufe0f|\u27a1\ufe0f|\u2198\ufe0f|\u2b07\ufe0f|\u2199\ufe0f|\u2b05\ufe0f|\u2196\ufe0f|\u2195\ufe0f|\u2194\ufe0f|\u21a9\ufe0f|\u21aa\ufe0f|\u2934\ufe0f|\u2935\ufe0f|\u269b\ufe0f|\U0001f549\ufe0f|\u2721\ufe0f|\u2638\ufe0f|\u262f\ufe0f|\u271d\ufe0f|\u2626\ufe0f|\u262a\ufe0f|\u262e\ufe0f|\u25b6\ufe0f|\u23ed\ufe0f|\u23ef\ufe0f|\u25c0\ufe0f|\u23ee\ufe0f|\u23f8\ufe0f|\u23f9\ufe0f|\u23fa\ufe0f|\u23cf\ufe0f|\u2640\ufe0f|\u2642\ufe0f|\u2695\ufe0f|\u267e\ufe0f|\u267b\ufe0f|\u269c\ufe0f|\u2611\ufe0f|\u2714\ufe0f|\u2716\ufe0f|\u303d\ufe0f|\u2733\ufe0f|\u2734\ufe0f|\u2747\ufe0f|\u203c\ufe0f|\u2049\ufe0f|\u3030\ufe0f|\xa9\ufe0f|\xae\ufe0f|\u2122\ufe0f|\\#\u20e3|\\*\u20e3|0\u20e3|1\u20e3|2\u20e3|3\u20e3|4\u20e3|5\u20e3|6\u20e3|7\u20e3|8\u20e3|9\u20e3|\U0001f170\ufe0f|\U0001f171\ufe0f|\u2139\ufe0f|\u24c2\ufe0f|\U0001f17e\ufe0f|\U0001f17f\ufe0f|\U0001f202\ufe0f|\U0001f237\ufe0f|\u3297\ufe0f|\u3299\ufe0f|\u25fc\ufe0f|\u25fb\ufe0f|\u25aa\ufe0f|\u25ab\ufe0f|\U0001f3f3\ufe0f|\U0001f1e6\U0001f1e8|\U0001f1e6\U0001f1e9|\U0001f1e6\U0001f1ea|\U0001f1e6\U0001f1eb|\U0001f1e6\U0001f1ec|\U0001f1e6\U0001f1ee|\U0001f1e6\U0001f1f1|\U0001f1e6\U0001f1f2|\U0001f1e6\U0001f1f4|\U0001f1e6\U0001f1f6|\U0001f1e6\U0001f1f7|\U0001f1e6\U0001f1f8|\U0001f1e6\U0001f1f9|\U0001f1e6\U0001f1fa|\U0001f1e6\U0001f1fc|\U0001f1e6\U0001f1fd|\U0001f1e6\U0001f1ff|\U0001f1e7\U0001f1e6|\U0001f1e7\U0001f1e7|\U0001f1e7\U0001f1e9|\U0001f1e7\U0001f1ea|\U0001f1e7\U0001f1eb|\U0001f1e7\U0001f1ec|\U0001f1e7\U0001f1ed|\U0001f1e7\U0001f1ee|\U0001f1e7\U0001f1ef|\U0001f1e7\U0001f1f1|\U0001f1e7\U0001f1f2|\U0001f1e7\U0001f1f3|\U0001f1e7\U0001f1f4|\U0001f1e7\U0001f1f6|\U0001f1e7\U0001f1f7|\U0001f1e7\U0001f1f8|\U0001f1e7\U0001f1f9|\U0001f1e7\U0001f1fb|\U0001f1e7\U0001f1fc|\U0001f1e7\U0001f1fe|\U0001f1e7\U0001f1ff|\U0001f1e8\U0001f1e6|\U0001f1e8\U0001f1e8|\U0001f1e8\U0001f1e9|\U0001f1e8\U0001f1eb|\U0001f1e8\U0001f1ec|\U0001f1e8\U0001f1ed|\U0001f1e8\U0001f1ee|\U0001f1e8\U0001f1f0|\U0001f1e8\U0001f1f1|\U0001f1e8\U0001f1f2|\U0001f1e8\U0001f1f3|\U0001f1e8\U0001f1f4|\U0001f1e8\U0001f1f5|\U0001f1e8\U0001f1f7|\U0001f1e8\U0001f1fa|\U0001f1e8\U0001f1fb|\U0001f1e8\U0001f1fc|\U0001f1e8\U0001f1fd|\U0001f1e8\U0001f1fe|\U0001f1e8\U0001f1ff|\U0001f1e9\U0001f1ea|\U0001f1e9\U0001f1ec|\U0001f1e9\U0001f1ef|\U0001f1e9\U0001f1f0|\U0001f1e9\U0001f1f2|\U0001f1e9\U0001f1f4|\U0001f1e9\U0001f1ff|\U0001f1ea\U0001f1e6|\U0001f1ea\U0001f1e8|\U0001f1ea\U0001f1ea|\U0001f1ea\U0001f1ec|\U0001f1ea\U0001f1ed|\U0001f1ea\U0001f1f7|\U0001f1ea\U0001f1f8|\U0001f1ea\U0001f1f9|\U0001f1ea\U0001f1fa|\U0001f1eb\U0001f1ee|\U0001f1eb\U0001f1ef|\U0001f1eb\U0001f1f0|\U0001f1eb\U0001f1f2|\U0001f1eb\U0001f1f4|\U0001f1eb\U0001f1f7|\U0001f1ec\U0001f1e6|\U0001f1ec\U0001f1e7|\U0001f1ec\U0001f1e9|\U0001f1ec\U0001f1ea|\U0001f1ec\U0001f1eb|\U0001f1ec\U0001f1ec|\U0001f1ec\U0001f1ed|\U0001f1ec\U0001f1ee|\U0001f1ec\U0001f1f1|\U0001f1ec\U0001f1f2|\U0001f1ec\U0001f1f3|\U0001f1ec\U0001f1f5|\U0001f1ec\U0001f1f6|\U0001f1ec\U0001f1f7|\U0001f1ec\U0001f1f8|\U0001f1ec\U0001f1f9|\U0001f1ec\U0001f1fa|\U0001f1ec\U0001f1fc|\U0001f1ec\U0001f1f
e|\U0001f1ed\U0001f1f0|\U0001f1ed\U0001f1f2|\U0001f1ed\U0001f1f3|\U0001f1ed\U0001f1f7|\U0001f1ed\U0001f1f9|\U0001f1ed\U0001f1fa|\U0001f1ee\U0001f1e8|\U0001f1ee\U0001f1e9|\U0001f1ee\U0001f1ea|\U0001f1ee\U0001f1f1|\U0001f1ee\U0001f1f2|\U0001f1ee\U0001f1f3|\U0001f1ee\U0001f1f4|\U0001f1ee\U0001f1f6|\U0001f1ee\U0001f1f7|\U0001f1ee\U0001f1f8|\U0001f1ee\U0001f1f9|\U0001f1ef\U0001f1ea|\U0001f1ef\U0001f1f2|\U0001f1ef\U0001f1f4|\U0001f1ef\U0001f1f5|\U0001f1f0\U0001f1ea|\U0001f1f0\U0001f1ec|\U0001f1f0\U0001f1ed|\U0001f1f0\U0001f1ee|\U0001f1f0\U0001f1f2|\U0001f1f0\U0001f1f3|\U0001f1f0\U0001f1f5|\U0001f1f0\U0001f1f7|\U0001f1f0\U0001f1fc|\U0001f1f0\U0001f1fe|\U0001f1f0\U0001f1ff|\U0001f1f1\U0001f1e6|\U0001f1f1\U0001f1e7|\U0001f1f1\U0001f1e8|\U0001f1f1\U0001f1ee|\U0001f1f1\U0001f1f0|\U0001f1f1\U0001f1f7|\U0001f1f1\U0001f1f8|\U0001f1f1\U0001f1f9|\U0001f1f1\U0001f1fa|\U0001f1f1\U0001f1fb|\U0001f1f1\U0001f1fe|\U0001f1f2\U0001f1e6|\U0001f1f2\U0001f1e8|\U0001f1f2\U0001f1e9|\U0001f1f2\U0001f1ea|\U0001f1f2\U0001f1eb|\U0001f1f2\U0001f1ec|\U0001f1f2\U0001f1ed|\U0001f1f2\U0001f1f0|\U0001f1f2\U0001f1f1|\U0001f1f2\U0001f1f2|\U0001f1f2\U0001f1f3|\U0001f1f2\U0001f1f4|\U0001f1f2\U0001f1f5|\U0001f1f2\U0001f1f6|\U0001f1f2\U0001f1f7|\U0001f1f2\U0001f1f8|\U0001f1f2\U0001f1f9|\U0001f1f2\U0001f1fa|\U0001f1f2\U0001f1fb|\U0001f1f2\U0001f1fc|\U0001f1f2\U0001f1fd|\U0001f1f2\U0001f1fe|\U0001f1f2\U0001f1ff|\U0001f1f3\U0001f1e6|\U0001f1f3\U0001f1e8|\U0001f1f3\U0001f1ea|\U0001f1f3\U0001f1eb|\U0001f1f3\U0001f1ec|\U0001f1f3\U0001f1ee|\U0001f1f3\U0001f1f1|\U0001f1f3\U0001f1f4|\U0001f1f3\U0001f1f5|\U0001f1f3\U0001f1f7|\U0001f1f3\U0001f1fa|\U0001f1f3\U0001f1ff|\U0001f1f4\U0001f1f2|\U0001f1f5\U0001f1e6|\U0001f1f5\U0001f1ea|\U0001f1f5\U0001f1eb|\U0001f1f5\U0001f1ec|\U0001f1f5\U0001f1ed|\U0001f1f5\U0001f1f0|\U0001f1f5\U0001f1f1|\U0001f1f5\U0001f1f2|\U0001f1f5\U0001f1f3|\U0001f1f5\U0001f1f7|\U0001f1f5\U0001f1f8|\U0001f1f5\U0001f1f9|\U0001f1f5\U0001f1fc|\U0001f1f5\U0001f1fe|\U0001f1f6\U0001f1e6|\U0001f1f7\U0001f1ea|\U0001f1f7\U0001f1f4|\U0001f1f7\U0001f1f8|\U0001f1f7\U0001f1fa|\U0001f1f7\U0001f1fc|\U0001f1f8\U0001f1e6|\U0001f1f8\U0001f1e7|\U0001f1f8\U0001f1e8|\U0001f1f8\U0001f1e9|\U0001f1f8\U0001f1ea|\U0001f1f8\U0001f1ec|\U0001f1f8\U0001f1ed|\U0001f1f8\U0001f1ee|\U0001f1f8\U0001f1ef|\U0001f1f8\U0001f1f0|\U0001f1f8\U0001f1f1|\U0001f1f8\U0001f1f2|\U0001f1f8\U0001f1f3|\U0001f1f8\U0001f1f4|\U0001f1f8\U0001f1f7|\U0001f1f8\U0001f1f8|\U0001f1f8\U0001f1f9|\U0001f1f8\U0001f1fb|\U0001f1f8\U0001f1fd|\U0001f1f8\U0001f1fe|\U0001f1f8\U0001f1ff|\U0001f1f9\U0001f1e6|\U0001f1f9\U0001f1e8|\U0001f1f9\U0001f1e9|\U0001f1f9\U0001f1eb|\U0001f1f9\U0001f1ec|\U0001f1f9\U0001f1ed|\U0001f1f9\U0001f1ef|\U0001f1f9\U0001f1f0|\U0001f1f9\U0001f1f1|\U0001f1f9\U0001f1f2|\U0001f1f9\U0001f1f3|\U0001f1f9\U0001f1f4|\U0001f1f9\U0001f1f7|\U0001f1f9\U0001f1f9|\U0001f1f9\U0001f1fb|\U0001f1f9\U0001f1fc|\U0001f1f9\U0001f1ff|\U0001f1fa\U0001f1e6|\U0001f1fa\U0001f1ec|\U0001f1fa\U0001f1f2|\U0001f1fa\U0001f1f3|\U0001f1fa\U0001f1f8|\U0001f1fa\U0001f1fe|\U0001f1fa\U0001f1ff|\U0001f1fb\U0001f1e6|\U0001f1fb\U0001f1e8|\U0001f1fb\U0001f1ea|\U0001f1fb\U0001f1ec|\U0001f1fb\U0001f1ee|\U0001f1fb\U0001f1f3|\U0001f1fb\U0001f1fa|\U0001f1fc\U0001f1eb|\U0001f1fc\U0001f1f8|\U0001f1fd\U0001f1f0|\U0001f1fe\U0001f1ea|\U0001f1fe\U0001f1f9|\U0001f1ff\U0001f1e6|\U0001f1ff\U0001f1f2|\U0001f1ff\U0001f1fc|\U0001f600|\U0001f603|\U0001f604|\U0001f601|\U0001f606|\U0001f605|\U0001f923|\U0001f602|\U0001f642|\U0001f643|\U0001f609|\U0001f60a|\U0001f607|\U0001f970|\U0001f60d|\U0001f929|\U0001f618|\U0001f617|\u263a|\U0001f61
a|\U0001f619|\U0001f60b|\U0001f61b|\U0001f61c|\U0001f92a|\U0001f61d|\U0001f911|\U0001f917|\U0001f92d|\U0001f92b|\U0001f914|\U0001f910|\U0001f928|\U0001f610|\U0001f611|\U0001f636|\U0001f60f|\U0001f612|\U0001f644|\U0001f62c|\U0001f925|\U0001f60c|\U0001f614|\U0001f62a|\U0001f924|\U0001f634|\U0001f637|\U0001f912|\U0001f915|\U0001f922|\U0001f92e|\U0001f927|\U0001f975|\U0001f976|\U0001f974|\U0001f635|\U0001f92f|\U0001f920|\U0001f973|\U0001f60e|\U0001f913|\U0001f9d0|\U0001f615|\U0001f61f|\U0001f641|\u2639|\U0001f62e|\U0001f62f|\U0001f632|\U0001f633|\U0001f97a|\U0001f626|\U0001f627|\U0001f628|\U0001f630|\U0001f625|\U0001f622|\U0001f62d|\U0001f631|\U0001f616|\U0001f623|\U0001f61e|\U0001f613|\U0001f629|\U0001f62b|\U0001f971|\U0001f624|\U0001f621|\U0001f620|\U0001f92c|\U0001f608|\U0001f47f|\U0001f480|\u2620|\U0001f4a9|\U0001f921|\U0001f479|\U0001f47a|\U0001f47b|\U0001f47d|\U0001f47e|\U0001f916|\U0001f63a|\U0001f638|\U0001f639|\U0001f63b|\U0001f63c|\U0001f63d|\U0001f640|\U0001f63f|\U0001f63e|\U0001f648|\U0001f649|\U0001f64a|\U0001f48b|\U0001f48c|\U0001f498|\U0001f49d|\U0001f496|\U0001f497|\U0001f493|\U0001f49e|\U0001f495|\U0001f49f|\u2763|\U0001f494|\u2764|\U0001f9e1|\U0001f49b|\U0001f49a|\U0001f499|\U0001f49c|\U0001f90e|\U0001f5a4|\U0001f90d|\U0001f4af|\U0001f4a2|\U0001f4a5|\U0001f4ab|\U0001f4a6|\U0001f4a8|\U0001f573|\U0001f4a3|\U0001f4ac|\U0001f5e8|\U0001f5ef|\U0001f4ad|\U0001f4a4|\U0001f44b|\U0001f91a|\U0001f590|\u270b|\U0001f596|\U0001f44c|\U0001f90f|\u270c|\U0001f91e|\U0001f91f|\U0001f918|\U0001f919|\U0001f448|\U0001f449|\U0001f446|\U0001f595|\U0001f447|\u261d|\U0001f44d|\U0001f44e|\u270a|\U0001f44a|\U0001f91b|\U0001f91c|\U0001f44f|\U0001f64c|\U0001f450|\U0001f932|\U0001f91d|\U0001f64f|\u270d|\U0001f485|\U0001f933|\U0001f4aa|\U0001f9be|\U0001f9bf|\U0001f9b5|\U0001f9b6|\U0001f442|\U0001f9bb|\U0001f443|\U0001f9e0|\U0001f9b7|\U0001f9b4|\U0001f440|\U0001f441|\U0001f445|\U0001f444|\U0001f476|\U0001f9d2|\U0001f466|\U0001f467|\U0001f9d1|\U0001f471|\U0001f468|\U0001f9d4|\U0001f469|\U0001f9d3|\U0001f474|\U0001f475|\U0001f64d|\U0001f64e|\U0001f645|\U0001f646|\U0001f481|\U0001f64b|\U0001f9cf|\U0001f647|\U0001f926|\U0001f937|\U0001f46e|\U0001f575|\U0001f482|\U0001f477|\U0001f934|\U0001f478|\U0001f473|\U0001f472|\U0001f9d5|\U0001f935|\U0001f470|\U0001f930|\U0001f931|\U0001f47c|\U0001f385|\U0001f936|\U0001f9b8|\U0001f9b9|\U0001f9d9|\U0001f9da|\U0001f9db|\U0001f9dc|\U0001f9dd|\U0001f9de|\U0001f9df|\U0001f486|\U0001f487|\U0001f6b6|\U0001f9cd|\U0001f9ce|\U0001f3c3|\U0001f483|\U0001f57a|\U0001f574|\U0001f46f|\U0001f9d6|\U0001f9d7|\U0001f93a|\U0001f3c7|\u26f7|\U0001f3c2|\U0001f3cc|\U0001f3c4|\U0001f6a3|\U0001f3ca|\u26f9|\U0001f3cb|\U0001f6b4|\U0001f6b5|\U0001f938|\U0001f93c|\U0001f93d|\U0001f93e|\U0001f939|\U0001f9d8|\U0001f6c0|\U0001f6cc|\U0001f46d|\U0001f46b|\U0001f46c|\U0001f48f|\U0001f491|\U0001f46a|\U0001f5e3|\U0001f464|\U0001f465|\U0001f463|\U0001f3fb|\U0001f3fc|\U0001f3fd|\U0001f3fe|\U0001f3ff|\U0001f9b0|\U0001f9b1|\U0001f9b3|\U0001f9b2|\U0001f435|\U0001f412|\U0001f98d|\U0001f9a7|\U0001f436|\U0001f415|\U0001f9ae|\U0001f429|\U0001f43a|\U0001f98a|\U0001f99d|\U0001f431|\U0001f408|\U0001f981|\U0001f42f|\U0001f405|\U0001f406|\U0001f434|\U0001f40e|\U0001f984|\U0001f993|\U0001f98c|\U0001f42e|\U0001f402|\U0001f403|\U0001f404|\U0001f437|\U0001f416|\U0001f417|\U0001f43d|\U0001f40f|\U0001f411|\U0001f410|\U0001f42a|\U0001f42b|\U0001f999|\U0001f992|\U0001f418|\U0001f98f|\U0001f99b|\U0001f42d|\U0001f401|\U0001f400|\U0001f439|\U0001f430|\U0001f407|\U0001f43f|\U0001f994|\U0001f987|\U0001f43b|\U0001f428|\U0001f43c|\U0001f9a5|
\U0001f9a6|\U0001f9a8|\U0001f998|\U0001f9a1|\U0001f43e|\U0001f983|\U0001f414|\U0001f413|\U0001f423|\U0001f424|\U0001f425|\U0001f426|\U0001f427|\U0001f54a|\U0001f985|\U0001f986|\U0001f9a2|\U0001f989|\U0001f9a9|\U0001f99a|\U0001f99c|\U0001f438|\U0001f40a|\U0001f422|\U0001f98e|\U0001f40d|\U0001f432|\U0001f409|\U0001f995|\U0001f996|\U0001f433|\U0001f40b|\U0001f42c|\U0001f41f|\U0001f420|\U0001f421|\U0001f988|\U0001f419|\U0001f41a|\U0001f40c|\U0001f98b|\U0001f41b|\U0001f41c|\U0001f41d|\U0001f41e|\U0001f997|\U0001f577|\U0001f578|\U0001f982|\U0001f99f|\U0001f9a0|\U0001f490|\U0001f338|\U0001f4ae|\U0001f3f5|\U0001f339|\U0001f940|\U0001f33a|\U0001f33b|\U0001f33c|\U0001f337|\U0001f331|\U0001f332|\U0001f333|\U0001f334|\U0001f335|\U0001f33e|\U0001f33f|\u2618|\U0001f340|\U0001f341|\U0001f342|\U0001f343|\U0001f347|\U0001f348|\U0001f349|\U0001f34a|\U0001f34b|\U0001f34c|\U0001f34d|\U0001f96d|\U0001f34e|\U0001f34f|\U0001f350|\U0001f351|\U0001f352|\U0001f353|\U0001f95d|\U0001f345|\U0001f965|\U0001f951|\U0001f346|\U0001f954|\U0001f955|\U0001f33d|\U0001f336|\U0001f952|\U0001f96c|\U0001f966|\U0001f9c4|\U0001f9c5|\U0001f344|\U0001f95c|\U0001f330|\U0001f35e|\U0001f950|\U0001f956|\U0001f968|\U0001f96f|\U0001f95e|\U0001f9c7|\U0001f9c0|\U0001f356|\U0001f357|\U0001f969|\U0001f953|\U0001f354|\U0001f35f|\U0001f355|\U0001f32d|\U0001f96a|\U0001f32e|\U0001f32f|\U0001f959|\U0001f9c6|\U0001f95a|\U0001f373|\U0001f958|\U0001f372|\U0001f963|\U0001f957|\U0001f37f|\U0001f9c8|\U0001f9c2|\U0001f96b|\U0001f371|\U0001f358|\U0001f359|\U0001f35a|\U0001f35b|\U0001f35c|\U0001f35d|\U0001f360|\U0001f362|\U0001f363|\U0001f364|\U0001f365|\U0001f96e|\U0001f361|\U0001f95f|\U0001f960|\U0001f961|\U0001f980|\U0001f99e|\U0001f990|\U0001f991|\U0001f9aa|\U0001f366|\U0001f367|\U0001f368|\U0001f369|\U0001f36a|\U0001f382|\U0001f370|\U0001f9c1|\U0001f967|\U0001f36b|\U0001f36c|\U0001f36d|\U0001f36e|\U0001f36f|\U0001f37c|\U0001f95b|\u2615|\U0001f375|\U0001f376|\U0001f37e|\U0001f377|\U0001f378|\U0001f379|\U0001f37a|\U0001f37b|\U0001f942|\U0001f943|\U0001f964|\U0001f9c3|\U0001f9c9|\U0001f9ca|\U0001f962|\U0001f37d|\U0001f374|\U0001f944|\U0001f52a|\U0001f3fa|\U0001f30d|\U0001f30e|\U0001f30f|\U0001f310|\U0001f5fa|\U0001f5fe|\U0001f9ed|\U0001f3d4|\u26f0|\U0001f30b|\U0001f5fb|\U0001f3d5|\U0001f3d6|\U0001f3dc|\U0001f3dd|\U0001f3de|\U0001f3df|\U0001f3db|\U0001f3d7|\U0001f9f1|\U0001f3d8|\U0001f3da|\U0001f3e0|\U0001f3e1|\U0001f3e2|\U0001f3e3|\U0001f3e4|\U0001f3e5|\U0001f3e6|\U0001f3e8|\U0001f3e9|\U0001f3ea|\U0001f3eb|\U0001f3ec|\U0001f3ed|\U0001f3ef|\U0001f3f0|\U0001f492|\U0001f5fc|\U0001f5fd|\u26ea|\U0001f54c|\U0001f6d5|\U0001f54d|\u26e9|\U0001f54b|\u26f2|\u26fa|\U0001f301|\U0001f303|\U0001f3d9|\U0001f304|\U0001f305|\U0001f306|\U0001f307|\U0001f309|\u2668|\U0001f3a0|\U0001f3a1|\U0001f3a2|\U0001f488|\U0001f3aa|\U0001f682|\U0001f683|\U0001f684|\U0001f685|\U0001f686|\U0001f687|\U0001f688|\U0001f689|\U0001f68a|\U0001f69d|\U0001f69e|\U0001f68b|\U0001f68c|\U0001f68d|\U0001f68e|\U0001f690|\U0001f691|\U0001f692|\U0001f693|\U0001f694|\U0001f695|\U0001f696|\U0001f697|\U0001f698|\U0001f699|\U0001f69a|\U0001f69b|\U0001f69c|\U0001f3ce|\U0001f3cd|\U0001f6f5|\U0001f9bd|\U0001f9bc|\U0001f6fa|\U0001f6b2|\U0001f6f4|\U0001f6f9|\U0001f68f|\U0001f6e3|\U0001f6e4|\U0001f6e2|\u26fd|\U0001f6a8|\U0001f6a5|\U0001f6a6|\U0001f6d1|\U0001f6a7|\u2693|\u26f5|\U0001f6f6|\U0001f6a4|\U0001f6f3|\u26f4|\U0001f6e5|\U0001f6a2|\u2708|\U0001f6e9|\U0001f6eb|\U0001f6ec|\U0001fa82|\U0001f4ba|\U0001f681|\U0001f69f|\U0001f6a0|\U0001f6a1|\U0001f6f0|\U0001f680|\U0001f6f8|\U0001f6ce|\U0001f9f3|\u231b|\u23f3|\u231a|
\u23f0|\u23f1|\u23f2|\U0001f570|\U0001f55b|\U0001f567|\U0001f550|\U0001f55c|\U0001f551|\U0001f55d|\U0001f552|\U0001f55e|\U0001f553|\U0001f55f|\U0001f554|\U0001f560|\U0001f555|\U0001f561|\U0001f556|\U0001f562|\U0001f557|\U0001f563|\U0001f558|\U0001f564|\U0001f559|\U0001f565|\U0001f55a|\U0001f566|\U0001f311|\U0001f312|\U0001f313|\U0001f314|\U0001f315|\U0001f316|\U0001f317|\U0001f318|\U0001f319|\U0001f31a|\U0001f31b|\U0001f31c|\U0001f321|\u2600|\U0001f31d|\U0001f31e|\U0001fa90|\u2b50|\U0001f31f|\U0001f320|\U0001f30c|\u2601|\u26c5|\u26c8|\U0001f324|\U0001f325|\U0001f326|\U0001f327|\U0001f328|\U0001f329|\U0001f32a|\U0001f32b|\U0001f32c|\U0001f300|\U0001f308|\U0001f302|\u2602|\u2614|\u26f1|\u26a1|\u2744|\u2603|\u26c4|\u2604|\U0001f525|\U0001f4a7|\U0001f30a|\U0001f383|\U0001f384|\U0001f386|\U0001f387|\U0001f9e8|\u2728|\U0001f388|\U0001f389|\U0001f38a|\U0001f38b|\U0001f38d|\U0001f38e|\U0001f38f|\U0001f390|\U0001f391|\U0001f9e7|\U0001f380|\U0001f381|\U0001f397|\U0001f39f|\U0001f3ab|\U0001f396|\U0001f3c6|\U0001f3c5|\U0001f947|\U0001f948|\U0001f949|\u26bd|\u26be|\U0001f94e|\U0001f3c0|\U0001f3d0|\U0001f3c8|\U0001f3c9|\U0001f3be|\U0001f94f|\U0001f3b3|\U0001f3cf|\U0001f3d1|\U0001f3d2|\U0001f94d|\U0001f3d3|\U0001f3f8|\U0001f94a|\U0001f94b|\U0001f945|\u26f3|\u26f8|\U0001f3a3|\U0001f93f|\U0001f3bd|\U0001f3bf|\U0001f6f7|\U0001f94c|\U0001f3af|\U0001fa80|\U0001fa81|\U0001f3b1|\U0001f52e|\U0001f9ff|\U0001f3ae|\U0001f579|\U0001f3b0|\U0001f3b2|\U0001f9e9|\U0001f9f8|\u2660|\u2665|\u2666|\u2663|\u265f|\U0001f0cf|\U0001f004|\U0001f3b4|\U0001f3ad|\U0001f5bc|\U0001f3a8|\U0001f9f5|\U0001f9f6|\U0001f453|\U0001f576|\U0001f97d|\U0001f97c|\U0001f9ba|\U0001f454|\U0001f455|\U0001f456|\U0001f9e3|\U0001f9e4|\U0001f9e5|\U0001f9e6|\U0001f457|\U0001f458|\U0001f97b|\U0001fa71|\U0001fa72|\U0001fa73|\U0001f459|\U0001f45a|\U0001f45b|\U0001f45c|\U0001f45d|\U0001f6cd|\U0001f392|\U0001f45e|\U0001f45f|\U0001f97e|\U0001f97f|\U0001f460|\U0001f461|\U0001fa70|\U0001f462|\U0001f451|\U0001f452|\U0001f3a9|\U0001f393|\U0001f9e2|\u26d1|\U0001f4ff|\U0001f484|\U0001f48d|\U0001f48e|\U0001f507|\U0001f508|\U0001f509|\U0001f50a|\U0001f4e2|\U0001f4e3|\U0001f4ef|\U0001f514|\U0001f515|\U0001f3bc|\U0001f3b5|\U0001f3b6|\U0001f399|\U0001f39a|\U0001f39b|\U0001f3a4|\U0001f3a7|\U0001f4fb|\U0001f3b7|\U0001f3b8|\U0001f3b9|\U0001f3ba|\U0001f3bb|\U0001fa95|\U0001f941|\U0001f4f1|\U0001f4f2|\u260e|\U0001f4de|\U0001f4df|\U0001f4e0|\U0001f50b|\U0001f50c|\U0001f4bb|\U0001f5a5|\U0001f5a8|\u2328|\U0001f5b1|\U0001f5b2|\U0001f4bd|\U0001f4be|\U0001f4bf|\U0001f4c0|\U0001f9ee|\U0001f3a5|\U0001f39e|\U0001f4fd|\U0001f3ac|\U0001f4fa|\U0001f4f7|\U0001f4f8|\U0001f4f9|\U0001f4fc|\U0001f50d|\U0001f50e|\U0001f56f|\U0001f4a1|\U0001f526|\U0001f3ee|\U0001fa94|\U0001f4d4|\U0001f4d5|\U0001f4d6|\U0001f4d7|\U0001f4d8|\U0001f4d9|\U0001f4da|\U0001f4d3|\U0001f4d2|\U0001f4c3|\U0001f4dc|\U0001f4c4|\U0001f4f0|\U0001f5de|\U0001f4d1|\U0001f516|\U0001f3f7|\U0001f4b0|\U0001f4b4|\U0001f4b5|\U0001f4b6|\U0001f4b7|\U0001f4b8|\U0001f4b3|\U0001f9fe|\U0001f4b9|\U0001f4b1|\U0001f4b2|\u2709|\U0001f4e7|\U0001f4e8|\U0001f4e9|\U0001f4e4|\U0001f4e5|\U0001f4e6|\U0001f4eb|\U0001f4ea|\U0001f4ec|\U0001f4ed|\U0001f4ee|\U0001f5f3|\u270f|\u2712|\U0001f58b|\U0001f58a|\U0001f58c|\U0001f58d|\U0001f4dd|\U0001f4bc|\U0001f4c1|\U0001f4c2|\U0001f5c2|\U0001f4c5|\U0001f4c6|\U0001f5d2|\U0001f5d3|\U0001f4c7|\U0001f4c8|\U0001f4c9|\U0001f4ca|\U0001f4cb|\U0001f4cc|\U0001f4cd|\U0001f4ce|\U0001f587|\U0001f4cf|\U0001f4d0|\u2702|\U0001f5c3|\U0001f5c4|\U0001f5d1|\U0001f512|\U0001f513|\U0001f50f|\U0001f510|\U0001f511|\U0001f5dd|\U0001f528|\U
0001fa93|\u26cf|\u2692|\U0001f6e0|\U0001f5e1|\u2694|\U0001f52b|\U0001f3f9|\U0001f6e1|\U0001f527|\U0001f529|\u2699|\U0001f5dc|\u2696|\U0001f9af|\U0001f517|\u26d3|\U0001f9f0|\U0001f9f2|\u2697|\U0001f9ea|\U0001f9eb|\U0001f9ec|\U0001f52c|\U0001f52d|\U0001f4e1|\U0001f489|\U0001fa78|\U0001f48a|\U0001fa79|\U0001fa7a|\U0001f6aa|\U0001f6cf|\U0001f6cb|\U0001fa91|\U0001f6bd|\U0001f6bf|\U0001f6c1|\U0001fa92|\U0001f9f4|\U0001f9f7|\U0001f9f9|\U0001f9fa|\U0001f9fb|\U0001f9fc|\U0001f9fd|\U0001f9ef|\U0001f6d2|\U0001f6ac|\u26b0|\u26b1|\U0001f5ff|\U0001f3e7|\U0001f6ae|\U0001f6b0|\u267f|\U0001f6b9|\U0001f6ba|\U0001f6bb|\U0001f6bc|\U0001f6be|\U0001f6c2|\U0001f6c3|\U0001f6c4|\U0001f6c5|\u26a0|\U0001f6b8|\u26d4|\U0001f6ab|\U0001f6b3|\U0001f6ad|\U0001f6af|\U0001f6b1|\U0001f6b7|\U0001f4f5|\U0001f51e|\u2622|\u2623|\u2b06|\u2197|\u27a1|\u2198|\u2b07|\u2199|\u2b05|\u2196|\u2195|\u2194|\u21a9|\u21aa|\u2934|\u2935|\U0001f503|\U0001f504|\U0001f519|\U0001f51a|\U0001f51b|\U0001f51c|\U0001f51d|\U0001f6d0|\u269b|\U0001f549|\u2721|\u2638|\u262f|\u271d|\u2626|\u262a|\u262e|\U0001f54e|\U0001f52f|\u2648|\u2649|\u264a|\u264b|\u264c|\u264d|\u264e|\u264f|\u2650|\u2651|\u2652|\u2653|\u26ce|\U0001f500|\U0001f501|\U0001f502|\u25b6|\u23e9|\u23ed|\u23ef|\u25c0|\u23ea|\u23ee|\U0001f53c|\u23eb|\U0001f53d|\u23ec|\u23f8|\u23f9|\u23fa|\u23cf|\U0001f3a6|\U0001f505|\U0001f506|\U0001f4f6|\U0001f4f3|\U0001f4f4|\u2640|\u2642|\u2695|\u267e|\u267b|\u269c|\U0001f531|\U0001f4db|\U0001f530|\u2b55|\u2705|\u2611|\u2714|\u2716|\u274c|\u274e|\u2795|\u2796|\u2797|\u27b0|\u27bf|\u303d|\u2733|\u2734|\u2747|\u203c|\u2049|\u2753|\u2754|\u2755|\u2757|\u3030|\xa9|\xae|\u2122|\U0001f51f|\U0001f520|\U0001f521|\U0001f522|\U0001f523|\U0001f524|\U0001f170|\U0001f18e|\U0001f171|\U0001f191|\U0001f192|\U0001f193|\u2139|\U0001f194|\u24c2|\U0001f195|\U0001f196|\U0001f17e|\U0001f197|\U0001f17f|\U0001f198|\U0001f199|\U0001f19a|\U0001f201|\U0001f202|\U0001f237|\U0001f236|\U0001f22f|\U0001f250|\U0001f239|\U0001f21a|\U0001f232|\U0001f251|\U0001f238|\U0001f234|\U0001f233|\u3297|\u3299|\U0001f23a|\U0001f235|\U0001f534|\U0001f7e0|\U0001f7e1|\U0001f7e2|\U0001f535|\U0001f7e3|\U0001f7e4|\u26ab|\u26aa|\U0001f7e5|\U0001f7e7|\U0001f7e8|\U0001f7e9|\U0001f7e6|\U0001f7ea|\U0001f7eb|\u2b1b|\u2b1c|\u25fc|\u25fb|\u25fe|\u25fd|\u25aa|\u25ab|\U0001f536|\U0001f537|\U0001f538|\U0001f539|\U0001f53a|\U0001f53b|\U0001f4a0|\U0001f518|\U0001f533|\U0001f532|\U0001f3c1|\U0001f6a9|\U0001f38c|\U0001f3f4|\U0001f3f3')) -> str:
text = regex.sub(repl, text)
return text
def _email_normalize(self, text: str, repl: str, regex=re.compile(r'[a-zA-Z0-9.!#$%&\'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9.]+')) -> str:
text = regex.sub(repl, text)
return text
def _tel_normalize(self, text: str, repl: str, regex=re.compile(r'[()+\d.\-]*[ ]?\d{2,4}[-. ]+\d{3,4}[-. ]+\d{3,4}')) -> str:
text = regex.sub(repl, text)
return text
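    # A minimal usage sketch of the normalizers above (illustration only: the
    # wrapper instance name `normalizer` and the replacement tokens are
    # assumptions, not part of this module):
    #
    #   text = "Contact me at john.doe@example.com or 010-1234-5678."
    #   text = normalizer._email_normalize(text, '<email>')
    #   text = normalizer._tel_normalize(text, '<tel>')
    #   # -> "Contact me at <email> or <tel>."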
| true
| true
|
79052d490ace46e8da12949e6ca85539f01cf6c9
| 20,582
|
py
|
Python
|
scenic/projects/baselines/bert/trainer.py
|
keshavd/scenic
|
2f819916c316e7de73cd539c3a9a83c683ddb0ac
|
[
"Apache-2.0"
] | 688
|
2021-07-26T21:45:18.000Z
|
2022-03-31T11:53:34.000Z
|
scenic/projects/baselines/bert/trainer.py
|
keshavd/scenic
|
2f819916c316e7de73cd539c3a9a83c683ddb0ac
|
[
"Apache-2.0"
] | 35
|
2021-08-03T11:31:10.000Z
|
2022-03-31T21:58:58.000Z
|
scenic/projects/baselines/bert/trainer.py
|
keshavd/scenic
|
2f819916c316e7de73cd539c3a9a83c683ddb0ac
|
[
"Apache-2.0"
] | 88
|
2021-08-03T13:19:50.000Z
|
2022-03-31T08:35:22.000Z
|
"""BERT Training Script."""
import functools
from typing import Any, Callable, Dict, Tuple, Optional, Type
from absl import logging
from clu import metric_writers
from clu import periodic_actions
from flax import jax_utils
import flax.linen as nn
import jax
from jax.experimental import optimizers as jax_optimizers
import jax.numpy as jnp
import jax.profiler
import ml_collections
import numpy as np
from scenic.dataset_lib import dataset_utils
from scenic.projects.baselines.bert import bert_base_model
from scenic.projects.baselines.bert import train_utils as bert_train_utils
from scenic.train_lib import lr_schedules
from scenic.train_lib import optimizers
from scenic.train_lib import pretrain_utils
from scenic.train_lib import train_utils
def train_step(
*,
flax_model: nn.Module,
train_state: train_utils.TrainState,
batch: bert_base_model.Batch,
learning_rate_fn: Callable[[int], float],
loss_fn: bert_base_model.LossFn,
metrics_fn: bert_base_model.MetricFn,
config: ml_collections.ConfigDict,
debug: Optional[bool] = False
) -> Tuple[train_utils.TrainState, Dict[str, Tuple[float, int]], float]:
"""Runs a single step of training.
Given the state of the training and a batch of data, computes
the loss and updates the parameters of the model.
Note that in this code, the buffers of the first (train_state) and second
(batch) arguments are donated to the computation.
Args:
flax_model: A Flax model.
train_state: The state of training including the current global_step,
model_state, rng, and optimizer. The buffer of this argument can be
donated to the computation.
batch: A single batch of data. The buffer of this argument can be donated to
the computation.
learning_rate_fn: Learning rate scheduler which given the global_step
generates the learning rate.
loss_fn: A loss function that given logits, a batch, and parameters of the
model calculates the loss.
metrics_fn: A metrics function that given logits and batch of data,
calculates the metrics as well as the loss.
config: Configurations of the experiment.
debug: Whether the debug mode is enabled during training. `debug=True`
enables model specific logging/storing some values using
jax.host_callback.
Returns:
Updated state of training, computed metrics, and learning rate for logging.
"""
new_rng, rng = jax.random.split(train_state.rng)
# Bind the rng to the host/device we are on.
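  # (This folds the per-device axis index into the rng, so each device draws a
  # different dropout mask instead of identical replicas.)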
dropout_rng = train_utils.bind_rng_to_host_device(
rng, axis_name='batch', bind_to='device')
def training_loss_fn(params):
variables = {'params': params, **train_state.model_state}
output, new_model_state = flax_model.apply(
variables,
batch,
mutable=['batch_stats'],
train=True,
rngs={'dropout': dropout_rng},
debug=debug)
loss = loss_fn(output, batch, variables['params'])
return loss, (new_model_state, output)
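  # `has_aux=True` tells value_and_grad that training_loss_fn returns
  # (loss, aux): gradients are taken w.r.t. the loss only, and the aux outputs
  # (new model state and model output) are passed through unchanged.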
compute_gradient_fn = jax.value_and_grad(training_loss_fn, has_aux=True)
step = train_state.global_step
lr = learning_rate_fn(step)
(train_cost,
(new_model_state,
output)), grad = compute_gradient_fn(train_state.optimizer.target)
del train_cost
# We clip gradients before pmean in BERT.
if config.get('max_grad_norm', None) is not None:
grad = jax_optimizers.clip_grads(grad, config.max_grad_norm)
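    # (`clip_grads` rescales the whole gradient tree so that its global L2 norm
    # is at most `max_grad_norm`; gradients below the threshold are unchanged.)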
# Re-use same axis_name as in the call to `pmap(...train_step...)` below.
grad = jax.lax.pmean(grad, axis_name='batch')
new_optimizer = train_state.optimizer.apply_gradient(grad, learning_rate=lr)
# Explicit weight decay, if necessary.
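  # (When enabled, this applies decoupled weight decay, scaled by the current
  # learning rate, only to parameters whose name contains 'kernel', i.e. weight
  # matrices but not biases or LayerNorm parameters.)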
if config.get('explicit_weight_decay', None) is not None:
new_optimizer = new_optimizer.replace(
target=optimizers.tree_map_with_names(
functools.partial(
optimizers.decay_weight_fn,
lr=lr,
decay=config.explicit_weight_decay),
new_optimizer.target,
match_name_fn=lambda name: 'kernel' in name))
metrics = metrics_fn(output, batch)
new_train_state = train_state.replace( # pytype: disable=attribute-error
global_step=step + 1,
optimizer=new_optimizer,
model_state=new_model_state,
rng=new_rng)
return new_train_state, metrics, lr
def eval_step(
*,
flax_model: nn.Module,
train_state: train_utils.TrainState,
batch: bert_base_model.Batch,
metrics_fn: bert_base_model.MetricFn,
all_gather: bool = False,
debug: Optional[bool] = False
) -> Tuple[Dict[str, Tuple[float, int]], Optional[jnp.ndarray],
Optional[jnp.ndarray]]:
"""Runs a single step of training.
Note that in this code, the buffer of the second argument (batch) is donated
to the computation.
Assumed API of metrics_fn is:
    ```metrics = metrics_fn(logits, batch)```
where batch is yielded by the batch iterator, and metrics is a dictionary
mapping metric name to a vector of per example measurements. eval_step will
aggregate (by summing) all per example measurements and divide by the
aggregated normalizers. For each given metric we compute:
1/N sum_{b in batch_iter} metric(b), where N is the sum of normalizer
over all batches.
Args:
flax_model: A Flax model.
train_state: TrainState, the state of training including the current
global_step, model_state, rng, and optimizer. The buffer of this argument
can be donated to the computation.
    batch: A single batch of data. The buffer of this argument can be donated
      to the computation.
    metrics_fn: A metrics function that, given logits and a batch of data,
      calculates the metrics as well as the loss.
    all_gather: If True, the function gathers the batch and the model output
      from all hosts using `jax.lax.all_gather` and returns them, e.g., for
      computing global metrics on CPU.
debug: Whether the debug mode is enabled during evaluation. `debug=True`
enables model specific logging/storing some values using
jax.host_callback.
Returns:
Calculated metrics and optionally output, and batch after all_gather.
"""
variables = {
'params': train_state.optimizer.target,
**train_state.model_state
}
output = flax_model.apply(
variables, batch, train=False, mutable=False, debug=debug)
metrics = metrics_fn(output, batch)
if all_gather:
output = jax.lax.all_gather(output, 'batch')
batch = jax.lax.all_gather(batch, 'batch')
return metrics, output, batch
else:
return metrics, None, None
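# A minimal sketch (illustration only, not part of the Scenic API) of the
# aggregation described in the `eval_step` docstring: each metric value is a
# (sum, normalizer) pair, and the final value is the summed per-example
# measurements divided by the summed normalizers over all evaluation batches.
def _summarize_eval_metrics_sketch(all_metrics):
  """all_metrics: list of dicts mapping metric name -> (value_sum, normalizer)."""
  summary = {}
  for name in all_metrics[0]:
    value_sum = sum(float(m[name][0]) for m in all_metrics)
    normalizer = sum(float(m[name][1]) for m in all_metrics)
    summary[name] = value_sum / max(normalizer, 1.0)
  return summary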
def representation_fn(
*,
flax_model: nn.Module,
train_state: train_utils.TrainState,
batch: bert_base_model.Batch,
representation_layer: str,
gather_to_host: bool = True
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""Feeds the inputs to the model and returns their representations.
Args:
flax_model: A Flax model.
train_state: TrainState, the state of training including the current
global_step, model_state, rng, and optimizer. The buffer of this argument
can be donated to the computation.
batch: A single batch of data from the dataset.
representation_layer: The name of the layer to use as the representation.
gather_to_host: Whether to gather results from all devices to the host,
rather than leaving them distributed.
Returns:
Representation learned by the model for the given inputs and the labels and
masks. If `gather_to_host` is True, these are collected from all hosts.
"""
variables = {
'params': train_state.optimizer.target,
**train_state.model_state
}
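  # With `capture_intermediates`, Flax stores each captured module's outputs in
  # the returned state under model_state['intermediates'][<module path>]
  # ['__call__'] as a tuple, which is why the code below walks
  # `representation_layer_parts` and finally indexes ['__call__'][0].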
representation_layer_parts = representation_layer.split('/')
filter_rep = lambda mdl, _: mdl.name == representation_layer_parts[-1]
_, model_state = flax_model.apply(
variables,
batch,
train=False,
capture_intermediates=filter_rep,
mutable=['intermediates'],
transfer_mode=True,
debug=False)
if 'intermediates' not in model_state:
raise ValueError(f'Layer with name "{representation_layer}"'
' does not exist in your model.')
representation = model_state['intermediates']
for rep_layer in representation_layer_parts:
if rep_layer:
representation = representation[rep_layer]
representation = representation['__call__'][0]
if gather_to_host:
representation = jax.lax.all_gather(representation, 'batch')
batch = jax.lax.all_gather(batch, 'batch')
return representation, batch['label'], batch['batch_mask']
def train(
*,
rng: jnp.ndarray,
config: ml_collections.ConfigDict,
model_cls: Type[bert_base_model.BERTBaseModel],
dataset: dataset_utils.Dataset,
workdir: str,
writer: metric_writers.MetricWriter,
) -> Tuple[train_utils.TrainState, Dict[str, Any], Dict[str, Any]]:
"""Main training loop lives in this function.
Given the model class and dataset, it prepares the items needed to run the
training, including the TrainState.
Args:
rng: Jax rng key.
config: Configurations of the experiment.
model_cls: Model class; A model has a flax_module, a loss_fn, and a
metrics_fn associated with it.
dataset: The dataset that has train_iter, eval_iter, meta_data, and
optionally, test_iter.
workdir: Directory for checkpointing.
writer: CLU metrics writer instance.
Returns:
train_state that has the state of training (including current
global_step, model_state, rng, and the optimizer), train_summary
and eval_summary which are dict of metrics. These outputs are used for
regression testing.
"""
lead_host = jax.process_index() == 0
# Build the loss_fn, metrics, and flax_model.
model = model_cls(config, dataset.meta_data)
# Initialize model.
rng, init_rng = jax.random.split(rng)
(params, model_state, num_trainable_params,
gflops) = bert_train_utils.initialize_bert_model(
model_def=model.flax_model,
input_spec=dataset.meta_data['input_spec'],
config=config,
rngs=init_rng)
# Create optimizer.
  # We jit this so that the arrays are created on the same device as the input,
  # in this case the CPU. Otherwise they'd be on device[0].
optimizer = jax.jit(
optimizers.get_optimizer(config).create, backend='cpu')(
params)
rng, train_rng = jax.random.split(rng)
train_state = train_utils.TrainState(
global_step=0,
optimizer=optimizer,
model_state=model_state,
rng=train_rng,
accum_train_time=0)
start_step = train_state.global_step
if config.checkpoint:
train_state, start_step = train_utils.restore_checkpoint(
workdir, train_state)
if (start_step == 0 # Which means "no" checkpoint is restored!
and config.get('init_from') is not None):
restored_model_cfg = config.init_from.get('model_config')
init_checkpoint_path = config.init_from.get('checkpoint_path')
restored_train_state = pretrain_utils.restore_pretrained_checkpoint(
init_checkpoint_path, train_state, assert_exist=True)
# Load params from the init_model.
train_state = model.init_from_train_state( # pytype: disable=attribute-error
train_state, restored_train_state, restored_model_cfg)
del restored_train_state
  # Replicate the optimizer, state, and rng.
train_state = jax_utils.replicate(train_state)
del params # Do not keep a copy of the initial params.
# Calculate the total number of training steps.
total_steps, steps_per_epoch = train_utils.get_num_training_steps(
config, dataset.meta_data)
# Get learning rate scheduler.
learning_rate_fn = lr_schedules.get_learning_rate_fn(config)
train_step_pmapped = jax.pmap(
functools.partial(
train_step,
flax_model=model.flax_model,
learning_rate_fn=learning_rate_fn,
loss_fn=model.loss_function,
metrics_fn=model.get_metrics_fn('train'),
config=config,
debug=config.debug_train),
axis_name='batch',
# We can donate both buffers of train_state and train_batch.
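      # (Donation lets XLA reuse the donated input buffers for the outputs; the
      # caller must not read the donated arrays again afterwards.)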
donate_argnums=(0, 1),
)
eval_step_pmapped = jax.pmap(
functools.partial(
eval_step,
flax_model=model.flax_model,
metrics_fn=model.get_metrics_fn('validation'),
all_gather=config.get('global_metrics', False),
debug=config.debug_eval),
axis_name='batch',
# We can donate the eval_batch's buffer.
donate_argnums=(1,),
)
if 'fewshot' in config:
representation_fn_pmaped = jax.pmap(
functools.partial(
representation_fn,
flax_model=model.flax_model,
representation_layer=config.fewshot.representation_layer),
# We can donate the batch's buffer.
donate_argnums=(1,),
axis_name='batch')
fewshotter = bert_train_utils.BERTFewShotEvaluator(representation_fn_pmaped,
config.fewshot)
log_eval_steps = config.get('log_eval_steps') or steps_per_epoch
if not log_eval_steps:
raise ValueError("'log_eval_steps' should be specified in the config.")
checkpoint_steps = config.get('checkpoint_steps') or log_eval_steps
log_summary_steps = config.get('log_summary_steps') or log_eval_steps
# Ceil rounding such that we include the last incomplete batch.
total_eval_steps = int(
np.ceil(dataset.meta_data['num_eval_examples'] / config.batch_size))
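  # For example, 10_000 eval examples with a batch size of 256 give
  # ceil(10_000 / 256) = 40 eval steps, the last one only partially filled.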
steps_per_eval = config.get('steps_per_eval') or total_eval_steps
  # Compute global metrics only if `global_metrics` is set in the config and we
  # are the lead host.
compute_global_metrics = False
if config.get('global_metrics', False) and lead_host:
compute_global_metrics = True
if compute_global_metrics:
global_metrics_evaluator = bert_train_utils.BERTGlobalEvaluator(
config.global_metrics)
train_metrics, extra_training_logs = [], []
train_summary, eval_summary = None, None
chrono = train_utils.Chrono(
first_step=start_step,
total_steps=total_steps,
steps_per_epoch=steps_per_epoch,
global_bs=config.batch_size,
accum_train_time=int(jax_utils.unreplicate(train_state.accum_train_time)),
example_type='example')
logging.info('Starting training loop at step %d.', start_step + 1)
report_progress = periodic_actions.ReportProgress(
num_train_steps=total_steps, writer=writer)
hooks = [report_progress]
if config.get('xprof', True) and lead_host:
hooks.append(periodic_actions.Profile(num_profile_steps=5, logdir=workdir))
if start_step == 0:
step0_log = {'num_trainable_params': num_trainable_params}
if gflops:
step0_log['gflops'] = gflops
writer.write_scalars(1, step0_log)
for step in range(start_step + 1, total_steps + 1):
with jax.profiler.StepTraceContext('train', step_num=step):
train_batch = next(dataset.train_iter)
train_state, t_metrics, lr = train_step_pmapped(
train_state=train_state, batch=train_batch)
# This will accumulate metrics in TPU memory up to the point that we log
# them. This is no problem for small metrics but may be a problem for
# large (e.g. segmentation) metrics. An alternative is to set
# `log_summary_steps` to a small number, or to use
# `train_utils.unreplicate_and_get` here instead of right before writing
# summaries, but that means in each step, we have data transfer between
# tpu and host, which might slow down the training.
train_metrics.append(t_metrics)
# Additional training logs: learning rate:
extra_training_logs.append({'learning_rate': lr})
for h in hooks:
h(step)
chrono.pause() # Below are once-in-a-while ops -> pause.
###################### LOG TRAIN SUMMARY ########################
if (step % log_summary_steps == 1) or (step == total_steps):
if lead_host:
chrono.tick(step, writer=writer)
      # train_metrics is a list of dictionaries of metrics, where the shape of
# the metrics[key] is [n_local_devices]. However, because metric functions
# have a psum, we have already summed across the whole sharded batch, and
# what's returned is n_local_devices copies of the same summed metric.
# So we do unreplicate and fetch them to host using `unreplicate_and_get`.
train_summary = train_utils.log_train_summary(
step=step,
train_metrics=jax.tree_map(train_utils.unreplicate_and_get,
train_metrics),
extra_training_logs=jax.tree_map(train_utils.unreplicate_and_get,
extra_training_logs),
writer=writer)
# Reset metric accumulation for next evaluation cycle.
train_metrics, extra_training_logs = [], []
################### EVALUATION #######################
if (step % log_eval_steps == 1) or (step == total_steps):
with report_progress.timed('eval'):
eval_metrics = []
# Sync model state across replicas.
train_state = train_utils.sync_model_state_across_replicas(
train_state)
for _ in range(steps_per_eval):
eval_batch = next(dataset.valid_iter)
e_metrics, e_output, e_batch = eval_step_pmapped(
train_state=train_state, batch=eval_batch)
eval_metrics.append(train_utils.unreplicate_and_get(e_metrics))
if compute_global_metrics:
# Unreplicate outputs of eval_step_pmapped that are coming from
# `lax.all_gather`, fetch to the host and add to the Evaluator:
e_batch_mask = train_utils.unreplicate_and_get(
e_batch['batch_mask']).astype(bool)
            # Classification uses 'label'; regression uses 'targets'.
t_key = 'label' if 'label' in e_batch else 'targets'
global_metrics_evaluator.add_batch_of_examples(
target=train_utils.unreplicate_and_get(
e_batch[t_key])[e_batch_mask],
output=train_utils.unreplicate_and_get(e_output)
[e_batch_mask])
del e_batch, e_output, e_batch_mask
eval_global_metrics_summary = None
if compute_global_metrics:
if (len(global_metrics_evaluator) !=
dataset.meta_data['num_eval_examples']):
          # Make sure no example is lost (especially in multi-host setup).
raise ValueError(f'Number of eval examples should be '
f'{dataset.meta_data["num_eval_examples"]}, '
f'but it is {len(global_metrics_evaluator)}.')
eval_global_metrics_summary = (
global_metrics_evaluator.compute_metrics(
clear_annotations=True))
eval_summary = train_utils.log_eval_summary(
step=step,
eval_metrics=eval_metrics,
extra_eval_summary=eval_global_metrics_summary,
writer=writer)
writer.flush()
del eval_metrics, eval_global_metrics_summary
##################### CHECKPOINTING ###################
if ((step % checkpoint_steps == 0 and step > 0) or
(step == total_steps)) and config.checkpoint:
with report_progress.timed('checkpoint'):
# Sync model state across replicas.
train_state = train_utils.sync_model_state_across_replicas(train_state)
if lead_host:
train_state.replace( # pytype: disable=attribute-error
accum_train_time=chrono.accum_train_time)
train_utils.save_checkpoint(workdir, train_state)
##################### FEWSHOT EVALUATION ############################
if 'fewshot' in config:
# Compute few-shot on-the-fly evaluation.
if (step % config.fewshot.log_eval_steps == 1) or (step == total_steps):
with report_progress.timed('fewshot'):
results = fewshotter.run_all(train_state, config.fewshot.datasets)
fewshotter.log_fewshot_summary(
writer=writer, step=step, results=results)
del results
writer.write_scalars(step, {'zz/epoch': step / steps_per_epoch})
writer.flush()
chrono.resume() # un-pause now
# Wait until computations are done before exiting.
jax.random.normal(jax.random.PRNGKey(0), ()).block_until_ready()
  # Return the train and eval summary after the last step for regression testing.
return train_state, train_summary, eval_summary
| 41.412475
| 81
| 0.694782
|
import functools
from typing import Any, Callable, Dict, Tuple, Optional, Type
from absl import logging
from clu import metric_writers
from clu import periodic_actions
from flax import jax_utils
import flax.linen as nn
import jax
from jax.experimental import optimizers as jax_optimizers
import jax.numpy as jnp
import jax.profiler
import ml_collections
import numpy as np
from scenic.dataset_lib import dataset_utils
from scenic.projects.baselines.bert import bert_base_model
from scenic.projects.baselines.bert import train_utils as bert_train_utils
from scenic.train_lib import lr_schedules
from scenic.train_lib import optimizers
from scenic.train_lib import pretrain_utils
from scenic.train_lib import train_utils
def train_step(
*,
flax_model: nn.Module,
train_state: train_utils.TrainState,
batch: bert_base_model.Batch,
learning_rate_fn: Callable[[int], float],
loss_fn: bert_base_model.LossFn,
metrics_fn: bert_base_model.MetricFn,
config: ml_collections.ConfigDict,
debug: Optional[bool] = False
) -> Tuple[train_utils.TrainState, Dict[str, Tuple[float, int]], float]:
new_rng, rng = jax.random.split(train_state.rng)
dropout_rng = train_utils.bind_rng_to_host_device(
rng, axis_name='batch', bind_to='device')
def training_loss_fn(params):
variables = {'params': params, **train_state.model_state}
output, new_model_state = flax_model.apply(
variables,
batch,
mutable=['batch_stats'],
train=True,
rngs={'dropout': dropout_rng},
debug=debug)
loss = loss_fn(output, batch, variables['params'])
return loss, (new_model_state, output)
compute_gradient_fn = jax.value_and_grad(training_loss_fn, has_aux=True)
step = train_state.global_step
lr = learning_rate_fn(step)
(train_cost,
(new_model_state,
output)), grad = compute_gradient_fn(train_state.optimizer.target)
del train_cost
if config.get('max_grad_norm', None) is not None:
grad = jax_optimizers.clip_grads(grad, config.max_grad_norm)
grad = jax.lax.pmean(grad, axis_name='batch')
new_optimizer = train_state.optimizer.apply_gradient(grad, learning_rate=lr)
if config.get('explicit_weight_decay', None) is not None:
new_optimizer = new_optimizer.replace(
target=optimizers.tree_map_with_names(
functools.partial(
optimizers.decay_weight_fn,
lr=lr,
decay=config.explicit_weight_decay),
new_optimizer.target,
match_name_fn=lambda name: 'kernel' in name))
metrics = metrics_fn(output, batch)
new_train_state = train_state.replace(
global_step=step + 1,
optimizer=new_optimizer,
model_state=new_model_state,
rng=new_rng)
return new_train_state, metrics, lr
def eval_step(
*,
flax_model: nn.Module,
train_state: train_utils.TrainState,
batch: bert_base_model.Batch,
metrics_fn: bert_base_model.MetricFn,
all_gather: bool = False,
debug: Optional[bool] = False
) -> Tuple[Dict[str, Tuple[float, int]], Optional[jnp.ndarray],
Optional[jnp.ndarray]]:
variables = {
'params': train_state.optimizer.target,
**train_state.model_state
}
output = flax_model.apply(
variables, batch, train=False, mutable=False, debug=debug)
metrics = metrics_fn(output, batch)
if all_gather:
output = jax.lax.all_gather(output, 'batch')
batch = jax.lax.all_gather(batch, 'batch')
return metrics, output, batch
else:
return metrics, None, None
def representation_fn(
*,
flax_model: nn.Module,
train_state: train_utils.TrainState,
batch: bert_base_model.Batch,
representation_layer: str,
gather_to_host: bool = True
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
variables = {
'params': train_state.optimizer.target,
**train_state.model_state
}
representation_layer_parts = representation_layer.split('/')
filter_rep = lambda mdl, _: mdl.name == representation_layer_parts[-1]
_, model_state = flax_model.apply(
variables,
batch,
train=False,
capture_intermediates=filter_rep,
mutable=['intermediates'],
transfer_mode=True,
debug=False)
if 'intermediates' not in model_state:
raise ValueError(f'Layer with name "{representation_layer}"'
' does not exist in your model.')
representation = model_state['intermediates']
for rep_layer in representation_layer_parts:
if rep_layer:
representation = representation[rep_layer]
representation = representation['__call__'][0]
if gather_to_host:
representation = jax.lax.all_gather(representation, 'batch')
batch = jax.lax.all_gather(batch, 'batch')
return representation, batch['label'], batch['batch_mask']
def train(
*,
rng: jnp.ndarray,
config: ml_collections.ConfigDict,
model_cls: Type[bert_base_model.BERTBaseModel],
dataset: dataset_utils.Dataset,
workdir: str,
writer: metric_writers.MetricWriter,
) -> Tuple[train_utils.TrainState, Dict[str, Any], Dict[str, Any]]:
lead_host = jax.process_index() == 0
model = model_cls(config, dataset.meta_data)
rng, init_rng = jax.random.split(rng)
(params, model_state, num_trainable_params,
gflops) = bert_train_utils.initialize_bert_model(
model_def=model.flax_model,
input_spec=dataset.meta_data['input_spec'],
config=config,
rngs=init_rng)
optimizer = jax.jit(
optimizers.get_optimizer(config).create, backend='cpu')(
params)
rng, train_rng = jax.random.split(rng)
train_state = train_utils.TrainState(
global_step=0,
optimizer=optimizer,
model_state=model_state,
rng=train_rng,
accum_train_time=0)
start_step = train_state.global_step
if config.checkpoint:
train_state, start_step = train_utils.restore_checkpoint(
workdir, train_state)
if (start_step == 0 # Which means "no" checkpoint is restored!
and config.get('init_from') is not None):
restored_model_cfg = config.init_from.get('model_config')
init_checkpoint_path = config.init_from.get('checkpoint_path')
restored_train_state = pretrain_utils.restore_pretrained_checkpoint(
init_checkpoint_path, train_state, assert_exist=True)
# Load params from the init_model.
train_state = model.init_from_train_state( # pytype: disable=attribute-error
train_state, restored_train_state, restored_model_cfg)
del restored_train_state
  # Replicate the optimizer, state, and rng.
train_state = jax_utils.replicate(train_state)
del params # Do not keep a copy of the initial params.
# Calculate the total number of training steps.
total_steps, steps_per_epoch = train_utils.get_num_training_steps(
config, dataset.meta_data)
# Get learning rate scheduler.
learning_rate_fn = lr_schedules.get_learning_rate_fn(config)
train_step_pmapped = jax.pmap(
functools.partial(
train_step,
flax_model=model.flax_model,
learning_rate_fn=learning_rate_fn,
loss_fn=model.loss_function,
metrics_fn=model.get_metrics_fn('train'),
config=config,
debug=config.debug_train),
axis_name='batch',
# We can donate both buffers of train_state and train_batch.
donate_argnums=(0, 1),
)
eval_step_pmapped = jax.pmap(
functools.partial(
eval_step,
flax_model=model.flax_model,
metrics_fn=model.get_metrics_fn('validation'),
all_gather=config.get('global_metrics', False),
debug=config.debug_eval),
axis_name='batch',
# We can donate the eval_batch's buffer.
donate_argnums=(1,),
)
if 'fewshot' in config:
representation_fn_pmaped = jax.pmap(
functools.partial(
representation_fn,
flax_model=model.flax_model,
representation_layer=config.fewshot.representation_layer),
donate_argnums=(1,),
axis_name='batch')
fewshotter = bert_train_utils.BERTFewShotEvaluator(representation_fn_pmaped,
config.fewshot)
log_eval_steps = config.get('log_eval_steps') or steps_per_epoch
if not log_eval_steps:
raise ValueError("'log_eval_steps' should be specified in the config.")
checkpoint_steps = config.get('checkpoint_steps') or log_eval_steps
log_summary_steps = config.get('log_summary_steps') or log_eval_steps
# Ceil rounding such that we include the last incomplete batch.
total_eval_steps = int(
np.ceil(dataset.meta_data['num_eval_examples'] / config.batch_size))
steps_per_eval = config.get('steps_per_eval') or total_eval_steps
  # Compute global metrics only if `global_metrics` is set in the config and we
  # are the lead host.
compute_global_metrics = False
if config.get('global_metrics', False) and lead_host:
compute_global_metrics = True
if compute_global_metrics:
global_metrics_evaluator = bert_train_utils.BERTGlobalEvaluator(
config.global_metrics)
train_metrics, extra_training_logs = [], []
train_summary, eval_summary = None, None
chrono = train_utils.Chrono(
first_step=start_step,
total_steps=total_steps,
steps_per_epoch=steps_per_epoch,
global_bs=config.batch_size,
accum_train_time=int(jax_utils.unreplicate(train_state.accum_train_time)),
example_type='example')
logging.info('Starting training loop at step %d.', start_step + 1)
report_progress = periodic_actions.ReportProgress(
num_train_steps=total_steps, writer=writer)
hooks = [report_progress]
if config.get('xprof', True) and lead_host:
hooks.append(periodic_actions.Profile(num_profile_steps=5, logdir=workdir))
if start_step == 0:
step0_log = {'num_trainable_params': num_trainable_params}
if gflops:
step0_log['gflops'] = gflops
writer.write_scalars(1, step0_log)
for step in range(start_step + 1, total_steps + 1):
with jax.profiler.StepTraceContext('train', step_num=step):
train_batch = next(dataset.train_iter)
train_state, t_metrics, lr = train_step_pmapped(
train_state=train_state, batch=train_batch)
# This will accumulate metrics in TPU memory up to the point that we log
# them. This is no problem for small metrics but may be a problem for
# large (e.g. segmentation) metrics. An alternative is to set
# `log_summary_steps` to a small number, or to use
# `train_utils.unreplicate_and_get` here instead of right before writing
# summaries, but that means in each step, we have data transfer between
# tpu and host, which might slow down the training.
train_metrics.append(t_metrics)
# Additional training logs: learning rate:
extra_training_logs.append({'learning_rate': lr})
for h in hooks:
h(step)
chrono.pause() # Below are once-in-a-while ops -> pause.
###################### LOG TRAIN SUMMARY ########################
if (step % log_summary_steps == 1) or (step == total_steps):
if lead_host:
chrono.tick(step, writer=writer)
      # train_metrics is a list of dictionaries of metrics, where the shape of
# the metrics[key] is [n_local_devices]. However, because metric functions
# have a psum, we have already summed across the whole sharded batch, and
# what's returned is n_local_devices copies of the same summed metric.
train_summary = train_utils.log_train_summary(
step=step,
train_metrics=jax.tree_map(train_utils.unreplicate_and_get,
train_metrics),
extra_training_logs=jax.tree_map(train_utils.unreplicate_and_get,
extra_training_logs),
writer=writer)
train_metrics, extra_training_logs = [], []
del e_batch, e_output, e_batch_mask
eval_global_metrics_summary = None
if compute_global_metrics:
if (len(global_metrics_evaluator) !=
dataset.meta_data['num_eval_examples']):
raise ValueError(f'Number of eval examples should be '
f'{dataset.meta_data["num_eval_examples"]}, '
f'but it is {len(global_metrics_evaluator)}.')
eval_global_metrics_summary = (
global_metrics_evaluator.compute_metrics(
clear_annotations=True))
eval_summary = train_utils.log_eval_summary(
step=step,
eval_metrics=eval_metrics,
extra_eval_summary=eval_global_metrics_summary,
writer=writer)
writer.flush()
del eval_metrics, eval_global_metrics_summary
| true
| true
|
79052e85084d93d29a6482dacc17390c9ff20a10
| 220
|
py
|
Python
|
stock_notifier/stock_config/stock_config.py
|
saswatraj/stock_notifier
|
f0b05acf77acd6605b4d022e64ddd747d9a5540f
|
[
"MIT"
] | null | null | null |
stock_notifier/stock_config/stock_config.py
|
saswatraj/stock_notifier
|
f0b05acf77acd6605b4d022e64ddd747d9a5540f
|
[
"MIT"
] | null | null | null |
stock_notifier/stock_config/stock_config.py
|
saswatraj/stock_notifier
|
f0b05acf77acd6605b4d022e64ddd747d9a5540f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Base class for a stock configuration.
@author: rajsaswa
"""
class StockConfig:
def __init__(self):
pass
def get_stock_url(self):
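        # Expected to be overridden by concrete configurations to return the URL
        # to query for a particular stock (an assumption based on the class
        # docstring; no subclass is shown in this file).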
pass
| 14.666667
| 37
| 0.590909
|
class StockConfig:
def __init__(self):
pass
def get_stock_url(self):
pass
| true
| true
|
79052ef7ce4a002a27701ac148f1ec3ae92603c5
| 2,524
|
py
|
Python
|
proximal/examples/test_conv.py
|
kyleaj/ProxImaL
|
2986b1ed40b58057822922522145bfbbdd2cf9de
|
[
"MIT"
] | 101
|
2016-07-24T00:33:12.000Z
|
2022-03-23T23:51:58.000Z
|
proximal/examples/test_conv.py
|
kyleaj/ProxImaL
|
2986b1ed40b58057822922522145bfbbdd2cf9de
|
[
"MIT"
] | 57
|
2016-07-26T18:12:37.000Z
|
2022-02-14T04:19:26.000Z
|
proximal/examples/test_conv.py
|
kyleaj/ProxImaL
|
2986b1ed40b58057822922522145bfbbdd2cf9de
|
[
"MIT"
] | 30
|
2016-07-26T22:51:59.000Z
|
2021-01-15T14:45:42.000Z
|
# Proximal
import sys
sys.path.append('../../')
from proximal.utils.utils import *
from proximal.halide.halide import *
from proximal.lin_ops import *
import numpy as np
from scipy import signal
from scipy import ndimage
import matplotlib.pyplot as plt
############################################################
# Load image
np_img = get_test_image(2048)
print('Type ', np_img.dtype, 'Shape', np_img.shape)
imgplot = plt.imshow(np_img, interpolation='nearest', clim=(0.0, 1.0))
imgplot.set_cmap('gray')
plt.title('Numpy')
# Force recompile in local dir
tic()
Halide('A_conv', recompile=True)
Halide('At_conv', recompile=True) # Force recompile in local dir
print('Compilation took: {0:.1f}ms'.format(toc()))
# Test the runner
output = np.zeros_like(np_img)
K = get_kernel(15, len(np_img.shape))
tic()
Halide('A_conv').A_conv(np_img, K, output) # Call
print('Running took: {0:.1f}ms'.format(toc()))
plt.figure()
imgplot = plt.imshow(output, interpolation='nearest', clim=(0.0, 1.0))
imgplot.set_cmap('gray')
plt.title('Output from Halide')
tic()
output_scipy = signal.convolve2d(np_img, K, mode='same', boundary='wrap')
print('Running Scipy.convolve2d took: {0:.1f}ms'.format(toc()))
fn = conv(K, Variable(np_img.shape), implem='halide')
output_ref = np.zeros(np_img.shape, dtype=np.float32, order='F')
tic()
fn.forward([np_img], [output_ref])
print('Running conv fft convolution took: {0:.1f}ms'.format(toc()))
# Error
print('Maximum error {0}'.format(np.amax(np.abs(output_ref - output))))
plt.figure()
imgplot = plt.imshow(output_ref * 255,
interpolation='nearest',
clim=(0.0, 255.0))
imgplot.set_cmap('gray')
plt.title('Output from Scipy')
############################################################################
# Check correlation
############################################################################
output_corr = np.zeros_like(np_img)
tic()
Halide('At_conv').At_conv(np_img, K, output_corr) # Call
print('Running correlation took: {0:.1f}ms'.format(toc()))
#output_corr_ref = signal.convolve2d(np_img, np.flipud(np.fliplr(K)), mode='same', boundary='wrap')
output_corr_ref = ndimage.correlate(np_img, K, mode='wrap')
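# (Correlation with K is equivalent to convolving with the flipped kernel, i.e.
# what the commented-out convolve2d line above would compute.)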
# Adjoint.
output_corr_ref = np.zeros(np_img.shape, dtype=np.float32, order='F')
tic()
fn.adjoint([np_img], [output_corr_ref])
print('Running transpose conv fft convolution took: {0:.1f}ms'.format(toc()))
# Error
print('Maximum error correlation {0}'.format(
np.amax(np.abs(output_corr_ref - output_corr))))
plt.show()
| 29.348837
| 99
| 0.647781
|
import sys
sys.path.append('../../')
from proximal.utils.utils import *
from proximal.halide.halide import *
from proximal.lin_ops import *
import numpy as np
from scipy import signal
from scipy import ndimage
import matplotlib.pyplot as plt
| true
| true
|
79052f08df6a4ffd138a2a2c2dbbf28a768aab17
| 2,643
|
py
|
Python
|
project/knowledge_graph_embedding/project_distmult_rotate_transe/service.py
|
jh-lau/solid_ai_waddle
|
b966f2c6e8b6b48c62064d58461692231aa2116b
|
[
"MIT"
] | null | null | null |
project/knowledge_graph_embedding/project_distmult_rotate_transe/service.py
|
jh-lau/solid_ai_waddle
|
b966f2c6e8b6b48c62064d58461692231aa2116b
|
[
"MIT"
] | null | null | null |
project/knowledge_graph_embedding/project_distmult_rotate_transe/service.py
|
jh-lau/solid_ai_waddle
|
b966f2c6e8b6b48c62064d58461692231aa2116b
|
[
"MIT"
] | null | null | null |
"""
@Author : liujianhan
  @Date : 2018/5/15 10:48 AM
@Project : KGE
@FileName : service.py
  @Description : Service interface module
"""
import codecs
import json
import os
import time
from typing import Dict
import torch
from dotmap import DotMap
from .core.predict import get_entity_relation_with_id
from .layer.model import KGEModel
kge_model, entity2id, id2entity, relation2id, all_true_triples, args = None, None, None, None, None, None
def load_model(model_path: str) -> None:
"""
    Load the model.
    @param model_path: path to the model directory
@return:
"""
global kge_model, entity2id, id2entity, relation2id, all_true_triples, args
args = DotMap(json.load(codecs.open(os.path.join(model_path, 'config.json'), 'r', encoding='utf-8')))
entity2id, id2entity, relation2id, id2relation, all_true_triples = get_entity_relation_with_id(args.data_path)
kge_model = KGEModel(
model_name=args.model,
nentity=args.nentity,
nrelation=args.nrelation,
hidden_dim=args.hidden_dim,
gamma=args.gamma,
double_entity_embedding=args.double_entity_embedding,
double_relation_embedding=args.double_relation_embedding
)
if args.cuda:
kge_model = kge_model.cuda()
checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'))
kge_model.load_state_dict(checkpoint['model_state_dict'])
def inference(target_triple: str) -> Dict:
"""
    Run inference.
    @param target_triple: target triple to predict, given as 'head_entity relation tail_entity'
    @return: top-10 predictions for the head and tail entities
"""
if kge_model is None:
return {'预测结果': '提醒:模型未加载'}
try:
target_triple = target_triple.split()
head = entity2id[target_triple[0]]
tail = entity2id[target_triple[2]]
relation = relation2id[target_triple[1]]
target_triple = [(head, relation, tail)]
except KeyError as e:
return {'预测结果': f'实体或者关系 <{e}> 不存在,请确保输入的实体或者关系已存在。'}
prediction = kge_model.test_step(kge_model, target_triple, all_true_triples, args, True)
head_entity_prediction = [id2entity[str(idx)] for idx in prediction['head_predict']]
tail_entity_prediction = [id2entity[str(idx)] for idx in prediction['tail_predict']]
result = {'头实体预测结果': head_entity_prediction, '尾实体预测结果': tail_entity_prediction}
return result
if __name__ == '__main__':
t1 = time.time()
load_model('data_path/model/DistMult_cn_military_300k_10')
test_cases = [
'摩耶号/Maya巡洋舰 建造时间 1928年',
'1949年2月28日 星座 双鱼座'
]
t2 = time.time()
res = inference(test_cases[0])
print(f'模型加载耗时: {t2 - t1: .3}s')
print(f'推理耗时: {time.time() - t2: .3}s')
print(res)
| 30.732558
| 114
| 0.678396
|
import codecs
import json
import os
import time
from typing import Dict
import torch
from dotmap import DotMap
from .core.predict import get_entity_relation_with_id
from .layer.model import KGEModel
kge_model, entity2id, id2entity, relation2id, all_true_triples, args = None, None, None, None, None, None
def load_model(model_path: str) -> None:
global kge_model, entity2id, id2entity, relation2id, all_true_triples, args
args = DotMap(json.load(codecs.open(os.path.join(model_path, 'config.json'), 'r', encoding='utf-8')))
entity2id, id2entity, relation2id, id2relation, all_true_triples = get_entity_relation_with_id(args.data_path)
kge_model = KGEModel(
model_name=args.model,
nentity=args.nentity,
nrelation=args.nrelation,
hidden_dim=args.hidden_dim,
gamma=args.gamma,
double_entity_embedding=args.double_entity_embedding,
double_relation_embedding=args.double_relation_embedding
)
if args.cuda:
kge_model = kge_model.cuda()
checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'))
kge_model.load_state_dict(checkpoint['model_state_dict'])
def inference(target_triple: str) -> Dict:
if kge_model is None:
return {'预测结果': '提醒:模型未加载'}
try:
target_triple = target_triple.split()
head = entity2id[target_triple[0]]
tail = entity2id[target_triple[2]]
relation = relation2id[target_triple[1]]
target_triple = [(head, relation, tail)]
except KeyError as e:
return {'预测结果': f'实体或者关系 <{e}> 不存在,请确保输入的实体或者关系已存在。'}
prediction = kge_model.test_step(kge_model, target_triple, all_true_triples, args, True)
head_entity_prediction = [id2entity[str(idx)] for idx in prediction['head_predict']]
tail_entity_prediction = [id2entity[str(idx)] for idx in prediction['tail_predict']]
result = {'头实体预测结果': head_entity_prediction, '尾实体预测结果': tail_entity_prediction}
return result
if __name__ == '__main__':
t1 = time.time()
load_model('data_path/model/DistMult_cn_military_300k_10')
test_cases = [
'摩耶号/Maya巡洋舰 建造时间 1928年',
'1949年2月28日 星座 双鱼座'
]
t2 = time.time()
res = inference(test_cases[0])
print(f'模型加载耗时: {t2 - t1: .3}s')
print(f'推理耗时: {time.time() - t2: .3}s')
print(res)
| true
| true
|
790530615bdc29511838b1fd6f0a1563f718c38f
| 401
|
py
|
Python
|
code/platforms/mac/user.py
|
palexjo/pokey_talon
|
b143314850271e185968b12f6e224df1cbb4611c
|
[
"MIT"
] | null | null | null |
code/platforms/mac/user.py
|
palexjo/pokey_talon
|
b143314850271e185968b12f6e224df1cbb4611c
|
[
"MIT"
] | null | null | null |
code/platforms/mac/user.py
|
palexjo/pokey_talon
|
b143314850271e185968b12f6e224df1cbb4611c
|
[
"MIT"
] | null | null | null |
from talon import Module, Context
import appscript
mod = Module()
ctx = Context()
ctx.matches = r"""
os: mac
"""
@mod.action_class
class Actions:
def run_shortcut(name: str):
"""Runs a shortcut on macOS"""
pass
@ctx.action_class("user")
class UserActions:
def run_shortcut(name: str):
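        # Run the named Shortcut through the macOS "Shortcuts Events" scripting
        # interface exposed via appscript.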
appscript.app(id='com.apple.shortcuts.events').shortcuts[name].run_()
| 17.434783
| 77
| 0.653367
|
from talon import Module, Context
import appscript
mod = Module()
ctx = Context()
ctx.matches = r"""
os: mac
"""
@mod.action_class
class Actions:
def run_shortcut(name: str):
pass
@ctx.action_class("user")
class UserActions:
def run_shortcut(name: str):
appscript.app(id='com.apple.shortcuts.events').shortcuts[name].run_()
| true
| true
|
790531271f6a58b28a98db5f3bcee970e2a08af6
| 2,221
|
py
|
Python
|
cs15211/DeleteNodrinaLinkedList.py
|
JulyKikuAkita/PythonPrac
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
[
"Apache-2.0"
] | 1
|
2021-07-05T01:53:30.000Z
|
2021-07-05T01:53:30.000Z
|
cs15211/DeleteNodrinaLinkedList.py
|
JulyKikuAkita/PythonPrac
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
[
"Apache-2.0"
] | null | null | null |
cs15211/DeleteNodrinaLinkedList.py
|
JulyKikuAkita/PythonPrac
|
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
|
[
"Apache-2.0"
] | 1
|
2018-01-08T07:14:08.000Z
|
2018-01-08T07:14:08.000Z
|
__source__ = 'https://leetcode.com/problems/delete-node-in-a-linked-list/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/delete-node-in-a-linked-list.py
# Time: O(1)
# Space: O(1)
#
# Description: Leetcode # 237. Delete Node in a Linked List
#
# Write a function to delete a node (except the tail) in a singly linked list,
# given only access to that node.
#
# Suppose the linked list is 1 -> 2 -> 3 -> 4 and you are given the third node
# with value 3, the linked list should become 1 -> 2 -> 4 after calling your function.
#
# Companies
# Adobe Apple Microsoft
# Related Topics
# Linked List
# Similar Questions
# Remove Linked List Elements
#
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
import unittest
class Solution:
# @param {ListNode} node
# @return {void} Do not return anything, modify node in-place instead.
def deleteNode(self, node):
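        # Example: given 1 -> 2 -> 3 -> 4 and the node holding 3, copy 4's value
        # into it and bypass the old 4, leaving 1 -> 2 -> 4.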
if node and node.next:
node_to_delete = node.next
node.val = node_to_delete.val
node.next = node_to_delete.next
del node_to_delete
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/delete-node-in-a-linked-list/solution/
Thought: We can't really delete the node, but we can kinda achieve the same effect
by instead removing the next node after copying its data into the node that we were asked to delete.
/**
* Definition for singly-linked list.
* public class ListNode {
* int val;
* ListNode next;
* ListNode(int x) { val = x; }
* }
*/
# 0ms 100%
class Solution {
public void deleteNode(ListNode node) {
node.val = node.next.val;
node.next = node.next.next;
}
}
# 0ms 100%
class Solution {
public void deleteNode(ListNode node) {
if (node == null || node.next == null) {
return;
}
while (node.next.next != null) {
node.val = node.next.val;
node = node.next;
}
node.val = node.next.val;
node.next = null;
}
}
'''
| 27.085366
| 100
| 0.642954
|
__source__ = 'https://leetcode.com/problems/delete-node-in-a-linked-list/description/'
def __init__(self, x):
self.val = x
self.next = None
import unittest
class Solution:
def deleteNode(self, node):
if node and node.next:
node_to_delete = node.next
node.val = node_to_delete.val
node.next = node_to_delete.next
del node_to_delete
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/delete-node-in-a-linked-list/solution/
Thought: We can't really delete the node, but we can kinda achieve the same effect
by instead removing the next node after copying its data into the node that we were asked to delete.
/**
* Definition for singly-linked list.
* public class ListNode {
* int val;
* ListNode next;
* ListNode(int x) { val = x; }
* }
*/
# 0ms 100%
class Solution {
public void deleteNode(ListNode node) {
node.val = node.next.val;
node.next = node.next.next;
}
}
# 0ms 100%
class Solution {
public void deleteNode(ListNode node) {
if (node == null || node.next == null) {
return;
}
while (node.next.next != null) {
node.val = node.next.val;
node = node.next;
}
node.val = node.next.val;
node.next = null;
}
}
'''
| true
| true
|
790531c6e3177f6ce7d911c9af400d2d8d75273b
| 3,442
|
py
|
Python
|
src/portable_python/external/_inspect.py
|
codrsquad/portable-python
|
4ec94dc1ded85c367c6912f96c408b03d2d68a9c
|
[
"MIT"
] | 3
|
2022-01-04T13:58:53.000Z
|
2022-01-28T11:11:50.000Z
|
src/portable_python/external/_inspect.py
|
codrsquad/portable-python
|
4ec94dc1ded85c367c6912f96c408b03d2d68a9c
|
[
"MIT"
] | 3
|
2021-09-18T09:43:18.000Z
|
2022-01-04T12:58:05.000Z
|
src/portable_python/external/_inspect.py
|
codrsquad/portable-python
|
4ec94dc1ded85c367c6912f96c408b03d2d68a9c
|
[
"MIT"
] | 4
|
2021-09-03T06:55:31.000Z
|
2022-01-26T14:24:07.000Z
|
import json
import os
import re
import sys
import sysconfig
RX_VERSION = re.compile(r"\d\.\d")
INSIGHTS = {
"_gdbm": "_GDBM_VERSION",
"_tkinter": "TCL_VERSION TK_VERSION",
"_sqlite3": "sqlite_version version",
"_ssl": "OPENSSL_VERSION",
"dbm.gnu": "_GDBM_VERSION",
"ensurepip": "_PIP_VERSION",
"pyexpat": "version_info",
"readline": "_READLINE_LIBRARY_VERSION",
"tkinter": "TclVersion TkVersion",
"zlib": "ZLIB_VERSION ZLIB_RUNTIME_VERSION",
}
def get_version(text):
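    # For illustration: get_version((3, 9, 1)) -> '3.9.1' and get_version(b'1.2.11')
    # -> '1.2.11'; None is returned when no '<digit>.<digit>' pattern is found.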
if text:
if isinstance(text, bytes):
text = text.decode("utf-8")
elif isinstance(text, tuple):
text = ".".join(str(x) for x in text)
else:
text = str(text)
if text and RX_VERSION.search(text):
return text.splitlines()[0]
def pymodule_version_info(key, value, pymodule):
version = get_version(value)
if version:
result = dict(version_field=key, version=version)
if hasattr(pymodule, "__file__"):
result["path"] = pymodule.__file__
return result
def pymodule_info(module_name, pymodule):
fields = INSIGHTS.get(module_name)
fields = fields.split() if fields else ["__version__", "version", "VERSION"]
for f in fields:
v = pymodule_version_info(f, getattr(pymodule, f, None), pymodule)
if v:
return v
if hasattr(pymodule, "__file__"):
return dict(path=pymodule.__file__)
if hasattr(pymodule, "__spec__"):
v = getattr(pymodule.__spec__, "origin")
if v == "built-in":
return dict(version=v)
return dict(note=str(dir(pymodule)))
def module_report(module_name):
try:
return pymodule_info(module_name, __import__(module_name))
except Exception as e:
note = str(e)
if "No module named" in note:
return dict(version="*absent*")
return dict(version="*absent*", note=note)
def get_srcdir():
srcdir = sysconfig.get_config_var("srcdir")
if not srcdir or len(srcdir) < 3:
srcdir = sysconfig.get_config_var("DESTSHARED") # edge case: py2 reports an odd '.' as srcdir
return srcdir
def get_simplified_dirs(path):
result = []
if path:
path = os.path.dirname(path)
result.append(path)
if path.startswith("/private"):
result.append(path[8:]) # whoever compiled didn't use realpath(tmp)
elif not path.startswith("/tmp"): # nosec, just simplifying paths
result.append(os.path.dirname(result[0]))
return result
def main(arg):
if arg == "sysconfig":
marker = "$^"
simplified_dirs = get_simplified_dirs(sysconfig.get_config_var("abs_builddir"))
if simplified_dirs:
print("# '%s' is original abs_builddir:" % marker)
print("%s: %s\n" % (marker, simplified_dirs[0]))
for k, v in sorted(sysconfig.get_config_vars().items()):
for sp in simplified_dirs:
v = str(v).replace(sp, marker)
print("%s: %s" % (k, v))
return
if arg and not arg.startswith("-"):
report = dict((k, module_report(k)) for k in arg.split(","))
report = dict(report=report, srcdir=get_srcdir(), prefix=sysconfig.get_config_var("prefix"))
print(json.dumps(report, indent=2, sort_keys=True))
if __name__ == "__main__":
main(sys.argv[1] if len(sys.argv) > 1 else "")
| 27.536
| 102
| 0.613597
|
import json
import os
import re
import sys
import sysconfig
RX_VERSION = re.compile(r"\d\.\d")
INSIGHTS = {
"_gdbm": "_GDBM_VERSION",
"_tkinter": "TCL_VERSION TK_VERSION",
"_sqlite3": "sqlite_version version",
"_ssl": "OPENSSL_VERSION",
"dbm.gnu": "_GDBM_VERSION",
"ensurepip": "_PIP_VERSION",
"pyexpat": "version_info",
"readline": "_READLINE_LIBRARY_VERSION",
"tkinter": "TclVersion TkVersion",
"zlib": "ZLIB_VERSION ZLIB_RUNTIME_VERSION",
}
def get_version(text):
if text:
if isinstance(text, bytes):
text = text.decode("utf-8")
elif isinstance(text, tuple):
text = ".".join(str(x) for x in text)
else:
text = str(text)
if text and RX_VERSION.search(text):
return text.splitlines()[0]
def pymodule_version_info(key, value, pymodule):
version = get_version(value)
if version:
result = dict(version_field=key, version=version)
if hasattr(pymodule, "__file__"):
result["path"] = pymodule.__file__
return result
def pymodule_info(module_name, pymodule):
fields = INSIGHTS.get(module_name)
fields = fields.split() if fields else ["__version__", "version", "VERSION"]
for f in fields:
v = pymodule_version_info(f, getattr(pymodule, f, None), pymodule)
if v:
return v
if hasattr(pymodule, "__file__"):
return dict(path=pymodule.__file__)
if hasattr(pymodule, "__spec__"):
v = getattr(pymodule.__spec__, "origin")
if v == "built-in":
return dict(version=v)
return dict(note=str(dir(pymodule)))
def module_report(module_name):
try:
return pymodule_info(module_name, __import__(module_name))
except Exception as e:
note = str(e)
if "No module named" in note:
return dict(version="*absent*")
return dict(version="*absent*", note=note)
def get_srcdir():
srcdir = sysconfig.get_config_var("srcdir")
if not srcdir or len(srcdir) < 3:
srcdir = sysconfig.get_config_var("DESTSHARED")
return srcdir
def get_simplified_dirs(path):
result = []
if path:
path = os.path.dirname(path)
result.append(path)
if path.startswith("/private"):
result.append(path[8:])
elif not path.startswith("/tmp"): # nosec, just simplifying paths
result.append(os.path.dirname(result[0]))
return result
def main(arg):
if arg == "sysconfig":
marker = "$^"
simplified_dirs = get_simplified_dirs(sysconfig.get_config_var("abs_builddir"))
if simplified_dirs:
print("# '%s' is original abs_builddir:" % marker)
print("%s: %s\n" % (marker, simplified_dirs[0]))
for k, v in sorted(sysconfig.get_config_vars().items()):
for sp in simplified_dirs:
v = str(v).replace(sp, marker)
print("%s: %s" % (k, v))
return
if arg and not arg.startswith("-"):
report = dict((k, module_report(k)) for k in arg.split(","))
report = dict(report=report, srcdir=get_srcdir(), prefix=sysconfig.get_config_var("prefix"))
print(json.dumps(report, indent=2, sort_keys=True))
if __name__ == "__main__":
main(sys.argv[1] if len(sys.argv) > 1 else "")
| true
| true
|
79053220cb7fa9c72a5b0d1cce68d7322874cdb2
| 4,653
|
py
|
Python
|
paypalrestsdkold/openid_connect.py
|
fdhoff/PayPal-Python-SDK
|
a46c87ac99680795c89590b7342d234633244156
|
[
"BSD-Source-Code"
] | null | null | null |
paypalrestsdkold/openid_connect.py
|
fdhoff/PayPal-Python-SDK
|
a46c87ac99680795c89590b7342d234633244156
|
[
"BSD-Source-Code"
] | null | null | null |
paypalrestsdkold/openid_connect.py
|
fdhoff/PayPal-Python-SDK
|
a46c87ac99680795c89590b7342d234633244156
|
[
"BSD-Source-Code"
] | null | null | null |
import paypalrestsdkold.util as util
from paypalrestsdkold.resource import Resource
from paypalrestsdkold.api import default as default_api
from paypalrestsdkold.api import Api
from paypalrestsdkold.config import __version__
from six import string_types
class Base(Resource):
user_agent = "PayPalSDK/openid-connect-python %s (%s)" % (__version__, Api.library_details)
@classmethod
def post(cls, action, options=None, headers=None, api=None):
api = api or default_api()
url = util.join_url(endpoint(api), action)
body = util.urlencode(options or {})
headers = util.merge_dict({
'User-Agent': cls.user_agent,
'Content-Type': 'application/x-www-form-urlencoded'}, headers or {})
data = api.http_call(url, 'POST', data=body, headers=headers)
return cls(data, api=api)
class Tokeninfo(Base):
"""Token service for Log In with PayPal, API docs at
https://developer.paypal.com/docs/api/#identity
"""
path = "v1/identity/openidconnect/tokenservice"
@classmethod
def create(cls, options=None, api=None):
options = options or {}
api = api or default_api()
if isinstance(options, string_types):
options = {'code': options}
options = util.merge_dict({
'grant_type': 'authorization_code',
'client_id': client_id(api),
'client_secret': client_secret(api)
}, options)
return cls.post(cls.path, options, api=api)
@classmethod
def create_with_refresh_token(cls, options=None, api=None):
options = options or {}
api = api or default_api()
if isinstance(options, string_types):
options = {'refresh_token': options}
options = util.merge_dict({
'grant_type': 'refresh_token',
'client_id': client_id(api),
'client_secret': client_secret(api)
}, options)
return cls.post(cls.path, options, api=api)
@classmethod
def authorize_url(cls, options=None, api=None):
return authorize_url(options or {}, api=api)
def logout_url(self, options=None, api=None):
return logout_url(util.merge_dict({'id_token': self.id_token}, options or {}), api=api)
def refresh(self, options=None, api=None):
options = util.merge_dict({'refresh_token': self.refresh_token}, options or {})
tokeninfo = self.__class__.create_with_refresh_token(options, api=api)
self.merge(tokeninfo.to_dict())
return self
def userinfo(self, options=None, api=None):
return Userinfo.get(util.merge_dict({'access_token': self.access_token}, options or {}), api=api)
class Userinfo(Base):
"""Retrive user profile attributes for Log In with PayPal
"""
path = "v1/identity/openidconnect/userinfo"
@classmethod
def get(cls, options=None, api=None):
options = options or {}
if isinstance(options, string_types):
options = {'access_token': options}
options = util.merge_dict({'schema': 'openid'}, options)
api = api or default_api()
return cls.post(cls.path, options, api=api)
def endpoint(api=None):
api = api or default_api()
return api.options.get("openid_endpoint", api.endpoint)
def client_id(api=None):
api = api or default_api()
return api.options.get("openid_client_id", api.client_id)
def client_secret(api=None):
api = api or default_api()
return api.options.get("openid_client_secret", api.client_secret)
def redirect_uri(api=None):
api = api or default_api()
return api.options.get("openid_redirect_uri")
start_session_path = "/signin/authorize"
end_session_path = "/webapps/auth/protocol/openidconnect/v1/endsession"
def session_url(path, options=None, api=None):
api = api or default_api()
if api.mode == "live":
path = util.join_url("https://www.paypal.com", path)
else:
path = util.join_url("https://www.sandbox.paypal.com", path)
return util.join_url_params(path, options or {})
def authorize_url(options=None, api=None):
api = api or default_api()
options = util.merge_dict({
'response_type': 'code',
'scope': 'openid',
'client_id': client_id(api),
'redirect_uri': redirect_uri(api)
}, options or {})
return session_url(start_session_path, options, api=api)
def logout_url(options=None, api=None):
api = api or default_api()
options = util.merge_dict({
'logout': 'true',
'redirect_uri': redirect_uri(api)
}, options or {})
return session_url(end_session_path, options, api=api)
| 32.538462
| 105
| 0.656995
|
import paypalrestsdkold.util as util
from paypalrestsdkold.resource import Resource
from paypalrestsdkold.api import default as default_api
from paypalrestsdkold.api import Api
from paypalrestsdkold.config import __version__
from six import string_types
class Base(Resource):
user_agent = "PayPalSDK/openid-connect-python %s (%s)" % (__version__, Api.library_details)
@classmethod
def post(cls, action, options=None, headers=None, api=None):
api = api or default_api()
url = util.join_url(endpoint(api), action)
body = util.urlencode(options or {})
headers = util.merge_dict({
'User-Agent': cls.user_agent,
'Content-Type': 'application/x-www-form-urlencoded'}, headers or {})
data = api.http_call(url, 'POST', data=body, headers=headers)
return cls(data, api=api)
class Tokeninfo(Base):
path = "v1/identity/openidconnect/tokenservice"
@classmethod
def create(cls, options=None, api=None):
options = options or {}
api = api or default_api()
if isinstance(options, string_types):
options = {'code': options}
options = util.merge_dict({
'grant_type': 'authorization_code',
'client_id': client_id(api),
'client_secret': client_secret(api)
}, options)
return cls.post(cls.path, options, api=api)
@classmethod
def create_with_refresh_token(cls, options=None, api=None):
options = options or {}
api = api or default_api()
if isinstance(options, string_types):
options = {'refresh_token': options}
options = util.merge_dict({
'grant_type': 'refresh_token',
'client_id': client_id(api),
'client_secret': client_secret(api)
}, options)
return cls.post(cls.path, options, api=api)
@classmethod
def authorize_url(cls, options=None, api=None):
return authorize_url(options or {}, api=api)
def logout_url(self, options=None, api=None):
return logout_url(util.merge_dict({'id_token': self.id_token}, options or {}), api=api)
def refresh(self, options=None, api=None):
options = util.merge_dict({'refresh_token': self.refresh_token}, options or {})
tokeninfo = self.__class__.create_with_refresh_token(options, api=api)
self.merge(tokeninfo.to_dict())
return self
def userinfo(self, options=None, api=None):
return Userinfo.get(util.merge_dict({'access_token': self.access_token}, options or {}), api=api)
class Userinfo(Base):
path = "v1/identity/openidconnect/userinfo"
@classmethod
def get(cls, options=None, api=None):
options = options or {}
if isinstance(options, string_types):
options = {'access_token': options}
options = util.merge_dict({'schema': 'openid'}, options)
api = api or default_api()
return cls.post(cls.path, options, api=api)
def endpoint(api=None):
api = api or default_api()
return api.options.get("openid_endpoint", api.endpoint)
def client_id(api=None):
api = api or default_api()
return api.options.get("openid_client_id", api.client_id)
def client_secret(api=None):
api = api or default_api()
return api.options.get("openid_client_secret", api.client_secret)
def redirect_uri(api=None):
api = api or default_api()
return api.options.get("openid_redirect_uri")
start_session_path = "/signin/authorize"
end_session_path = "/webapps/auth/protocol/openidconnect/v1/endsession"
def session_url(path, options=None, api=None):
api = api or default_api()
if api.mode == "live":
path = util.join_url("https://www.paypal.com", path)
else:
path = util.join_url("https://www.sandbox.paypal.com", path)
return util.join_url_params(path, options or {})
def authorize_url(options=None, api=None):
api = api or default_api()
options = util.merge_dict({
'response_type': 'code',
'scope': 'openid',
'client_id': client_id(api),
'redirect_uri': redirect_uri(api)
}, options or {})
return session_url(start_session_path, options, api=api)
def logout_url(options=None, api=None):
api = api or default_api()
options = util.merge_dict({
'logout': 'true',
'redirect_uri': redirect_uri(api)
}, options or {})
return session_url(end_session_path, options, api=api)
| true
| true
|
79053237373e9febfbe186d41fe2f3cf64ee488f
| 4,610
|
py
|
Python
|
sigmapiweb/apps/PubSite/views.py
|
Jacobvs/sigmapi-web
|
ca8d5a5294385fe5f4634c483a1278df904e2f85
|
[
"MIT"
] | 8
|
2018-01-19T15:27:24.000Z
|
2022-02-04T05:57:01.000Z
|
sigmapiweb/apps/PubSite/views.py
|
Jacobvs/sigmapi-web
|
ca8d5a5294385fe5f4634c483a1278df904e2f85
|
[
"MIT"
] | 71
|
2017-07-17T04:44:35.000Z
|
2022-02-19T19:33:24.000Z
|
sigmapiweb/apps/PubSite/views.py
|
Jacobvs/sigmapi-web
|
ca8d5a5294385fe5f4634c483a1278df904e2f85
|
[
"MIT"
] | 6
|
2019-04-12T03:18:12.000Z
|
2021-09-28T23:33:12.000Z
|
"""
Views for PubSite app.
"""
from django.conf import settings
from django.contrib.auth.views import (
PasswordResetView,
PasswordResetDoneView,
PasswordResetConfirmView,
PasswordResetCompleteView,
)
from django.shortcuts import render
import requests
import logging
logger = logging.getLogger(__name__)
def _get_context(page_name):
return {
"pages": settings.PUBLIC_PAGES,
"current_page_name": page_name,
}
# Regular index
# def index(request):
# """
# View for the static index page
# """
# return render(request, 'public/home.html', _get_context('Home'))
def index(request):
"""
View for the static index page
"""
return render(request, "public/home.html", _get_context("Home"))
def about(request):
"""
View for the static chapter history page.
"""
return render(request, "public/about.html", _get_context("About"))
def activities(request):
"""
View for the static chapter service page.
"""
return render(
request,
"public/activities.html",
_get_context("Service & Activities"),
)
def rush(request):
"""
    View for the static rush page.
"""
return render(
request,
"public/rush.html",
_get_context("Rush"),
)
def campaign(request):
"""
View for the campaign service page.
"""
    # Override requests Session authentication handling
class NoRebuildAuthSession(requests.Session):
def rebuild_auth(self, prepared_request, response):
"""
No code here means requests will always preserve the Authorization
header when redirected.
Be careful not to leak your credentials to untrusted hosts!
"""
url = "https://api.givebutter.com/v1/transactions/"
headers = {"Authorization": f"Bearer {settings.GIVEBUTTER_API_KEY}"}
response = None
# Create custom requests session
session = NoRebuildAuthSession()
# Make GET request to server, timeout in seconds
try:
r = session.get(url, headers=headers, timeout=0.75)
if r.status_code == 200:
response = r.json()
else:
logger.error(f"ERROR in request: {r.status_code}")
except requests.exceptions.Timeout:
logger.warning("Connection to GiveButter API Timed out")
except requests.ConnectionError:
logger.warning("Connection to GiveButter API could not be resolved")
except requests.exceptions.RequestException:
logger.error(
"An unknown issue occurred while trying to retrieve GiveButter Donor List"
)
# Grab context object to use later
ctx = _get_context("Campaign")
# Check for successful response, if so - filter, sort, and format data
if response and "data" in response:
response = response["data"] # Pull data from GET response object
logger.debug(f"GiveButter API Response: {response}")
# Filter by only successful transactions, then sort by amount descending
successful_txs = [tx for tx in response if tx["status"] == "succeeded"]
sorted_txs = sorted(successful_txs, key=lambda tx: tx["amount"], reverse=True)
# Clean data to a list of dictionaries & remove unnecessary data
transactions = [
{
"name": tx["giving_space"]["name"],
"amount": tx["giving_space"]["amount"],
"message": tx["giving_space"]["message"],
}
for tx in sorted_txs[:20]
]
# Attach transaction dictionary & length to context object
ctx["transactions"] = transactions
ctx["num_txs"] = len(successful_txs)
return render(
request,
"public/campaign.html",
ctx,
)
def permission_denied(request):
"""
View for 403 (Permission Denied) error.
"""
return render(
request,
"common/403.html",
_get_context("Permission Denied"),
)
def handler404(request, exception):
""" """
return render(request, "common/404.html", _get_context("Page Not Found"))
class ResetPassword(PasswordResetView):
template_name = "password_reset/password_reset_form.html"
class ResetPasswordDone(PasswordResetDoneView):
template_name = "password_reset/password_reset_done.html"
class ResetPasswordConfirm(PasswordResetConfirmView):
template_name = "password_reset/password_reset_confirm.html"
class ResetPasswordComplete(PasswordResetCompleteView):
template_name = "password_reset/password_reset_complete.html"
| 27.440476
| 86
| 0.652061
|
from django.conf import settings
from django.contrib.auth.views import (
PasswordResetView,
PasswordResetDoneView,
PasswordResetConfirmView,
PasswordResetCompleteView,
)
from django.shortcuts import render
import requests
import logging
logger = logging.getLogger(__name__)
def _get_context(page_name):
return {
"pages": settings.PUBLIC_PAGES,
"current_page_name": page_name,
}
# View for the static index page
# """
def index(request):
return render(request, "public/home.html", _get_context("Home"))
def about(request):
return render(request, "public/about.html", _get_context("About"))
def activities(request):
return render(
request,
"public/activities.html",
_get_context("Service & Activities"),
)
def rush(request):
return render(
request,
"public/rush.html",
_get_context("Rush"),
)
def campaign(request):
class NoRebuildAuthSession(requests.Session):
def rebuild_auth(self, prepared_request, response):
url = "https://api.givebutter.com/v1/transactions/"
headers = {"Authorization": f"Bearer {settings.GIVEBUTTER_API_KEY}"}
response = None
session = NoRebuildAuthSession()
try:
r = session.get(url, headers=headers, timeout=0.75)
if r.status_code == 200:
response = r.json()
else:
logger.error(f"ERROR in request: {r.status_code}")
except requests.exceptions.Timeout:
logger.warning("Connection to GiveButter API Timed out")
except requests.ConnectionError:
logger.warning("Connection to GiveButter API could not be resolved")
except requests.exceptions.RequestException:
logger.error(
"An unknown issue occurred while trying to retrieve GiveButter Donor List"
)
ctx = _get_context("Campaign")
if response and "data" in response:
response = response["data"]
logger.debug(f"GiveButter API Response: {response}")
successful_txs = [tx for tx in response if tx["status"] == "succeeded"]
sorted_txs = sorted(successful_txs, key=lambda tx: tx["amount"], reverse=True)
transactions = [
{
"name": tx["giving_space"]["name"],
"amount": tx["giving_space"]["amount"],
"message": tx["giving_space"]["message"],
}
for tx in sorted_txs[:20]
]
ctx["transactions"] = transactions
ctx["num_txs"] = len(successful_txs)
return render(
request,
"public/campaign.html",
ctx,
)
def permission_denied(request):
return render(
request,
"common/403.html",
_get_context("Permission Denied"),
)
def handler404(request, exception):
return render(request, "common/404.html", _get_context("Page Not Found"))
class ResetPassword(PasswordResetView):
template_name = "password_reset/password_reset_form.html"
class ResetPasswordDone(PasswordResetDoneView):
template_name = "password_reset/password_reset_done.html"
class ResetPasswordConfirm(PasswordResetConfirmView):
template_name = "password_reset/password_reset_confirm.html"
class ResetPasswordComplete(PasswordResetCompleteView):
template_name = "password_reset/password_reset_complete.html"
| true
| true
|
79053243276532245599bd5bb291b2fa2c8e81f6
| 7,583
|
py
|
Python
|
darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py
|
revnav/sandbox
|
f9c8422233d093b76821686b6c249417502cf61d
|
[
"Apache-2.0"
] | null | null | null |
darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py
|
revnav/sandbox
|
f9c8422233d093b76821686b6c249417502cf61d
|
[
"Apache-2.0"
] | null | null | null |
darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py
|
revnav/sandbox
|
f9c8422233d093b76821686b6c249417502cf61d
|
[
"Apache-2.0"
] | 1
|
2020-06-25T03:12:58.000Z
|
2020-06-25T03:12:58.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateHttpRedirectDetails(object):
"""
The details of a HTTP Redirect configured to redirect traffic from one hostname to another.
**Warning:** Oracle recommends that you avoid using any confidential information when you supply string values using the API.
"""
def __init__(self, **kwargs):
"""
Initializes a new UpdateHttpRedirectDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param display_name:
The value to assign to the display_name property of this UpdateHttpRedirectDetails.
:type display_name: str
:param target:
The value to assign to the target property of this UpdateHttpRedirectDetails.
:type target: HttpRedirectTarget
:param response_code:
The value to assign to the response_code property of this UpdateHttpRedirectDetails.
:type response_code: int
:param freeform_tags:
The value to assign to the freeform_tags property of this UpdateHttpRedirectDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this UpdateHttpRedirectDetails.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'display_name': 'str',
'target': 'HttpRedirectTarget',
'response_code': 'int',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'display_name': 'displayName',
'target': 'target',
'response_code': 'responseCode',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._display_name = None
self._target = None
self._response_code = None
self._freeform_tags = None
self._defined_tags = None
@property
def display_name(self):
"""
Gets the display_name of this UpdateHttpRedirectDetails.
The user-friendly name of the HTTP Redirect. The name can be changed and does not need to be unique.
:return: The display_name of this UpdateHttpRedirectDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this UpdateHttpRedirectDetails.
The user-friendly name of the HTTP Redirect. The name can be changed and does not need to be unique.
:param display_name: The display_name of this UpdateHttpRedirectDetails.
:type: str
"""
self._display_name = display_name
@property
def target(self):
"""
Gets the target of this UpdateHttpRedirectDetails.
The redirect target object including all the redirect data.
:return: The target of this UpdateHttpRedirectDetails.
:rtype: HttpRedirectTarget
"""
return self._target
@target.setter
def target(self, target):
"""
Sets the target of this UpdateHttpRedirectDetails.
The redirect target object including all the redirect data.
:param target: The target of this UpdateHttpRedirectDetails.
:type: HttpRedirectTarget
"""
self._target = target
@property
def response_code(self):
"""
Gets the response_code of this UpdateHttpRedirectDetails.
The response code returned for the redirect to the client. For more information, see `RFC 7231`__.
__ https://tools.ietf.org/html/rfc7231#section-6.4
:return: The response_code of this UpdateHttpRedirectDetails.
:rtype: int
"""
return self._response_code
@response_code.setter
def response_code(self, response_code):
"""
Sets the response_code of this UpdateHttpRedirectDetails.
The response code returned for the redirect to the client. For more information, see `RFC 7231`__.
__ https://tools.ietf.org/html/rfc7231#section-6.4
:param response_code: The response_code of this UpdateHttpRedirectDetails.
:type: int
"""
self._response_code = response_code
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this UpdateHttpRedirectDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this UpdateHttpRedirectDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this UpdateHttpRedirectDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this UpdateHttpRedirectDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this UpdateHttpRedirectDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this UpdateHttpRedirectDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this UpdateHttpRedirectDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this UpdateHttpRedirectDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
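# Usage sketch (values hypothetical; the keyword arguments map onto the
# swagger_types declared above):
#   details = UpdateHttpRedirectDetails(
#       display_name="redirect-to-new-host",
#       response_code=301,
#       freeform_tags={"Department": "Finance"},
#   )
#   details.target = HttpRedirectTarget(...)  # target construction assumed to mirror this model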
| 34.312217
| 245
| 0.66148
|
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateHttpRedirectDetails(object):
def __init__(self, **kwargs):
self.swagger_types = {
'display_name': 'str',
'target': 'HttpRedirectTarget',
'response_code': 'int',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'display_name': 'displayName',
'target': 'target',
'response_code': 'responseCode',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._display_name = None
self._target = None
self._response_code = None
self._freeform_tags = None
self._defined_tags = None
@property
def display_name(self):
return self._display_name
@display_name.setter
def display_name(self, display_name):
self._display_name = display_name
@property
def target(self):
return self._target
@target.setter
def target(self, target):
self._target = target
@property
def response_code(self):
return self._response_code
@response_code.setter
def response_code(self, response_code):
self._response_code = response_code
@property
def freeform_tags(self):
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
7905338c7a945fdaa339c63d1255e3cd348a7d62
| 2,002
|
py
|
Python
|
Project/algorithm.old.py
|
aksh4y/Papers
|
f7d89c22cafdb952d57467ab9254c17e8f5d2d4b
|
[
"MIT"
] | 5
|
2017-10-06T07:06:53.000Z
|
2021-03-08T09:12:33.000Z
|
Project/algorithm.old.py
|
aksh4y/Papers
|
f7d89c22cafdb952d57467ab9254c17e8f5d2d4b
|
[
"MIT"
] | 1
|
2017-07-07T18:46:15.000Z
|
2017-07-08T07:19:09.000Z
|
Project/algorithm.old.py
|
aksh4y/Papers
|
f7d89c22cafdb952d57467ab9254c17e8f5d2d4b
|
[
"MIT"
] | 2
|
2017-11-08T07:54:14.000Z
|
2017-11-13T05:17:37.000Z
|
#Import Library
import warnings
import numpy as np
import datetime
from extract_data import *
from word_encoder import *
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn import tree
# Send the extracted data available from extract_data to the encode function.
# This function vectorizes the text-based data into ASCII format for use by
# the algorithms.
encoded_data = encode(data)
scores = []
# Convert the float scores to int. Multiplying by 10 keeps one decimal place of
# precision that would otherwise be lost in the cast to int.
i = 0
while i < len(label):
    scores.append(int(float(label[i]) * 10))
    i += 1
# ignore deprecation warnings
def warn(*args, **kwargs):
pass
warnings.warn = warn
# SVM classifier
svm_clf = svm.SVC(kernel='linear')
#svm_clf.fit(encoded_data, scores)
# Gaussian Naive Bayes
gnb_clf = GaussianNB()
gnb_clf.fit(encoded_data, scores)
# Random Forest
rf_clf = RandomForestClassifier(n_estimators=10)
rf_clf.fit(encoded_data, scores)
# Decision Tree
dt_clf = tree.DecisionTreeClassifier()
dt_clf.fit(encoded_data, scores)
#print("SVM:")
# predict expects a 2-D array of shape (n_samples, n_features), so the single
# sample below is wrapped in an outer list
#print(svm_clf.predict([[1403, 2752, 3263, 4200, 4309, 4417, 4518, 4675, 5909, 6102, 6500, 8459, 8672, 8882, 9712, 9810, 10524, 10757, 11096, 11299, 11461, 11617, 11775]]))
print("Gaussian Naive Bayes:")
print(gnb_clf.predict([[1403, 2752, 3263, 4200, 4309, 4417, 4518, 4675, 5909, 6102, 6500, 8459, 8672, 8882, 9712, 9810, 10524, 10757, 11096, 11299, 11461, 11617, 11775]]))
print("Random Forest:")
print(rf_clf.predict([[1403, 2752, 3263, 4200, 4309, 4417, 4518, 4675, 5909, 6102, 6500, 8459, 8672, 8882, 9712, 9810, 10524, 10757, 11096, 11299, 11461, 11617, 11775]]))
print("Decision Tree:")
print(dt_clf.predict([[1403, 2752, 3263, 4200, 4309, 4417, 4518, 4675, 5909, 6102, 6500, 8459, 8672, 8882, 9712, 9810, 10524, 10757, 11096, 11299, 11461, 11617, 11775]]))
print("End time: " + str(datetime.datetime.now()).split('.')[0])
| 30.8
| 171
| 0.732767
|
import warnings
import numpy as np
import datetime
from extract_data import *
from word_encoder import *
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn import tree
encoded_data = encode(data)
scores = []
i = 0
while i < len(label):
    scores.append(int(float(label[i]) * 10))
    i += 1
def warn(*args, **kwargs):
pass
warnings.warn = warn
svm_clf = svm.SVC(kernel='linear')
gnb_clf = GaussianNB()
gnb_clf.fit(encoded_data, scores)
rf_clf = RandomForestClassifier(n_estimators=10)
rf_clf.fit(encoded_data, scores)
dt_clf = tree.DecisionTreeClassifier()
dt_clf.fit(encoded_data, scores)
print("Gaussian Naive Bayes:")
print(gnb_clf.predict([[1403, 2752, 3263, 4200, 4309, 4417, 4518, 4675, 5909, 6102, 6500, 8459, 8672, 8882, 9712, 9810, 10524, 10757, 11096, 11299, 11461, 11617, 11775]]))
print("Random Forest:")
print(rf_clf.predict([[1403, 2752, 3263, 4200, 4309, 4417, 4518, 4675, 5909, 6102, 6500, 8459, 8672, 8882, 9712, 9810, 10524, 10757, 11096, 11299, 11461, 11617, 11775]]))
print("Decision Tree:")
print(dt_clf.predict([[1403, 2752, 3263, 4200, 4309, 4417, 4518, 4675, 5909, 6102, 6500, 8459, 8672, 8882, 9712, 9810, 10524, 10757, 11096, 11299, 11461, 11617, 11775]]))
print("End time: " + str(datetime.datetime.now()).split('.')[0])
| true
| true
|
790533b50a0ea078a1a3c933f7e6fd772a34b2c3
| 295
|
py
|
Python
|
manager.py
|
xj951103/baseProject
|
60da4337881aa1f629e2aa0781371ef9ae5e30d4
|
[
"MIT"
] | null | null | null |
manager.py
|
xj951103/baseProject
|
60da4337881aa1f629e2aa0781371ef9ae5e30d4
|
[
"MIT"
] | null | null | null |
manager.py
|
xj951103/baseProject
|
60da4337881aa1f629e2aa0781371ef9ae5e30d4
|
[
"MIT"
] | null | null | null |
import os
from flask_script import Manager
from flask_migrate import MigrateCommand
from App import create_app
env = os.environ.get("flask_env", "develop")
app = create_app(env)
manager = Manager(app)
manager.add_command("db", MigrateCommand)
if __name__ == '__main__':
manager.run()
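# Typical invocations (sketch; the "db" subcommands come from Flask-Migrate's
# MigrateCommand, "runserver" from Flask-Script's default commands):
#   python manager.py db init
#   python manager.py db migrate
#   python manager.py db upgrade
#   python manager.py runserver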
| 16.388889
| 44
| 0.755932
|
import os
from flask_script import Manager
from flask_migrate import MigrateCommand
from App import create_app
env = os.environ.get("flask_env", "develop")
app = create_app(env)
manager = Manager(app)
manager.add_command("db", MigrateCommand)
if __name__ == '__main__':
manager.run()
| true
| true
|
790533ce262cb8dd7ac469fc4653ac036c9c2b85
| 996
|
py
|
Python
|
components/notifier/src/models/notification.py
|
m-zakeri/quality-time
|
531931f0d8d4f5d262ea98445868158e41d268da
|
[
"Apache-2.0"
] | null | null | null |
components/notifier/src/models/notification.py
|
m-zakeri/quality-time
|
531931f0d8d4f5d262ea98445868158e41d268da
|
[
"Apache-2.0"
] | null | null | null |
components/notifier/src/models/notification.py
|
m-zakeri/quality-time
|
531931f0d8d4f5d262ea98445868158e41d268da
|
[
"Apache-2.0"
] | null | null | null |
"""Notification."""
from models.metric_notification_data import MetricNotificationData
class Notification:
"""Handle notification contents and status."""
def __init__(self, report, metrics, destination_uuid, destination):
"""Initialise the Notification with the required info."""
self.report_title = report["title"]
self.url = report.get("url")
self.metrics: list[MetricNotificationData] = metrics
self.destination_uuid = destination_uuid
self.destination = destination
def __eq__(self, other):
"""Check if the notification itself is the same, regardless of its metric content."""
return (
self.report_title == other.report_title
and self.destination_uuid == other.destination_uuid
and self.destination == other.destination
)
def merge_notification(self, new_metrics):
"""Merge new metrics into this notification."""
self.metrics.extend(new_metrics)
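# Merge sketch (report/destination values hypothetical): __eq__ above compares
# only the report title and destination, so equal notifications can be
# deduplicated by folding their metrics together:
#   if new_notification == existing_notification:
#       existing_notification.merge_notification(new_notification.metrics)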
| 35.571429
| 93
| 0.678715
|
from models.metric_notification_data import MetricNotificationData
class Notification:
def __init__(self, report, metrics, destination_uuid, destination):
self.report_title = report["title"]
self.url = report.get("url")
self.metrics: list[MetricNotificationData] = metrics
self.destination_uuid = destination_uuid
self.destination = destination
def __eq__(self, other):
return (
self.report_title == other.report_title
and self.destination_uuid == other.destination_uuid
and self.destination == other.destination
)
def merge_notification(self, new_metrics):
self.metrics.extend(new_metrics)
| true
| true
|
7905352946a0c5b16b0b9b35510899a4d53b223c
| 83,904
|
py
|
Python
|
sdk/storage/azure-storage-blob/tests/test_common_blob_async.py
|
hytao/azure-sdk-for-python
|
382994348b289d4cb90dc74e2ddc9fadc7c5824d
|
[
"MIT"
] | null | null | null |
sdk/storage/azure-storage-blob/tests/test_common_blob_async.py
|
hytao/azure-sdk-for-python
|
382994348b289d4cb90dc74e2ddc9fadc7c5824d
|
[
"MIT"
] | null | null | null |
sdk/storage/azure-storage-blob/tests/test_common_blob_async.py
|
hytao/azure-sdk-for-python
|
382994348b289d4cb90dc74e2ddc9fadc7c5824d
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from enum import Enum
import pytest
import aiohttp
import asyncio
import requests
import time
import unittest
import os
from datetime import datetime, timedelta
from azure.core.exceptions import (
HttpResponseError,
ResourceNotFoundError,
ResourceExistsError,
ClientAuthenticationError)
from azure.core.pipeline.transport import AsyncioRequestsTransport
from azure.core.pipeline.transport import AioHttpTransport
from multidict import CIMultiDict, CIMultiDictProxy
from azure.storage.blob.aio import (
BlobServiceClient,
ContainerClient,
BlobClient,
upload_blob_to_url,
download_blob_from_url,
)
from azure.storage.blob import (
generate_blob_sas,
generate_account_sas,
generate_container_sas,
BlobType,
StorageErrorCode,
BlobSasPermissions,
ContainerSasPermissions,
ContentSettings,
BlobProperties,
RetentionPolicy,
AccessPolicy,
ResourceTypes,
AccountSasPermissions,
StandardBlobTier)
from devtools_testutils import ResourceGroupPreparer, StorageAccountPreparer
from _shared.testcase import GlobalStorageAccountPreparer
from _shared.asynctestcase import AsyncStorageTestCase
# ------------------------------------------------------------------------------
TEST_CONTAINER_PREFIX = 'container'
TEST_BLOB_PREFIX = 'blob'
# ------------------------------------------------------------------------------
class AiohttpTestTransport(AioHttpTransport):
"""Workaround to vcrpy bug: https://github.com/kevin1024/vcrpy/pull/461
"""
async def send(self, request, **config):
response = await super(AiohttpTestTransport, self).send(request, **config)
if not isinstance(response.headers, CIMultiDictProxy):
response.headers = CIMultiDictProxy(CIMultiDict(response.internal_response.headers))
response.content_type = response.headers.get("content-type")
return response
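# vcrpy replays recorded headers as a plain dict; re-wrapping them in
# CIMultiDictProxy above restores the case-insensitive, multi-value mapping that
# aiohttp responses normally expose, so header lookups in the SDK keep working.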
class StorageCommonBlobTestAsync(AsyncStorageTestCase):
# --Helpers-----------------------------------------------------------------
async def _setup(self, name, key):
self.bsc = BlobServiceClient(self.account_url(name, "blob"), credential=key, transport=AiohttpTestTransport())
self.container_name = self.get_resource_name('utcontainer')
self.byte_data = self.get_random_bytes(1024)
if self.is_live:
container = self.bsc.get_container_client(self.container_name)
try:
await container.create_container(timeout=5)
except ResourceExistsError:
pass
async def _setup_remote(self, name, key):
self.bsc2 = BlobServiceClient(self.account_url(name, "blob"), credential=key)
self.remote_container_name = 'rmt'
def _teardown(self, FILE_PATH):
if os.path.isfile(FILE_PATH):
try:
os.remove(FILE_PATH)
except:
pass
def _get_container_reference(self):
return self.get_resource_name(TEST_CONTAINER_PREFIX)
def _get_blob_reference(self):
return self.get_resource_name(TEST_BLOB_PREFIX)
async def _create_block_blob(self):
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.upload_blob(self.byte_data, length=len(self.byte_data))
return blob_name
async def _create_remote_container(self):
self.remote_container_name = self.get_resource_name('remotectnr')
remote_container = self.bsc2.get_container_client(self.remote_container_name)
try:
await remote_container.create_container()
except ResourceExistsError:
pass
async def _create_remote_block_blob(self, blob_data=None):
if not blob_data:
blob_data = b'12345678' * 1024 * 1024
source_blob_name = self._get_blob_reference()
source_blob = self.bsc2.get_blob_client(self.remote_container_name, source_blob_name)
await source_blob.upload_blob(blob_data, overwrite=True)
return source_blob
async def _wait_for_async_copy(self, blob):
count = 0
props = await blob.get_blob_properties()
while props.copy.status == 'pending':
count = count + 1
if count > 10:
self.fail('Timed out waiting for async copy to complete.')
self.sleep(6)
props = await blob.get_blob_properties()
return props
async def _enable_soft_delete(self):
delete_retention_policy = RetentionPolicy(enabled=True, days=2)
await self.bsc.set_service_properties(delete_retention_policy=delete_retention_policy)
# wait until the policy has gone into effect
if self.is_live:
time.sleep(30)
async def _disable_soft_delete(self):
delete_retention_policy = RetentionPolicy(enabled=False)
await self.bsc.set_service_properties(delete_retention_policy=delete_retention_policy)
def _assert_blob_is_soft_deleted(self, blob):
self.assertTrue(blob.deleted)
self.assertIsNotNone(blob.deleted_time)
self.assertIsNotNone(blob.remaining_retention_days)
def _assert_blob_not_soft_deleted(self, blob):
self.assertFalse(blob.deleted)
self.assertIsNone(blob.deleted_time)
self.assertIsNone(blob.remaining_retention_days)
# -- Common test cases for blobs ----------------------------------------------
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_blob_exists(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
exists = await blob.get_blob_properties()
# Assert
self.assertTrue(exists)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_blob_not_exists(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = self._get_blob_reference()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
with self.assertRaises(ResourceNotFoundError):
await blob.get_blob_properties()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_blob_snapshot_exists(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
snapshot = await blob.create_snapshot()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name, snapshot=snapshot)
exists = await blob.get_blob_properties()
# Assert
self.assertTrue(exists)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_blob_snapshot_not_exists(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name, snapshot="1988-08-18T07:52:31.6690068Z")
with self.assertRaises(ResourceNotFoundError):
await blob.get_blob_properties()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_blob_container_not_exists(self, resource_group, location, storage_account, storage_account_key):
        # In this case neither the blob nor the container exists
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = self._get_blob_reference()
# Act
blob = self.bsc.get_blob_client(self._get_container_reference(), blob_name)
with self.assertRaises(ResourceNotFoundError):
await blob.get_blob_properties()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_question_mark(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = '?ques?tion?'
blob_data = u'???'
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.upload_blob(blob_data)
# Assert
stream = await blob.download_blob()
data = await stream.readall()
self.assertIsNotNone(data)
content = data.decode('utf-8')
self.assertEqual(content, blob_data)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_special_chars(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
# Act
for c in '-._ /()$=\',~':
blob_name = '{0}a{0}a{0}'.format(c)
blob_data = c
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.upload_blob(blob_data, length=len(blob_data))
data = await (await blob.download_blob()).readall()
content = data.decode('utf-8')
self.assertEqual(content, blob_data)
# Assert
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_lease_id(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
# Act
data = b'hello world again'
resp = await blob.upload_blob(data, length=len(data), lease=lease)
# Assert
self.assertIsNotNone(resp.get('etag'))
stream = await blob.download_blob(lease=lease)
content = await stream.readall()
self.assertEqual(content, data)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_metadata(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = self._get_blob_reference()
metadata = {'hello': 'world', 'number': '42'}
# Act
data = b'hello world'
blob = self.bsc.get_blob_client(self.container_name, blob_name)
resp = await blob.upload_blob(data, length=len(data), metadata=metadata)
# Assert
self.assertIsNotNone(resp.get('etag'))
md = (await blob.get_blob_properties()).metadata
self.assertDictEqual(md, metadata)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_generator_async(self, resource_group, location, storage_account, storage_account_key):
await self._setup(storage_account.name, storage_account_key)
# Act
def gen():
yield "hello"
yield "world!"
yield " eom"
blob = self.bsc.get_blob_client(self.container_name, "gen_blob")
resp = await blob.upload_blob(data=gen())
# Assert
self.assertIsNotNone(resp.get('etag'))
content = await (await blob.download_blob()).readall()
self.assertEqual(content, b"helloworld! eom")
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_requests_async(self, resource_group, location, storage_account, storage_account_key):
await self._setup(storage_account.name, storage_account_key)
# Act
uri = "http://www.gutenberg.org/files/59466/59466-0.txt"
data = requests.get(uri, stream=True)
blob = self.bsc.get_blob_client(self.container_name, "gutenberg")
resp = await blob.upload_blob(data=data.raw)
self.assertIsNotNone(resp.get('etag'))
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_aiohttp_async(self, resource_group, location, storage_account, storage_account_key):
await self._setup(storage_account.name, storage_account_key)
blob = self.bsc.get_blob_client(self.container_name, "gutenberg")
# Act
uri = "http://www.gutenberg.org/files/59466/59466-0.txt"
async with aiohttp.ClientSession() as session:
async with session.get(uri) as data:
async for text, _ in data.content.iter_chunks():
resp = await blob.upload_blob(data=text)
self.assertIsNotNone(resp.get('etag'))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
stream = await blob.download_blob()
content = await stream.readall()
# Assert
self.assertEqual(content, self.byte_data)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_snapshot(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
snap = await blob.create_snapshot()
snapshot = self.bsc.get_blob_client(
self.container_name, blob_name, snapshot=snap)
# Act
stream = await snapshot.download_blob()
content = await stream.readall()
# Assert
self.assertEqual(content, self.byte_data)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_snapshot_previous(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
snap = await blob.create_snapshot()
snapshot = self.bsc.get_blob_client(
self.container_name, blob_name, snapshot=snap)
upload_data = b'hello world again'
await blob.upload_blob(upload_data, length=len(upload_data), overwrite=True)
# Act
blob_previous = await snapshot.download_blob()
blob_previous_bytes = await blob_previous.readall()
blob_latest = await blob.download_blob()
blob_latest_bytes = await blob_latest.readall()
# Assert
self.assertEqual(blob_previous_bytes, self.byte_data)
self.assertEqual(blob_latest_bytes, b'hello world again')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_range(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
stream = await blob.download_blob(offset=0, length=5)
content = await stream.readall()
# Assert
self.assertEqual(content, self.byte_data[:5])
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_lease(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
# Act
stream = await blob.download_blob(lease=lease)
content = await stream.readall()
await lease.release()
# Assert
self.assertEqual(content, self.byte_data)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_non_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = self._get_blob_reference()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
with self.assertRaises(ResourceNotFoundError):
await blob.download_blob()
# Assert
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_set_blob_properties_with_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.set_http_headers(
content_settings=ContentSettings(
content_language='spanish',
content_disposition='inline'),
)
# Assert
props = await blob.get_blob_properties()
self.assertEqual(props.content_settings.content_language, 'spanish')
self.assertEqual(props.content_settings.content_disposition, 'inline')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_set_blob_properties_with_blob_settings_param(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
props = await blob.get_blob_properties()
# Act
props.content_settings.content_language = 'spanish'
props.content_settings.content_disposition = 'inline'
await blob.set_http_headers(content_settings=props.content_settings)
# Assert
props = await blob.get_blob_properties()
self.assertEqual(props.content_settings.content_language, 'spanish')
self.assertEqual(props.content_settings.content_disposition, 'inline')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_properties(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
props = await blob.get_blob_properties()
# Assert
self.assertIsInstance(props, BlobProperties)
self.assertEqual(props.blob_type, BlobType.BlockBlob)
self.assertEqual(props.size, len(self.byte_data))
self.assertEqual(props.lease.status, 'unlocked')
self.assertIsNotNone(props.creation_time)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_properties_fail(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name, snapshot=1)
with self.assertRaises(HttpResponseError) as e:
await blob.get_blob_properties() # Invalid snapshot value of 1
# Assert
# TODO: No error code returned
# self.assertEqual(StorageErrorCode.invalid_query_parameter_value, e.exception.error_code)
# This test is to validate that the ErrorCode is retrieved from the header during a
# GET request. This is preferred to relying on the ErrorCode in the body.
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_metadata_fail(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name, snapshot=1)
with self.assertRaises(HttpResponseError) as e:
(await blob.get_blob_properties()).metadata # Invalid snapshot value of 1
# Assert
# TODO: No error code returned
# self.assertEqual(StorageErrorCode.invalid_query_parameter_value, e.exception.error_code)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_server_encryption(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = await blob.download_blob()
# Assert
self.assertTrue(data.properties.server_encrypted)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_properties_server_encryption(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
props = await blob.get_blob_properties()
# Assert
self.assertTrue(props.server_encrypted)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_list_blobs_server_encryption(self, resource_group, location, storage_account, storage_account_key):
# test can only run live
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._create_block_blob()
container = self.bsc.get_container_client(self.container_name)
blob_list = []
async for b in container.list_blobs():
blob_list.append(b)
# Act
# Assert
for blob in blob_list:
self.assertTrue(blob.server_encrypted)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_no_server_encryption(self, resource_group, location, storage_account, storage_account_key):
pytest.skip("Aiohttp headers dict (CIMultiDictProxy) is immutable.")
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
def callback(response):
response.http_response.headers['x-ms-server-encrypted'] = 'false'
props = await blob.get_blob_properties(raw_response_hook=callback)
# Assert
self.assertFalse(props.server_encrypted)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_properties_with_snapshot(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
container = self.bsc.get_container_client(self.container_name)
blob = self.bsc.get_blob_client(self.container_name, blob_name)
res = await blob.create_snapshot()
blobs = []
async for b in container.list_blobs(include='snapshots'):
blobs.append(b)
self.assertEqual(len(blobs), 2)
# Act
snapshot = self.bsc.get_blob_client(self.container_name, blob_name, snapshot=res)
props = await snapshot.get_blob_properties()
# Assert
self.assertIsNotNone(blob)
self.assertEqual(props.blob_type, BlobType.BlockBlob)
self.assertEqual(props.size, len(self.byte_data))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_properties_with_leased_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
# Act
props = await blob.get_blob_properties()
# Assert
self.assertIsInstance(props, BlobProperties)
self.assertEqual(props.blob_type, BlobType.BlockBlob)
self.assertEqual(props.size, len(self.byte_data))
self.assertEqual(props.lease.status, 'locked')
self.assertEqual(props.lease.state, 'leased')
self.assertEqual(props.lease.duration, 'infinite')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_metadata(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
md = (await blob.get_blob_properties()).metadata
# Assert
self.assertIsNotNone(md)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_set_blob_metadata_with_upper_case(self, resource_group, location, storage_account, storage_account_key):
        # Bug in devtools: it converts upper-case metadata headers to lowercase in
        # recordings, so this test only passes live.
# Arrange
await self._setup(storage_account.name, storage_account_key)
metadata = {'hello': 'world', 'number': '42', 'UP': 'UPval'}
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.set_blob_metadata(metadata)
# Assert
md = (await blob.get_blob_properties()).metadata
self.assertEqual(3, len(md))
self.assertEqual(md['hello'], 'world')
self.assertEqual(md['number'], '42')
self.assertEqual(md['UP'], 'UPval')
self.assertFalse('up' in md)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_delete_blob_with_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
resp = await blob.delete_blob()
# Assert
self.assertIsNone(resp)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_delete_blob_with_non_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = self._get_blob_reference()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
with self.assertRaises(ResourceNotFoundError):
await blob.delete_blob()
# Assert
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_delete_blob_snapshot(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
snap = await blob.create_snapshot()
snapshot = self.bsc.get_blob_client(
self.container_name, blob_name, snapshot=snap)
# Act
await snapshot.delete_blob()
# Assert
container = self.bsc.get_container_client(self.container_name)
blobs = []
async for b in container.list_blobs(include='snapshots'):
blobs.append(b)
self.assertEqual(len(blobs), 1)
self.assertEqual(blobs[0].name, blob_name)
self.assertIsNone(blobs[0].snapshot)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_delete_blob_snapshots(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.create_snapshot()
# Act
await blob.delete_blob(delete_snapshots='only')
# Assert
container = self.bsc.get_container_client(self.container_name)
blobs = []
async for b in container.list_blobs(include='snapshots'):
blobs.append(b)
self.assertEqual(len(blobs), 1)
self.assertIsNone(blobs[0].snapshot)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_delete_blob_with_snapshots(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.create_snapshot()
# Act
# with self.assertRaises(HttpResponseError):
# blob.delete_blob()
await blob.delete_blob(delete_snapshots='include')
# Assert
container = self.bsc.get_container_client(self.container_name)
blobs = []
async for b in container.list_blobs(include='snapshots'):
blobs.append(b)
self.assertEqual(len(blobs), 0)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_soft_delete_blob_without_snapshots(self, resource_group, location, storage_account, storage_account_key):
try:
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._enable_soft_delete()
blob_name = await self._create_block_blob()
container = self.bsc.get_container_client(self.container_name)
blob = container.get_blob_client(blob_name)
# Soft delete the blob
await blob.delete_blob()
blob_list = []
async for b in container.list_blobs(include='deleted'):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 1)
self._assert_blob_is_soft_deleted(blob_list[0])
# list_blobs should not list soft deleted blobs if Include(deleted=True) is not specified
blob_list = []
async for b in container.list_blobs():
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 0)
# Restore blob with undelete
await blob.undelete_blob()
blob_list = []
async for b in container.list_blobs(include='deleted'):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 1)
self._assert_blob_not_soft_deleted(blob_list[0])
finally:
await self._disable_soft_delete()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_soft_delete_single_blob_snapshot(self, resource_group, location, storage_account, storage_account_key):
try:
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._enable_soft_delete()
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
blob_snapshot_1 = await blob.create_snapshot()
blob_snapshot_2 = await blob.create_snapshot()
# Soft delete blob_snapshot_1
snapshot_1 = self.bsc.get_blob_client(
self.container_name, blob_name, snapshot=blob_snapshot_1)
await snapshot_1.delete_blob()
with self.assertRaises(ValueError):
await snapshot_1.delete_blob(delete_snapshots='only')
container = self.bsc.get_container_client(self.container_name)
blob_list = []
async for b in container.list_blobs(include=["snapshots", "deleted"]):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 3)
for listedblob in blob_list:
if listedblob.snapshot == blob_snapshot_1['snapshot']:
self._assert_blob_is_soft_deleted(listedblob)
else:
self._assert_blob_not_soft_deleted(listedblob)
# list_blobs should not list soft deleted blob snapshots if Include(deleted=True) is not specified
blob_list = []
async for b in container.list_blobs(include='snapshots'):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 2)
# Restore snapshot with undelete
await blob.undelete_blob()
blob_list = []
async for b in container.list_blobs(include=["snapshots", "deleted"]):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 3)
for blob in blob_list:
self._assert_blob_not_soft_deleted(blob)
finally:
await self._disable_soft_delete()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_soft_delete_only_snapshots_of_blob(self, resource_group, location, storage_account, storage_account_key):
try:
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._enable_soft_delete()
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
blob_snapshot_1 = await blob.create_snapshot()
blob_snapshot_2 = await blob.create_snapshot()
# Soft delete all snapshots
await blob.delete_blob(delete_snapshots='only')
container = self.bsc.get_container_client(self.container_name)
blob_list = []
async for b in container.list_blobs(include=["snapshots", "deleted"]):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 3)
for listedblob in blob_list:
if listedblob.snapshot == blob_snapshot_1['snapshot']:
self._assert_blob_is_soft_deleted(listedblob)
elif listedblob.snapshot == blob_snapshot_2['snapshot']:
self._assert_blob_is_soft_deleted(listedblob)
else:
self._assert_blob_not_soft_deleted(listedblob)
# list_blobs should not list soft deleted blob snapshots if Include(deleted=True) is not specified
blob_list = []
async for b in container.list_blobs(include="snapshots"):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 1)
# Restore snapshots with undelete
await blob.undelete_blob()
blob_list = []
async for b in container.list_blobs(include=["snapshots", "deleted"]):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 3)
for blob in blob_list:
self._assert_blob_not_soft_deleted(blob)
finally:
await self._disable_soft_delete()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_soft_delete_blob_including_all_snapshots(self, resource_group, location, storage_account, storage_account_key):
try:
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._enable_soft_delete()
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
blob_snapshot_1 = await blob.create_snapshot()
blob_snapshot_2 = await blob.create_snapshot()
# Soft delete blob and all snapshots
await blob.delete_blob(delete_snapshots='include')
container = self.bsc.get_container_client(self.container_name)
blob_list = []
async for b in container.list_blobs(include=["snapshots", "deleted"]):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 3)
for listedblob in blob_list:
self._assert_blob_is_soft_deleted(listedblob)
# list_blobs should not list soft deleted blob snapshots if Include(deleted=True) is not specified
blob_list = []
async for b in container.list_blobs(include=["snapshots"]):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 0)
# Restore blob and snapshots with undelete
await blob.undelete_blob()
blob_list = []
async for b in container.list_blobs(include=["snapshots", "deleted"]):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 3)
for blob in blob_list:
self._assert_blob_not_soft_deleted(blob)
finally:
await self._disable_soft_delete()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_soft_delete_with_leased_blob(self, resource_group, location, storage_account, storage_account_key):
try:
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._enable_soft_delete()
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
# Soft delete the blob without lease_id should fail
with self.assertRaises(HttpResponseError):
await blob.delete_blob()
# Soft delete the blob
await blob.delete_blob(lease=lease)
container = self.bsc.get_container_client(self.container_name)
blob_list = []
async for b in container.list_blobs(include="deleted"):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 1)
self._assert_blob_is_soft_deleted(blob_list[0])
# list_blobs should not list soft deleted blobs if Include(deleted=True) is not specified
blob_list = []
async for b in container.list_blobs():
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 0)
            # Restore blob with undelete; this also gets rid of the lease
await blob.undelete_blob()
blob_list = []
async for b in container.list_blobs(include="deleted"):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 1)
self._assert_blob_not_soft_deleted(blob_list[0])
finally:
await self._disable_soft_delete()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_copy_blob_with_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
sourceblob = '{0}/{1}/{2}'.format(
self.account_url(storage_account.name, "blob"), self.container_name, blob_name)
copyblob = self.bsc.get_blob_client(self.container_name, 'blob1copy')
copy = await copyblob.start_copy_from_url(sourceblob)
# Assert
self.assertIsNotNone(copy)
self.assertEqual(copy['copy_status'], 'success')
self.assertFalse(isinstance(copy['copy_status'], Enum))
self.assertIsNotNone(copy['copy_id'])
copy_content = await (await copyblob.download_blob()).readall()
self.assertEqual(copy_content, self.byte_data)
# @GlobalStorageAccountPreparer()
# @AsyncStorageTestCase.await_prepared_test
# TODO: external copy was supported since 2019-02-02
# async def test_copy_blob_with_external_blob_fails(self):
# # Arrange
# await self._setup()
# source_blob = "http://www.gutenberg.org/files/59466/59466-0.txt"
# copied_blob = self.bsc.get_blob_client(self.container_name, '59466-0.txt')
#
# # Act
# copy = await copied_blob.start_copy_from_url(source_blob)
# self.assertEqual(copy['copy_status'], 'pending')
# props = await self._wait_for_async_copy(copied_blob)
#
# # Assert
# self.assertEqual(props.copy.status_description, '500 InternalServerError "Copy failed."')
# self.assertEqual(props.copy.status, 'failed')
# self.assertIsNotNone(props.copy.id)
#
# @record
# def test_copy_blob_with_external_blob_fails(self):
# loop = asyncio.get_event_loop()
# loop.run_until_complete(self._test_copy_blob_with_external_blob_fails())
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_copy_blob_async_private_blob_no_sas(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob()
# Act
target_blob_name = 'targetblob'
target_blob = self.bsc.get_blob_client(self.container_name, target_blob_name)
# Assert
with self.assertRaises(ResourceNotFoundError):
await target_blob.start_copy_from_url(source_blob.url)
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_copy_blob_async_private_blob_with_sas(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob(blob_data=data)
sas_token = generate_blob_sas(
source_blob.account_name,
source_blob.container_name,
source_blob.blob_name,
snapshot=source_blob.snapshot,
account_key=source_blob.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
blob = BlobClient.from_blob_url(source_blob.url, credential=sas_token)
# Act
target_blob_name = 'targetblob'
target_blob = self.bsc.get_blob_client(self.container_name, target_blob_name)
copy_resp = await target_blob.start_copy_from_url(blob.url)
# Assert
props = await self._wait_for_async_copy(target_blob)
self.assertEqual(props.copy.status, 'success')
actual_data = await (await target_blob.download_blob()).readall()
self.assertEqual(actual_data, data)
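    # A container-scoped SAS would authorize the same copy (sketch; values hypothetical):
    #   sas = generate_container_sas(
    #       source_blob.account_name, source_blob.container_name,
    #       account_key=rmt_key, permission=ContainerSasPermissions(read=True),
    #       expiry=datetime.utcnow() + timedelta(hours=1))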
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_abort_copy_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
source_blob = "http://www.gutenberg.org/files/59466/59466-0.txt"
copied_blob = self.bsc.get_blob_client(self.container_name, '59466-0.txt')
# Act
copy = await copied_blob.start_copy_from_url(source_blob)
self.assertEqual(copy['copy_status'], 'pending')
await copied_blob.abort_copy(copy)
props = await self._wait_for_async_copy(copied_blob)
self.assertEqual(props.copy.status, 'aborted')
# Assert
actual_data = await copied_blob.download_blob()
bytes_data = await (await copied_blob.download_blob()).readall()
self.assertEqual(bytes_data, b"")
self.assertEqual(actual_data.properties.copy.status, 'aborted')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_abort_copy_blob_with_synchronous_copy_fails(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
source_blob_name = await self._create_block_blob()
source_blob = self.bsc.get_blob_client(self.container_name, source_blob_name)
# Act
target_blob_name = 'targetblob'
target_blob = self.bsc.get_blob_client(self.container_name, target_blob_name)
copy_resp = await target_blob.start_copy_from_url(source_blob.url)
with self.assertRaises(HttpResponseError):
await target_blob.abort_copy(copy_resp)
# Assert
self.assertEqual(copy_resp['copy_status'], 'success')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_snapshot_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
resp = await blob.create_snapshot()
# Assert
self.assertIsNotNone(resp)
self.assertIsNotNone(resp['snapshot'])
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_acquire_and_release(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
await lease.release()
lease2 = await blob.acquire_lease()
# Assert
self.assertIsNotNone(lease)
self.assertIsNotNone(lease2)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_with_duration(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease(lease_duration=15)
resp = await blob.upload_blob(b'hello 2', length=7, lease=lease)
self.sleep(15)
# Assert
with self.assertRaises(HttpResponseError):
await blob.upload_blob(b'hello 3', length=7, lease=lease)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_with_proposed_lease_id(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease_id = 'a0e6c241-96ea-45a3-a44b-6ae868bc14d0'
lease = await blob.acquire_lease(lease_id=lease_id)
# Assert
self.assertEqual(lease.id, lease_id)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_change_lease_id(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease_id = 'a0e6c241-96ea-45a3-a44b-6ae868bc14d0'
lease = await blob.acquire_lease()
first_lease_id = lease.id
await lease.change(lease_id)
await lease.renew()
# Assert
self.assertNotEqual(first_lease_id, lease.id)
self.assertEqual(lease.id, lease_id)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_break_period(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease(lease_duration=15)
lease_time = await lease.break_lease(lease_break_period=5)
resp = await blob.upload_blob(b'hello 2', length=7, lease=lease)
self.sleep(5)
with self.assertRaises(HttpResponseError):
await blob.upload_blob(b'hello 3', length=7, lease=lease)
# Assert
self.assertIsNotNone(lease.id)
self.assertIsNotNone(lease_time)
self.assertIsNotNone(resp.get('etag'))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_acquire_and_renew(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
first_id = lease.id
await lease.renew()
# Assert
self.assertEqual(first_id, lease.id)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_acquire_twice_fails(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
# Act
with self.assertRaises(HttpResponseError):
await blob.acquire_lease()
# Assert
self.assertIsNotNone(lease.id)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_unicode_get_blob_unicode_name(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = '啊齄丂狛狜'
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.upload_blob(b'hello world')
# Act
stream = await blob.download_blob()
content = await stream.readall()
# Assert
self.assertEqual(content, b'hello world')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_blob_unicode_data(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
data = u'hello world啊齄丂狛狜'
resp = await blob.upload_blob(data)
# Assert
self.assertIsNotNone(resp.get('etag'))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_no_sas_private_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
response = requests.get(blob.url)
# Assert
self.assertFalse(response.ok)
self.assertNotEqual(-1, response.text.find('ResourceNotFound'))
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_no_sas_public_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'a public blob can be read without a shared access signature'
blob_name = 'blob1.txt'
container_name = self._get_container_reference()
try:
container = await self.bsc.create_container(container_name, public_access='blob')
except ResourceExistsError:
container = self.bsc.get_container_client(container_name)
blob = await container.upload_blob(blob_name, data)
# Act
response = requests.get(blob.url)
# Assert
self.assertTrue(response.ok)
self.assertEqual(data, response.content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_public_access_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'public access blob'
blob_name = 'blob1.txt'
container_name = self._get_container_reference()
try:
container = await self.bsc.create_container(container_name, public_access='blob')
except ResourceExistsError:
container = self.bsc.get_container_client(container_name)
blob = await container.upload_blob(blob_name, data)
# Act
service = BlobClient.from_blob_url(blob.url)
# self._set_test_proxy(service, self.settings)
content = await (await service.download_blob()).readall()
# Assert
self.assertEqual(data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_sas_access_blob(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
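# The read-only, one-hour SAS generated above becomes the credential when the client is rebuilt from the blob URL below; the same pattern recurs in the SAS tests that follow.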
# Act
service = BlobClient.from_blob_url(blob.url, credential=token)
# self._set_test_proxy(service, self.settings)
content = await (await service.download_blob()).readall()
# Assert
self.assertEqual(self.byte_data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_sas_signed_identifier(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
container = self.bsc.get_container_client(self.container_name)
blob = self.bsc.get_blob_client(self.container_name, blob_name)
access_policy = AccessPolicy()
access_policy.start = datetime.utcnow() - timedelta(hours=1)
access_policy.expiry = datetime.utcnow() + timedelta(hours=1)
access_policy.permission = BlobSasPermissions(read=True)
identifiers = {'testid': access_policy}
resp = await container.set_container_access_policy(identifiers)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
policy_id='testid')
# Act
service = BlobClient.from_blob_url(blob.url, credential=token)
# self._set_test_proxy(service, self.settings)
result = await (await service.download_blob()).readall()
# Assert
self.assertEqual(self.byte_data, result)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_account_sas(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
token = generate_account_sas(
self.bsc.account_name,
self.bsc.credential.account_key,
ResourceTypes(container=True, object=True),
AccountSasPermissions(read=True),
datetime.utcnow() + timedelta(hours=1),
)
# Act
blob = BlobClient(
self.bsc.url, container_name=self.container_name, blob_name=blob_name, credential=token)
container = ContainerClient(
self.bsc.url, container_name=self.container_name, credential=token)
await container.get_container_properties()
blob_response = requests.get(blob.url)
container_response = requests.get(container.url, params={'restype': 'container'})
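# restype=container turns the bare GET into a Get Container Properties call; both raw requests are expected to succeed because the account SAS is carried in the client URLs.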
# Assert
self.assertTrue(blob_response.ok)
self.assertEqual(self.byte_data, blob_response.content)
self.assertTrue(container_response.ok)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_token_credential(self, resource_group, location, storage_account, storage_account_key):
await self._setup(storage_account.name, storage_account_key)
token_credential = self.generate_oauth_token()
# Action 1: make sure token works
service = BlobServiceClient(self.account_url(storage_account.name, "blob"), credential=token_credential, transport=AiohttpTestTransport())
result = await service.get_service_properties()
self.assertIsNotNone(result)
# Action 2: change token value to make request fail
fake_credential = self.generate_fake_token()
service = BlobServiceClient(self.account_url(storage_account.name, "blob"), credential=fake_credential, transport=AiohttpTestTransport())
with self.assertRaises(ClientAuthenticationError):
await service.get_service_properties()
# Action 3: update token to make it working again
service = BlobServiceClient(self.account_url(storage_account.name, "blob"), credential=token_credential, transport=AiohttpTestTransport())
result = await service.get_service_properties()
self.assertIsNotNone(result)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_shared_read_access_blob(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
await self._setup(storage_account.name, storage_account_key)
# Arrange
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
# Act
sas_blob = BlobClient.from_blob_url(blob.url, credential=token)
response = requests.get(sas_blob.url)
# Assert
response.raise_for_status()
self.assertTrue(response.ok)
self.assertEqual(self.byte_data, response.content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_shared_read_access_blob_with_content_query_params(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
cache_control='no-cache',
content_disposition='inline',
content_encoding='utf-8',
content_language='fr',
content_type='text',
)
sas_blob = BlobClient.from_blob_url(blob.url, credential=token)
# Act
response = requests.get(sas_blob.url)
# Assert
response.raise_for_status()
self.assertEqual(self.byte_data, response.content)
self.assertEqual(response.headers['cache-control'], 'no-cache')
self.assertEqual(response.headers['content-disposition'], 'inline')
self.assertEqual(response.headers['content-encoding'], 'utf-8')
self.assertEqual(response.headers['content-language'], 'fr')
self.assertEqual(response.headers['content-type'], 'text')
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_shared_write_access_blob(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
updated_data = b'updated blob data'
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
permission=BlobSasPermissions(write=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
sas_blob = BlobClient.from_blob_url(blob.url, credential=token)
# Act
headers = {'x-ms-blob-type': 'BlockBlob'}
response = requests.put(sas_blob.url, headers=headers, data=updated_data)
# Assert
response.raise_for_status()
self.assertTrue(response.ok)
data = await (await blob.download_blob()).readall()
self.assertEqual(updated_data, data)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_shared_delete_access_blob(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
permission=BlobSasPermissions(delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
sas_blob = BlobClient.from_blob_url(blob.url, credential=token)
# Act
response = requests.delete(sas_blob.url)
# Assert
response.raise_for_status()
self.assertTrue(response.ok)
with self.assertRaises(HttpResponseError):
await sas_blob.download_blob()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_account_information(self, resource_group, location, storage_account, storage_account_key):
# Act
await self._setup(storage_account.name, storage_account_key)
info = await self.bsc.get_account_information()
# Assert
self.assertIsNotNone(info.get('sku_name'))
self.assertIsNotNone(info.get('account_kind'))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_account_information_with_container_name(self, resource_group, location, storage_account, storage_account_key):
# Act
# Container name gets ignored
await self._setup(storage_account.name, storage_account_key)
container = self.bsc.get_container_client("missing")
info = await container.get_account_information()
# Assert
self.assertIsNotNone(info.get('sku_name'))
self.assertIsNotNone(info.get('account_kind'))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_account_information_with_blob_name(self, resource_group, location, storage_account, storage_account_key):
# Act
# Both container and blob names get ignored
await self._setup(storage_account.name, storage_account_key)
blob = self.bsc.get_blob_client("missing", "missing")
info = await blob.get_account_information()
# Assert
self.assertIsNotNone(info.get('sku_name'))
self.assertIsNotNone(info.get('account_kind'))
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_account_information_with_container_sas(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
container = self.bsc.get_container_client(self.container_name)
token = generate_container_sas(
container.account_name,
container.container_name,
account_key=container.credential.account_key,
permission=ContainerSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
sas_container = ContainerClient.from_container_url(container.url, credential=token)
# Act
info = await sas_container.get_account_information()
# Assert
self.assertIsNotNone(info.get('sku_name'))
self.assertIsNotNone(info.get('account_kind'))
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_account_information_with_blob_sas(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
sas_blob = BlobClient.from_blob_url(blob.url, credential=token)
# Act
info = await sas_blob.get_account_information()
# Assert
self.assertIsNotNone(info.get('sku_name'))
self.assertIsNotNone(info.get('account_kind'))
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_download_to_file_with_sas(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob(blob_data=data)
sas_token = generate_blob_sas(
source_blob.account_name,
source_blob.container_name,
source_blob.blob_name,
snapshot=source_blob.snapshot,
account_key=source_blob.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
FILE_PATH = '_to_file_with_sas.async.dat'
blob = BlobClient.from_blob_url(source_blob.url, credential=sas_token)
# Act
await download_blob_from_url(blob.url, FILE_PATH)
# Assert
with open(FILE_PATH, 'rb') as stream:
actual = stream.read()
self.assertEqual(data, actual)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_download_to_file_with_credential(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob(blob_data=data)
FILE_PATH = 'to_file_with_credential.async.dat'
# Act
await download_blob_from_url(
source_blob.url, FILE_PATH,
max_concurrency=2,
credential=rmt_key)
# Assert
with open(FILE_PATH, 'rb') as stream:
actual = stream.read()
self.assertEqual(data, actual)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_download_to_stream_with_credential(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob(blob_data=data)
FILE_PATH = 'to_stream_with_credential.async.dat'
# Act
with open(FILE_PATH, 'wb') as stream:
await download_blob_from_url(
source_blob.url, stream,
max_concurrency=2,
credential=rmt_key)
# Assert
with open(FILE_PATH, 'rb') as stream:
actual = stream.read()
self.assertEqual(data, actual)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_download_to_file_with_existing_file(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob(blob_data=data)
FILE_PATH = 'with_existing_file.async.dat'
# Act
await download_blob_from_url(
source_blob.url, FILE_PATH,
credential=rmt_key)
with self.assertRaises(ValueError):
await download_blob_from_url(source_blob.url, FILE_PATH)
# Assert
with open(FILE_PATH, 'rb') as stream:
actual = stream.read()
self.assertEqual(data, actual)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_download_to_file_with_existing_file_overwrite(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob(blob_data=data)
FILE_PATH = 'existing_file_overwrite.async.dat'
# Act
await download_blob_from_url(
source_blob.url, FILE_PATH,
credential=rmt_key)
data2 = b'ABCDEFGH' * 1024 * 1024
source_blob = await self._create_remote_block_blob(blob_data=data2)
await download_blob_from_url(
source_blob.url, FILE_PATH, overwrite=True,
credential=rmt_key)
# Assert
with open(FILE_PATH, 'rb') as stream:
actual = stream.read()
self.assertEqual(data2, actual)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_bytes_with_sas(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
permission=BlobSasPermissions(write=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
sas_blob = BlobClient.from_blob_url(blob.url, credential=token)
# Act
uploaded = await upload_blob_to_url(sas_blob.url, data)
# Assert
self.assertIsNotNone(uploaded)
content = await (await blob.download_blob()).readall()
self.assertEqual(data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_bytes_with_credential(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
uploaded = await upload_blob_to_url(
blob.url, data, credential=storage_account_key)
# Assert
self.assertIsNotNone(uploaded)
content = await (await blob.download_blob()).readall()
self.assertEqual(data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_bytes_with_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.upload_blob(b"existing_data")
# Act
with self.assertRaises(ResourceExistsError):
await upload_blob_to_url(
blob.url, data, credential=storage_account_key)
# Assert
content = await (await blob.download_blob()).readall()
self.assertEqual(b"existing_data", content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_bytes_with_existing_blob_overwrite(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.upload_blob(b"existing_data")
# Act
uploaded = await upload_blob_to_url(
blob.url, data,
overwrite=True,
credential=storage_account_key)
# Assert
self.assertIsNotNone(uploaded)
content = await (await blob.download_blob()).readall()
self.assertEqual(data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_text_with_credential(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = '12345678' * 1024 * 1024
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
uploaded = await upload_blob_to_url(
blob.url, data, credential=storage_account_key)
# Assert
self.assertIsNotNone(uploaded)
stream = await blob.download_blob(encoding='UTF-8')
content = await stream.readall()
self.assertEqual(data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_file_with_credential(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
FILE_PATH = 'url_file_with_credential.async.dat'
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
with open(FILE_PATH, 'rb'):
uploaded = await upload_blob_to_url(
blob.url, data, credential=storage_account_key)
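# Note: the in-memory bytes are uploaded directly; the file opened above only seeds FILE_PATH and is not used as the upload source.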
# Assert
self.assertIsNotNone(uploaded)
content = await (await blob.download_blob()).readall()
self.assertEqual(data, content)
self._teardown(FILE_PATH)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_transport_closed_only_once(self, resource_group, location, storage_account, storage_account_key):
container_name = self.get_resource_name('utcontainerasync')
transport = AioHttpTransport()
bsc = BlobServiceClient(self.account_url(storage_account.name, "blob"), credential=storage_account_key, transport=transport)
blob_name = self._get_blob_reference()
async with bsc:
await bsc.get_service_properties()
assert transport.session is not None
async with bsc.get_blob_client(container_name, blob_name) as bc:
assert transport.session is not None
await bsc.get_service_properties()
assert transport.session is not None
# ------------------------------------------------------------------------------
| avg_line_length: 40.261036 | max_line_length: 149 | alphanum_fraction: 0.685593 |
from enum import Enum
import pytest
import aiohttp
import asyncio
import requests
import time
import unittest
import os
from datetime import datetime, timedelta
from azure.core.exceptions import (
HttpResponseError,
ResourceNotFoundError,
ResourceExistsError,
ClientAuthenticationError)
from azure.core.pipeline.transport import AsyncioRequestsTransport
from azure.core.pipeline.transport import AioHttpTransport
from multidict import CIMultiDict, CIMultiDictProxy
from azure.storage.blob.aio import (
BlobServiceClient,
ContainerClient,
BlobClient,
upload_blob_to_url,
download_blob_from_url,
)
from azure.storage.blob import (
generate_blob_sas,
generate_account_sas,
generate_container_sas,
BlobType,
StorageErrorCode,
BlobSasPermissions,
ContainerSasPermissions,
ContentSettings,
BlobProperties,
RetentionPolicy,
AccessPolicy,
ResourceTypes,
AccountSasPermissions,
StandardBlobTier)
from devtools_testutils import ResourceGroupPreparer, StorageAccountPreparer
from _shared.testcase import GlobalStorageAccountPreparer
from _shared.asynctestcase import AsyncStorageTestCase
TEST_CONTAINER_PREFIX = 'container'
TEST_BLOB_PREFIX = 'blob'
class AiohttpTestTransport(AioHttpTransport):
async def send(self, request, **config):
response = await super(AiohttpTestTransport, self).send(request, **config)
if not isinstance(response.headers, CIMultiDictProxy):
response.headers = CIMultiDictProxy(CIMultiDict(response.internal_response.headers))
response.content_type = response.headers.get("content-type")
return response
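# Test-only transport: copies aiohttp response headers into a case-insensitive proxy and restores content_type so recorded and live runs see consistent headers.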
class StorageCommonBlobTestAsync(AsyncStorageTestCase):
async def _setup(self, name, key):
self.bsc = BlobServiceClient(self.account_url(name, "blob"), credential=key, transport=AiohttpTestTransport())
self.container_name = self.get_resource_name('utcontainer')
self.byte_data = self.get_random_bytes(1024)
if self.is_live:
container = self.bsc.get_container_client(self.container_name)
try:
await container.create_container(timeout=5)
except ResourceExistsError:
pass
async def _setup_remote(self, name, key):
self.bsc2 = BlobServiceClient(self.account_url(name, "blob"), credential=key)
self.remote_container_name = 'rmt'
def _teardown(self, FILE_PATH):
if os.path.isfile(FILE_PATH):
try:
os.remove(FILE_PATH)
except:
pass
def _get_container_reference(self):
return self.get_resource_name(TEST_CONTAINER_PREFIX)
def _get_blob_reference(self):
return self.get_resource_name(TEST_BLOB_PREFIX)
async def _create_block_blob(self):
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.upload_blob(self.byte_data, length=len(self.byte_data))
return blob_name
async def _create_remote_container(self):
self.remote_container_name = self.get_resource_name('remotectnr')
remote_container = self.bsc2.get_container_client(self.remote_container_name)
try:
await remote_container.create_container()
except ResourceExistsError:
pass
async def _create_remote_block_blob(self, blob_data=None):
if not blob_data:
blob_data = b'12345678' * 1024 * 1024
source_blob_name = self._get_blob_reference()
source_blob = self.bsc2.get_blob_client(self.remote_container_name, source_blob_name)
await source_blob.upload_blob(blob_data, overwrite=True)
return source_blob
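# _wait_for_async_copy polls the blob's copy status roughly every 6 seconds and fails the test if the service-side copy is still pending after 10 attempts.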
async def _wait_for_async_copy(self, blob):
count = 0
props = await blob.get_blob_properties()
while props.copy.status == 'pending':
count = count + 1
if count > 10:
self.fail('Timed out waiting for async copy to complete.')
self.sleep(6)
props = await blob.get_blob_properties()
return props
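# Soft delete is toggled through the service-level delete retention policy; the 30-second sleep gives the setting time to propagate when running live.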
async def _enable_soft_delete(self):
delete_retention_policy = RetentionPolicy(enabled=True, days=2)
await self.bsc.set_service_properties(delete_retention_policy=delete_retention_policy)
if self.is_live:
time.sleep(30)
async def _disable_soft_delete(self):
delete_retention_policy = RetentionPolicy(enabled=False)
await self.bsc.set_service_properties(delete_retention_policy=delete_retention_policy)
def _assert_blob_is_soft_deleted(self, blob):
self.assertTrue(blob.deleted)
self.assertIsNotNone(blob.deleted_time)
self.assertIsNotNone(blob.remaining_retention_days)
def _assert_blob_not_soft_deleted(self, blob):
self.assertFalse(blob.deleted)
self.assertIsNone(blob.deleted_time)
self.assertIsNone(blob.remaining_retention_days)
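# The tests below exercise blob existence, upload and download, properties and metadata, leasing, deletion, soft delete and copy operations against the container created in _setup.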
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_blob_exists(self, resource_group, location, storage_account, storage_account_key):
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
exists = await blob.get_blob_properties()
self.assertTrue(exists)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_blob_not_exists(self, resource_group, location, storage_account, storage_account_key):
await self._setup(storage_account.name, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
with self.assertRaises(ResourceNotFoundError):
await blob.get_blob_properties()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_blob_snapshot_exists(self, resource_group, location, storage_account, storage_account_key):
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
snapshot = await blob.create_snapshot()
blob = self.bsc.get_blob_client(self.container_name, blob_name, snapshot=snapshot)
exists = await blob.get_blob_properties()
self.assertTrue(exists)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_blob_snapshot_not_exists(self, resource_group, location, storage_account, storage_account_key):
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name, snapshot="1988-08-18T07:52:31.6690068Z")
with self.assertRaises(ResourceNotFoundError):
await blob.get_blob_properties()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_blob_container_not_exists(self, resource_group, location, storage_account, storage_account_key):
await self._setup(storage_account.name, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self._get_container_reference(), blob_name)
with self.assertRaises(ResourceNotFoundError):
await blob.get_blob_properties()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_question_mark(self, resource_group, location, storage_account, storage_account_key):
await self._setup(storage_account.name, storage_account_key)
blob_name = '?ques?tion?'
blob_data = u'???'
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.upload_blob(blob_data)
stream = await blob.download_blob()
data = await stream.readall()
self.assertIsNotNone(data)
content = data.decode('utf-8')
self.assertEqual(content, blob_data)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_special_chars(self, resource_group, location, storage_account, storage_account_key):
await self._setup(storage_account.name, storage_account_key)
for c in '-._ /()$=\',~':
blob_name = '{0}a{0}a{0}'.format(c)
blob_data = c
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.upload_blob(blob_data, length=len(blob_data))
data = await (await blob.download_blob()).readall()
content = data.decode('utf-8')
self.assertEqual(content, blob_data)
# Assert
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_lease_id(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
# Act
data = b'hello world again'
resp = await blob.upload_blob(data, length=len(data), lease=lease)
# Assert
self.assertIsNotNone(resp.get('etag'))
stream = await blob.download_blob(lease=lease)
content = await stream.readall()
self.assertEqual(content, data)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_metadata(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = self._get_blob_reference()
metadata = {'hello': 'world', 'number': '42'}
# Act
data = b'hello world'
blob = self.bsc.get_blob_client(self.container_name, blob_name)
resp = await blob.upload_blob(data, length=len(data), metadata=metadata)
# Assert
self.assertIsNotNone(resp.get('etag'))
md = (await blob.get_blob_properties()).metadata
self.assertDictEqual(md, metadata)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_generator_async(self, resource_group, location, storage_account, storage_account_key):
await self._setup(storage_account.name, storage_account_key)
# Act
def gen():
yield "hello"
yield "world!"
yield " eom"
blob = self.bsc.get_blob_client(self.container_name, "gen_blob")
resp = await blob.upload_blob(data=gen())
# Assert
self.assertIsNotNone(resp.get('etag'))
content = await (await blob.download_blob()).readall()
self.assertEqual(content, b"helloworld! eom")
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_requests_async(self, resource_group, location, storage_account, storage_account_key):
await self._setup(storage_account.name, storage_account_key)
# Act
uri = "http://www.gutenberg.org/files/59466/59466-0.txt"
data = requests.get(uri, stream=True)
blob = self.bsc.get_blob_client(self.container_name, "gutenberg")
resp = await blob.upload_blob(data=data.raw)
self.assertIsNotNone(resp.get('etag'))
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_aiohttp_async(self, resource_group, location, storage_account, storage_account_key):
await self._setup(storage_account.name, storage_account_key)
blob = self.bsc.get_blob_client(self.container_name, "gutenberg")
# Act
uri = "http://www.gutenberg.org/files/59466/59466-0.txt"
async with aiohttp.ClientSession() as session:
async with session.get(uri) as data:
async for text, _ in data.content.iter_chunks():
resp = await blob.upload_blob(data=text)
self.assertIsNotNone(resp.get('etag'))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
stream = await blob.download_blob()
content = await stream.readall()
# Assert
self.assertEqual(content, self.byte_data)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_snapshot(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
snap = await blob.create_snapshot()
snapshot = self.bsc.get_blob_client(
self.container_name, blob_name, snapshot=snap)
# Act
stream = await snapshot.download_blob()
content = await stream.readall()
# Assert
self.assertEqual(content, self.byte_data)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_snapshot_previous(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
snap = await blob.create_snapshot()
snapshot = self.bsc.get_blob_client(
self.container_name, blob_name, snapshot=snap)
upload_data = b'hello world again'
await blob.upload_blob(upload_data, length=len(upload_data), overwrite=True)
# Act
blob_previous = await snapshot.download_blob()
blob_previous_bytes = await blob_previous.readall()
blob_latest = await blob.download_blob()
blob_latest_bytes = await blob_latest.readall()
# Assert
self.assertEqual(blob_previous_bytes, self.byte_data)
self.assertEqual(blob_latest_bytes, b'hello world again')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_range(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
stream = await blob.download_blob(offset=0, length=5)
content = await stream.readall()
# Assert
self.assertEqual(content, self.byte_data[:5])
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_lease(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
# Act
stream = await blob.download_blob(lease=lease)
content = await stream.readall()
await lease.release()
# Assert
self.assertEqual(content, self.byte_data)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_with_non_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = self._get_blob_reference()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
with self.assertRaises(ResourceNotFoundError):
await blob.download_blob()
# Assert
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_set_blob_properties_with_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.set_http_headers(
content_settings=ContentSettings(
content_language='spanish',
content_disposition='inline'),
)
# Assert
props = await blob.get_blob_properties()
self.assertEqual(props.content_settings.content_language, 'spanish')
self.assertEqual(props.content_settings.content_disposition, 'inline')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_set_blob_properties_with_blob_settings_param(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
props = await blob.get_blob_properties()
# Act
props.content_settings.content_language = 'spanish'
props.content_settings.content_disposition = 'inline'
await blob.set_http_headers(content_settings=props.content_settings)
# Assert
props = await blob.get_blob_properties()
self.assertEqual(props.content_settings.content_language, 'spanish')
self.assertEqual(props.content_settings.content_disposition, 'inline')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_properties(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
props = await blob.get_blob_properties()
# Assert
self.assertIsInstance(props, BlobProperties)
self.assertEqual(props.blob_type, BlobType.BlockBlob)
self.assertEqual(props.size, len(self.byte_data))
self.assertEqual(props.lease.status, 'unlocked')
self.assertIsNotNone(props.creation_time)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_properties_fail(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name, snapshot=1)
with self.assertRaises(HttpResponseError) as e:
await blob.get_blob_properties() # Invalid snapshot value of 1
# Assert
# TODO: No error code returned
# self.assertEqual(StorageErrorCode.invalid_query_parameter_value, e.exception.error_code)
# This test is to validate that the ErrorCode is retrieved from the header during a
# GET request. This is preferred to relying on the ErrorCode in the body.
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_metadata_fail(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name, snapshot=1)
with self.assertRaises(HttpResponseError) as e:
(await blob.get_blob_properties()).metadata # Invalid snapshot value of 1
# Assert
# TODO: No error code returned
# self.assertEqual(StorageErrorCode.invalid_query_parameter_value, e.exception.error_code)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_server_encryption(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = await blob.download_blob()
# Assert
self.assertTrue(data.properties.server_encrypted)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_properties_server_encryption(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
props = await blob.get_blob_properties()
# Assert
self.assertTrue(props.server_encrypted)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_list_blobs_server_encryption(self, resource_group, location, storage_account, storage_account_key):
# test can only run live
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._create_block_blob()
container = self.bsc.get_container_client(self.container_name)
blob_list = []
async for b in container.list_blobs():
blob_list.append(b)
# Act
# Assert
for blob in blob_list:
self.assertTrue(blob.server_encrypted)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_no_server_encryption(self, resource_group, location, storage_account, storage_account_key):
pytest.skip("Aiohttp headers dict (CIMultiDictProxy) is immutable.")
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
def callback(response):
response.http_response.headers['x-ms-server-encrypted'] = 'false'
props = await blob.get_blob_properties(raw_response_hook=callback)
# Assert
self.assertFalse(props.server_encrypted)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_properties_with_snapshot(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
container = self.bsc.get_container_client(self.container_name)
blob = self.bsc.get_blob_client(self.container_name, blob_name)
res = await blob.create_snapshot()
blobs = []
async for b in container.list_blobs(include='snapshots'):
blobs.append(b)
self.assertEqual(len(blobs), 2)
# Act
snapshot = self.bsc.get_blob_client(self.container_name, blob_name, snapshot=res)
props = await snapshot.get_blob_properties()
# Assert
self.assertIsNotNone(blob)
self.assertEqual(props.blob_type, BlobType.BlockBlob)
self.assertEqual(props.size, len(self.byte_data))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_properties_with_leased_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
# Act
props = await blob.get_blob_properties()
# Assert
self.assertIsInstance(props, BlobProperties)
self.assertEqual(props.blob_type, BlobType.BlockBlob)
self.assertEqual(props.size, len(self.byte_data))
self.assertEqual(props.lease.status, 'locked')
self.assertEqual(props.lease.state, 'leased')
self.assertEqual(props.lease.duration, 'infinite')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_blob_metadata(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
md = (await blob.get_blob_properties()).metadata
# Assert
self.assertIsNotNone(md)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_set_blob_metadata_with_upper_case(self, resource_group, location, storage_account, storage_account_key):
# bug in devtools...converts upper case header to lowercase
# passes live.
# Arrange
await self._setup(storage_account.name, storage_account_key)
metadata = {'hello': 'world', 'number': '42', 'UP': 'UPval'}
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.set_blob_metadata(metadata)
# Assert
md = (await blob.get_blob_properties()).metadata
self.assertEqual(3, len(md))
self.assertEqual(md['hello'], 'world')
self.assertEqual(md['number'], '42')
self.assertEqual(md['UP'], 'UPval')
self.assertFalse('up' in md)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_delete_blob_with_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
resp = await blob.delete_blob()
# Assert
self.assertIsNone(resp)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_delete_blob_with_non_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = self._get_blob_reference()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
with self.assertRaises(ResourceNotFoundError):
await blob.delete_blob()
# Assert
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_delete_blob_snapshot(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
snap = await blob.create_snapshot()
snapshot = self.bsc.get_blob_client(
self.container_name, blob_name, snapshot=snap)
# Act
await snapshot.delete_blob()
# Assert
container = self.bsc.get_container_client(self.container_name)
blobs = []
async for b in container.list_blobs(include='snapshots'):
blobs.append(b)
self.assertEqual(len(blobs), 1)
self.assertEqual(blobs[0].name, blob_name)
self.assertIsNone(blobs[0].snapshot)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_delete_blob_snapshots(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.create_snapshot()
# Act
await blob.delete_blob(delete_snapshots='only')
# Assert
container = self.bsc.get_container_client(self.container_name)
blobs = []
async for b in container.list_blobs(include='snapshots'):
blobs.append(b)
self.assertEqual(len(blobs), 1)
self.assertIsNone(blobs[0].snapshot)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_delete_blob_with_snapshots(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.create_snapshot()
# Act
# with self.assertRaises(HttpResponseError):
# blob.delete_blob()
await blob.delete_blob(delete_snapshots='include')
# Assert
container = self.bsc.get_container_client(self.container_name)
blobs = []
async for b in container.list_blobs(include='snapshots'):
blobs.append(b)
self.assertEqual(len(blobs), 0)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_soft_delete_blob_without_snapshots(self, resource_group, location, storage_account, storage_account_key):
try:
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._enable_soft_delete()
blob_name = await self._create_block_blob()
container = self.bsc.get_container_client(self.container_name)
blob = container.get_blob_client(blob_name)
# Soft delete the blob
await blob.delete_blob()
blob_list = []
async for b in container.list_blobs(include='deleted'):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 1)
self._assert_blob_is_soft_deleted(blob_list[0])
# list_blobs should not list soft deleted blobs if Include(deleted=True) is not specified
blob_list = []
async for b in container.list_blobs():
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 0)
# Restore blob with undelete
await blob.undelete_blob()
blob_list = []
async for b in container.list_blobs(include='deleted'):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 1)
self._assert_blob_not_soft_deleted(blob_list[0])
finally:
await self._disable_soft_delete()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_soft_delete_single_blob_snapshot(self, resource_group, location, storage_account, storage_account_key):
try:
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._enable_soft_delete()
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
blob_snapshot_1 = await blob.create_snapshot()
blob_snapshot_2 = await blob.create_snapshot()
# Soft delete blob_snapshot_1
snapshot_1 = self.bsc.get_blob_client(
self.container_name, blob_name, snapshot=blob_snapshot_1)
await snapshot_1.delete_blob()
with self.assertRaises(ValueError):
await snapshot_1.delete_blob(delete_snapshots='only')
container = self.bsc.get_container_client(self.container_name)
blob_list = []
async for b in container.list_blobs(include=["snapshots", "deleted"]):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 3)
for listedblob in blob_list:
if listedblob.snapshot == blob_snapshot_1['snapshot']:
self._assert_blob_is_soft_deleted(listedblob)
else:
self._assert_blob_not_soft_deleted(listedblob)
# list_blobs should not list soft deleted blob snapshots if Include(deleted=True) is not specified
blob_list = []
async for b in container.list_blobs(include='snapshots'):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 2)
# Restore snapshot with undelete
await blob.undelete_blob()
blob_list = []
async for b in container.list_blobs(include=["snapshots", "deleted"]):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 3)
for blob in blob_list:
self._assert_blob_not_soft_deleted(blob)
finally:
await self._disable_soft_delete()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_soft_delete_only_snapshots_of_blob(self, resource_group, location, storage_account, storage_account_key):
try:
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._enable_soft_delete()
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
blob_snapshot_1 = await blob.create_snapshot()
blob_snapshot_2 = await blob.create_snapshot()
# Soft delete all snapshots
await blob.delete_blob(delete_snapshots='only')
container = self.bsc.get_container_client(self.container_name)
blob_list = []
async for b in container.list_blobs(include=["snapshots", "deleted"]):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 3)
for listedblob in blob_list:
if listedblob.snapshot == blob_snapshot_1['snapshot']:
self._assert_blob_is_soft_deleted(listedblob)
elif listedblob.snapshot == blob_snapshot_2['snapshot']:
self._assert_blob_is_soft_deleted(listedblob)
else:
self._assert_blob_not_soft_deleted(listedblob)
# list_blobs should not list soft deleted blob snapshots if Include(deleted=True) is not specified
blob_list = []
async for b in container.list_blobs(include="snapshots"):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 1)
# Restore snapshots with undelete
await blob.undelete_blob()
blob_list = []
async for b in container.list_blobs(include=["snapshots", "deleted"]):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 3)
for blob in blob_list:
self._assert_blob_not_soft_deleted(blob)
finally:
await self._disable_soft_delete()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_soft_delete_blob_including_all_snapshots(self, resource_group, location, storage_account, storage_account_key):
try:
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._enable_soft_delete()
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
blob_snapshot_1 = await blob.create_snapshot()
blob_snapshot_2 = await blob.create_snapshot()
# Soft delete blob and all snapshots
await blob.delete_blob(delete_snapshots='include')
container = self.bsc.get_container_client(self.container_name)
blob_list = []
async for b in container.list_blobs(include=["snapshots", "deleted"]):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 3)
for listedblob in blob_list:
self._assert_blob_is_soft_deleted(listedblob)
# list_blobs should not list soft deleted blob snapshots if Include(deleted=True) is not specified
blob_list = []
async for b in container.list_blobs(include=["snapshots"]):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 0)
# Restore blob and snapshots with undelete
await blob.undelete_blob()
blob_list = []
async for b in container.list_blobs(include=["snapshots", "deleted"]):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 3)
for blob in blob_list:
self._assert_blob_not_soft_deleted(blob)
finally:
await self._disable_soft_delete()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_soft_delete_with_leased_blob(self, resource_group, location, storage_account, storage_account_key):
try:
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._enable_soft_delete()
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
            # Soft deleting the blob without a lease id should fail
with self.assertRaises(HttpResponseError):
await blob.delete_blob()
# Soft delete the blob
await blob.delete_blob(lease=lease)
container = self.bsc.get_container_client(self.container_name)
blob_list = []
async for b in container.list_blobs(include="deleted"):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 1)
self._assert_blob_is_soft_deleted(blob_list[0])
# list_blobs should not list soft deleted blobs if Include(deleted=True) is not specified
blob_list = []
async for b in container.list_blobs():
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 0)
            # Restore blob with undelete; this also gets rid of the lease
await blob.undelete_blob()
blob_list = []
async for b in container.list_blobs(include="deleted"):
blob_list.append(b)
# Assert
self.assertEqual(len(blob_list), 1)
self._assert_blob_not_soft_deleted(blob_list[0])
finally:
await self._disable_soft_delete()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_copy_blob_with_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
sourceblob = '{0}/{1}/{2}'.format(
self.account_url(storage_account.name, "blob"), self.container_name, blob_name)
copyblob = self.bsc.get_blob_client(self.container_name, 'blob1copy')
copy = await copyblob.start_copy_from_url(sourceblob)
# Assert
self.assertIsNotNone(copy)
self.assertEqual(copy['copy_status'], 'success')
self.assertFalse(isinstance(copy['copy_status'], Enum))
self.assertIsNotNone(copy['copy_id'])
copy_content = await (await copyblob.download_blob()).readall()
self.assertEqual(copy_content, self.byte_data)
# @GlobalStorageAccountPreparer()
# @AsyncStorageTestCase.await_prepared_test
# TODO: external copy was supported since 2019-02-02
# async def test_copy_blob_with_external_blob_fails(self):
# # Arrange
# await self._setup()
# source_blob = "http://www.gutenberg.org/files/59466/59466-0.txt"
# copied_blob = self.bsc.get_blob_client(self.container_name, '59466-0.txt')
#
# # Act
# copy = await copied_blob.start_copy_from_url(source_blob)
# self.assertEqual(copy['copy_status'], 'pending')
# props = await self._wait_for_async_copy(copied_blob)
#
# # Assert
# self.assertEqual(props.copy.status_description, '500 InternalServerError "Copy failed."')
# self.assertEqual(props.copy.status, 'failed')
# self.assertIsNotNone(props.copy.id)
#
# @record
# def test_copy_blob_with_external_blob_fails(self):
# loop = asyncio.get_event_loop()
# loop.run_until_complete(self._test_copy_blob_with_external_blob_fails())
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_copy_blob_async_private_blob_no_sas(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob()
# Act
target_blob_name = 'targetblob'
target_blob = self.bsc.get_blob_client(self.container_name, target_blob_name)
# Assert
with self.assertRaises(ResourceNotFoundError):
await target_blob.start_copy_from_url(source_blob.url)
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_copy_blob_async_private_blob_with_sas(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob(blob_data=data)
sas_token = generate_blob_sas(
source_blob.account_name,
source_blob.container_name,
source_blob.blob_name,
snapshot=source_blob.snapshot,
account_key=source_blob.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
blob = BlobClient.from_blob_url(source_blob.url, credential=sas_token)
# Act
target_blob_name = 'targetblob'
target_blob = self.bsc.get_blob_client(self.container_name, target_blob_name)
copy_resp = await target_blob.start_copy_from_url(blob.url)
# Assert
props = await self._wait_for_async_copy(target_blob)
self.assertEqual(props.copy.status, 'success')
actual_data = await (await target_blob.download_blob()).readall()
self.assertEqual(actual_data, data)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_abort_copy_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
source_blob = "http://www.gutenberg.org/files/59466/59466-0.txt"
copied_blob = self.bsc.get_blob_client(self.container_name, '59466-0.txt')
# Act
copy = await copied_blob.start_copy_from_url(source_blob)
self.assertEqual(copy['copy_status'], 'pending')
await copied_blob.abort_copy(copy)
props = await self._wait_for_async_copy(copied_blob)
self.assertEqual(props.copy.status, 'aborted')
# Assert
actual_data = await copied_blob.download_blob()
bytes_data = await (await copied_blob.download_blob()).readall()
self.assertEqual(bytes_data, b"")
self.assertEqual(actual_data.properties.copy.status, 'aborted')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_abort_copy_blob_with_synchronous_copy_fails(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
source_blob_name = await self._create_block_blob()
source_blob = self.bsc.get_blob_client(self.container_name, source_blob_name)
# Act
target_blob_name = 'targetblob'
target_blob = self.bsc.get_blob_client(self.container_name, target_blob_name)
copy_resp = await target_blob.start_copy_from_url(source_blob.url)
with self.assertRaises(HttpResponseError):
await target_blob.abort_copy(copy_resp)
# Assert
self.assertEqual(copy_resp['copy_status'], 'success')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_snapshot_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
resp = await blob.create_snapshot()
# Assert
self.assertIsNotNone(resp)
self.assertIsNotNone(resp['snapshot'])
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_acquire_and_release(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
await lease.release()
lease2 = await blob.acquire_lease()
# Assert
self.assertIsNotNone(lease)
self.assertIsNotNone(lease2)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_with_duration(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease(lease_duration=15)
resp = await blob.upload_blob(b'hello 2', length=7, lease=lease)
self.sleep(15)
# Assert
with self.assertRaises(HttpResponseError):
await blob.upload_blob(b'hello 3', length=7, lease=lease)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_with_proposed_lease_id(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease_id = 'a0e6c241-96ea-45a3-a44b-6ae868bc14d0'
lease = await blob.acquire_lease(lease_id=lease_id)
# Assert
self.assertEqual(lease.id, lease_id)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_change_lease_id(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease_id = 'a0e6c241-96ea-45a3-a44b-6ae868bc14d0'
lease = await blob.acquire_lease()
first_lease_id = lease.id
await lease.change(lease_id)
await lease.renew()
# Assert
self.assertNotEqual(first_lease_id, lease.id)
self.assertEqual(lease.id, lease_id)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_break_period(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease(lease_duration=15)
lease_time = await lease.break_lease(lease_break_period=5)
resp = await blob.upload_blob(b'hello 2', length=7, lease=lease)
self.sleep(5)
with self.assertRaises(HttpResponseError):
await blob.upload_blob(b'hello 3', length=7, lease=lease)
# Assert
self.assertIsNotNone(lease.id)
self.assertIsNotNone(lease_time)
self.assertIsNotNone(resp.get('etag'))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_acquire_and_renew(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
first_id = lease.id
await lease.renew()
# Assert
self.assertEqual(first_id, lease.id)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_lease_blob_acquire_twice_fails(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
lease = await blob.acquire_lease()
# Act
with self.assertRaises(HttpResponseError):
await blob.acquire_lease()
# Assert
self.assertIsNotNone(lease.id)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_unicode_get_blob_unicode_name(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = '啊齄丂狛狜'
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.upload_blob(b'hello world')
# Act
stream = await blob.download_blob()
content = await stream.readall()
# Assert
self.assertEqual(content, b'hello world')
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_blob_unicode_data(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
data = u'hello world啊齄丂狛狜'
resp = await blob.upload_blob(data)
# Assert
self.assertIsNotNone(resp.get('etag'))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_no_sas_private_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
response = requests.get(blob.url)
# Assert
self.assertFalse(response.ok)
self.assertNotEqual(-1, response.text.find('ResourceNotFound'))
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_no_sas_public_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'a public blob can be read without a shared access signature'
blob_name = 'blob1.txt'
container_name = self._get_container_reference()
try:
container = await self.bsc.create_container(container_name, public_access='blob')
except ResourceExistsError:
container = self.bsc.get_container_client(container_name)
blob = await container.upload_blob(blob_name, data)
# Act
response = requests.get(blob.url)
# Assert
self.assertTrue(response.ok)
self.assertEqual(data, response.content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_public_access_blob(self, resource_group, location, storage_account, storage_account_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'public access blob'
blob_name = 'blob1.txt'
container_name = self._get_container_reference()
try:
container = await self.bsc.create_container(container_name, public_access='blob')
except ResourceExistsError:
container = self.bsc.get_container_client(container_name)
blob = await container.upload_blob(blob_name, data)
# Act
service = BlobClient.from_blob_url(blob.url)
# self._set_test_proxy(service, self.settings)
content = await (await service.download_blob()).readall()
# Assert
self.assertEqual(data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_sas_access_blob(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
# Act
service = BlobClient.from_blob_url(blob.url, credential=token)
# self._set_test_proxy(service, self.settings)
content = await (await service.download_blob()).readall()
# Assert
self.assertEqual(self.byte_data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_sas_signed_identifier(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
container = self.bsc.get_container_client(self.container_name)
blob = self.bsc.get_blob_client(self.container_name, blob_name)
access_policy = AccessPolicy()
access_policy.start = datetime.utcnow() - timedelta(hours=1)
access_policy.expiry = datetime.utcnow() + timedelta(hours=1)
access_policy.permission = BlobSasPermissions(read=True)
identifiers = {'testid': access_policy}
resp = await container.set_container_access_policy(identifiers)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
policy_id='testid')
# Act
service = BlobClient.from_blob_url(blob.url, credential=token)
# self._set_test_proxy(service, self.settings)
result = await (await service.download_blob()).readall()
# Assert
self.assertEqual(self.byte_data, result)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_account_sas(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
token = generate_account_sas(
self.bsc.account_name,
self.bsc.credential.account_key,
ResourceTypes(container=True, object=True),
AccountSasPermissions(read=True),
datetime.utcnow() + timedelta(hours=1),
)
# Act
blob = BlobClient(
self.bsc.url, container_name=self.container_name, blob_name=blob_name, credential=token)
container = ContainerClient(
self.bsc.url, container_name=self.container_name, credential=token)
await container.get_container_properties()
blob_response = requests.get(blob.url)
container_response = requests.get(container.url, params={'restype': 'container'})
# Assert
self.assertTrue(blob_response.ok)
self.assertEqual(self.byte_data, blob_response.content)
self.assertTrue(container_response.ok)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_token_credential(self, resource_group, location, storage_account, storage_account_key):
await self._setup(storage_account.name, storage_account_key)
token_credential = self.generate_oauth_token()
# Action 1: make sure token works
service = BlobServiceClient(self.account_url(storage_account.name, "blob"), credential=token_credential, transport=AiohttpTestTransport())
result = await service.get_service_properties()
self.assertIsNotNone(result)
# Action 2: change token value to make request fail
fake_credential = self.generate_fake_token()
service = BlobServiceClient(self.account_url(storage_account.name, "blob"), credential=fake_credential, transport=AiohttpTestTransport())
with self.assertRaises(ClientAuthenticationError):
await service.get_service_properties()
# Action 3: update token to make it working again
service = BlobServiceClient(self.account_url(storage_account.name, "blob"), credential=token_credential, transport=AiohttpTestTransport())
result = await service.get_service_properties()
self.assertIsNotNone(result)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_shared_read_access_blob(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
await self._setup(storage_account.name, storage_account_key)
# Arrange
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
# Act
sas_blob = BlobClient.from_blob_url(blob.url, credential=token)
response = requests.get(sas_blob.url)
# Assert
response.raise_for_status()
self.assertTrue(response.ok)
self.assertEqual(self.byte_data, response.content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_shared_read_access_blob_with_content_query_params(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
cache_control='no-cache',
content_disposition='inline',
content_encoding='utf-8',
content_language='fr',
content_type='text',
)
sas_blob = BlobClient.from_blob_url(blob.url, credential=token)
# Act
response = requests.get(sas_blob.url)
# Assert
response.raise_for_status()
self.assertEqual(self.byte_data, response.content)
self.assertEqual(response.headers['cache-control'], 'no-cache')
self.assertEqual(response.headers['content-disposition'], 'inline')
self.assertEqual(response.headers['content-encoding'], 'utf-8')
self.assertEqual(response.headers['content-language'], 'fr')
self.assertEqual(response.headers['content-type'], 'text')
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_shared_write_access_blob(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
updated_data = b'updated blob data'
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
permission=BlobSasPermissions(write=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
sas_blob = BlobClient.from_blob_url(blob.url, credential=token)
# Act
headers = {'x-ms-blob-type': 'BlockBlob'}
response = requests.put(sas_blob.url, headers=headers, data=updated_data)
# Assert
response.raise_for_status()
self.assertTrue(response.ok)
data = await (await blob.download_blob()).readall()
self.assertEqual(updated_data, data)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_shared_delete_access_blob(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
permission=BlobSasPermissions(delete=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
sas_blob = BlobClient.from_blob_url(blob.url, credential=token)
# Act
response = requests.delete(sas_blob.url)
# Assert
response.raise_for_status()
self.assertTrue(response.ok)
with self.assertRaises(HttpResponseError):
await sas_blob.download_blob()
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_account_information(self, resource_group, location, storage_account, storage_account_key):
# Act
await self._setup(storage_account.name, storage_account_key)
info = await self.bsc.get_account_information()
# Assert
self.assertIsNotNone(info.get('sku_name'))
self.assertIsNotNone(info.get('account_kind'))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_account_information_with_container_name(self, resource_group, location, storage_account, storage_account_key):
# Act
# Container name gets ignored
await self._setup(storage_account.name, storage_account_key)
container = self.bsc.get_container_client("missing")
info = await container.get_account_information()
# Assert
self.assertIsNotNone(info.get('sku_name'))
self.assertIsNotNone(info.get('account_kind'))
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_account_information_with_blob_name(self, resource_group, location, storage_account, storage_account_key):
# Act
# Both container and blob names get ignored
await self._setup(storage_account.name, storage_account_key)
blob = self.bsc.get_blob_client("missing", "missing")
info = await blob.get_account_information()
# Assert
self.assertIsNotNone(info.get('sku_name'))
self.assertIsNotNone(info.get('account_kind'))
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_account_information_with_container_sas(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
container = self.bsc.get_container_client(self.container_name)
token = generate_container_sas(
container.account_name,
container.container_name,
account_key=container.credential.account_key,
permission=ContainerSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
sas_container = ContainerClient.from_container_url(container.url, credential=token)
# Act
info = await sas_container.get_account_information()
# Assert
self.assertIsNotNone(info.get('sku_name'))
self.assertIsNotNone(info.get('account_kind'))
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_get_account_information_with_blob_sas(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
blob_name = await self._create_block_blob()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
sas_blob = BlobClient.from_blob_url(blob.url, credential=token)
# Act
info = await sas_blob.get_account_information()
# Assert
self.assertIsNotNone(info.get('sku_name'))
self.assertIsNotNone(info.get('account_kind'))
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_download_to_file_with_sas(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob(blob_data=data)
sas_token = generate_blob_sas(
source_blob.account_name,
source_blob.container_name,
source_blob.blob_name,
snapshot=source_blob.snapshot,
account_key=source_blob.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
FILE_PATH = '_to_file_with_sas.async.dat'
blob = BlobClient.from_blob_url(source_blob.url, credential=sas_token)
# Act
await download_blob_from_url(blob.url, FILE_PATH)
# Assert
with open(FILE_PATH, 'rb') as stream:
actual = stream.read()
self.assertEqual(data, actual)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_download_to_file_with_credential(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob(blob_data=data)
FILE_PATH = 'to_file_with_credential.async.dat'
# Act
await download_blob_from_url(
source_blob.url, FILE_PATH,
max_concurrency=2,
credential=rmt_key)
# Assert
with open(FILE_PATH, 'rb') as stream:
actual = stream.read()
self.assertEqual(data, actual)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_download_to_stream_with_credential(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob(blob_data=data)
FILE_PATH = 'to_stream_with_credential.async.dat'
# Act
with open(FILE_PATH, 'wb') as stream:
await download_blob_from_url(
source_blob.url, stream,
max_concurrency=2,
credential=rmt_key)
# Assert
with open(FILE_PATH, 'rb') as stream:
actual = stream.read()
self.assertEqual(data, actual)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_download_to_file_with_existing_file(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob(blob_data=data)
FILE_PATH = 'with_existing_file.async.dat'
# Act
await download_blob_from_url(
source_blob.url, FILE_PATH,
credential=rmt_key)
with self.assertRaises(ValueError):
await download_blob_from_url(source_blob.url, FILE_PATH)
# Assert
with open(FILE_PATH, 'rb') as stream:
actual = stream.read()
self.assertEqual(data, actual)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@StorageAccountPreparer(random_name_enabled=True, name_prefix='pyrmtstorage', parameter_name='rmt')
@AsyncStorageTestCase.await_prepared_test
async def test_download_to_file_with_existing_file_overwrite(self, resource_group, location, storage_account, storage_account_key, rmt, rmt_key):
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
await self._setup_remote(rmt.name, rmt_key)
await self._create_remote_container()
source_blob = await self._create_remote_block_blob(blob_data=data)
FILE_PATH = 'existing_file_overwrite.async.dat'
# Act
await download_blob_from_url(
source_blob.url, FILE_PATH,
credential=rmt_key)
data2 = b'ABCDEFGH' * 1024 * 1024
source_blob = await self._create_remote_block_blob(blob_data=data2)
await download_blob_from_url(
source_blob.url, FILE_PATH, overwrite=True,
credential=rmt_key)
# Assert
with open(FILE_PATH, 'rb') as stream:
actual = stream.read()
self.assertEqual(data2, actual)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_bytes_with_sas(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
token = generate_blob_sas(
blob.account_name,
blob.container_name,
blob.blob_name,
snapshot=blob.snapshot,
account_key=blob.credential.account_key,
permission=BlobSasPermissions(write=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
sas_blob = BlobClient.from_blob_url(blob.url, credential=token)
# Act
uploaded = await upload_blob_to_url(sas_blob.url, data)
# Assert
self.assertIsNotNone(uploaded)
content = await (await blob.download_blob()).readall()
self.assertEqual(data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_bytes_with_credential(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
uploaded = await upload_blob_to_url(
blob.url, data, credential=storage_account_key)
# Assert
self.assertIsNotNone(uploaded)
content = await (await blob.download_blob()).readall()
self.assertEqual(data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_bytes_with_existing_blob(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.upload_blob(b"existing_data")
# Act
with self.assertRaises(ResourceExistsError):
await upload_blob_to_url(
blob.url, data, credential=storage_account_key)
# Assert
content = await (await blob.download_blob()).readall()
self.assertEqual(b"existing_data", content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_bytes_with_existing_blob_overwrite(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
await blob.upload_blob(b"existing_data")
# Act
uploaded = await upload_blob_to_url(
blob.url, data,
overwrite=True,
credential=storage_account_key)
# Assert
self.assertIsNotNone(uploaded)
content = await (await blob.download_blob()).readall()
self.assertEqual(data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_text_with_credential(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = '12345678' * 1024 * 1024
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
uploaded = await upload_blob_to_url(
blob.url, data, credential=storage_account_key)
# Assert
self.assertIsNotNone(uploaded)
stream = await blob.download_blob(encoding='UTF-8')
content = await stream.readall()
self.assertEqual(data, content)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_upload_to_url_file_with_credential(self, resource_group, location, storage_account, storage_account_key):
# SAS URL is calculated from storage key, so this test runs live only
# Arrange
await self._setup(storage_account.name, storage_account_key)
data = b'12345678' * 1024 * 1024
FILE_PATH = 'url_file_with_credential.async.dat'
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
with open(FILE_PATH, 'rb'):
uploaded = await upload_blob_to_url(
blob.url, data, credential=storage_account_key)
# Assert
self.assertIsNotNone(uploaded)
content = await (await blob.download_blob()).readall()
self.assertEqual(data, content)
self._teardown(FILE_PATH)
@GlobalStorageAccountPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_transport_closed_only_once(self, resource_group, location, storage_account, storage_account_key):
container_name = self.get_resource_name('utcontainerasync')
transport = AioHttpTransport()
bsc = BlobServiceClient(self.account_url(storage_account.name, "blob"), credential=storage_account_key, transport=transport)
blob_name = self._get_blob_reference()
async with bsc:
await bsc.get_service_properties()
assert transport.session is not None
async with bsc.get_blob_client(container_name, blob_name) as bc:
assert transport.session is not None
await bsc.get_service_properties()
assert transport.session is not None
# ------------------------------------------------------------------------------
| true
| true
|
7905360af6477616d3533bddce9622fc67b53657
| 924
|
py
|
Python
|
fastapi_builder/helpers.py
|
fmw666/fastapi-cli
|
6a1b1827f2abd9490f4eeed8b4594634f0a08fd2
|
[
"MIT"
] | 1
|
2022-02-16T12:27:53.000Z
|
2022-02-16T12:27:53.000Z
|
fastapi_builder/helpers.py
|
fmw666/fastapi-cli
|
6a1b1827f2abd9490f4eeed8b4594634f0a08fd2
|
[
"MIT"
] | null | null | null |
fastapi_builder/helpers.py
|
fmw666/fastapi-cli
|
6a1b1827f2abd9490f4eeed8b4594634f0a08fd2
|
[
"MIT"
] | null | null | null |
import re
from typing import TypeVar
import questionary
EnumType = TypeVar("EnumType")
# Convert camelCase to snake_case
def camel_to_snake(text: str) -> str:
return re.sub(r"(?<!^)(?=[A-Z])", "_", text).lower()
# Convert snake_case to camelCase
def snake_to_camel(text: str) -> str:
return text.split('_')[0] + "".join(x.title() for x in text.split('_')[1:])
# Convert camelCase to PascalCase
def camel_to_pascal(text: str) -> str:
return text[0].upper() + text[1:]
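# For example (illustrative only), the helpers above map a sample identifier as follows:
#   camel_to_snake("MyModelName")   -> "my_model_name"
#   snake_to_camel("my_model_name") -> "myModelName"
#   camel_to_pascal("myModel")      -> "MyModel"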
def question(choices: EnumType) -> questionary.Question:
prompt = camel_to_snake(choices.__name__).replace("_", " ") # type: ignore
return questionary.select(f"Select the {prompt}: ", choices=list(choices))
def binary_question(option: str) -> questionary.Question:
return questionary.confirm(f"Do you want {option}?", default=False)
def text_question(default: str) -> questionary.Question:
return questionary.text(f"The name of the database you want to create? ", default=default)
| 28
| 94
| 0.691558
|
import re
from typing import TypeVar
import questionary
EnumType = TypeVar("EnumType")
def camel_to_snake(text: str) -> str:
return re.sub(r"(?<!^)(?=[A-Z])", "_", text).lower()
def snake_to_camel(text: str) -> str:
return text.split('_')[0] + "".join(x.title() for x in text.split('_')[1:])
def camel_to_pascal(text: str) -> str:
return text[0].upper() + text[1:]
def question(choices: EnumType) -> questionary.Question:
prompt = camel_to_snake(choices.__name__).replace("_", " ")
return questionary.select(f"Select the {prompt}: ", choices=list(choices))
def binary_question(option: str) -> questionary.Question:
return questionary.confirm(f"Do you want {option}?", default=False)
def text_question(default: str) -> questionary.Question:
return questionary.text(f"The name of the database you want to create? ", default=default)
| true
| true
|
790537d8d261f30dd5d3294b70a4a5c50ccee06f
| 8,339
|
py
|
Python
|
test/functional/proxy_test.py
|
mdfkbtc/PRiVCY
|
e07b058eef2d9e0a74ffd1fe474ed27788355923
|
[
"MIT"
] | 5
|
2021-05-05T02:54:32.000Z
|
2021-11-21T13:04:14.000Z
|
test/functional/proxy_test.py
|
mdfkbtc/PRiVCY
|
e07b058eef2d9e0a74ffd1fe474ed27788355923
|
[
"MIT"
] | 1
|
2022-01-15T17:24:14.000Z
|
2022-01-15T17:24:14.000Z
|
test/functional/proxy_test.py
|
mdfkbtc/PRiVCY
|
e07b058eef2d9e0a74ffd1fe474ed27788355923
|
[
"MIT"
] | 2
|
2021-06-05T10:01:35.000Z
|
2021-12-10T00:09:33.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test privcyd with different proxy configuration.
Test plan:
- Start privcyds with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on privcyd side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side:
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create privcyds that connect to them
- Manipulate the privcyds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: privcyd's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: privcyd's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| 41.282178
| 120
| 0.624535
|
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE
class ProxyTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: privcyd's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| true
| true
|
790538f9a39d41e11ea933615abccde020b70db5
| 4,173
|
py
|
Python
|
Precompilar/relativo.py
|
EzioFenix/Compilador-M68HC11
|
6f0688bfe72b13f28412cb3f424b459e39ba51b6
|
[
"MIT"
] | null | null | null |
Precompilar/relativo.py
|
EzioFenix/Compilador-M68HC11
|
6f0688bfe72b13f28412cb3f424b459e39ba51b6
|
[
"MIT"
] | null | null | null |
Precompilar/relativo.py
|
EzioFenix/Compilador-M68HC11
|
6f0688bfe72b13f28412cb3f424b459e39ba51b6
|
[
"MIT"
] | null | null | null |
import re
from Error import Error4,Error6,Error9
from DataBase import BaseDatos,BdRow
from .precompilada import precompilada
from typing import Pattern
def getEtiqueta(linea:str)->str:
"""Obtiene el nombre de la captura
Args:
linea (str): Linea donde se va a buscar la etiqueta
Returns:
str: Regresa el nombre de la etiqueta
"""
# Buscamos el mnemonico
pattern='\s+([a-z]{1,5})\s+([a-z]{1,24})'
busqueda=re.search(pattern,linea,re.IGNORECASE)
    # Extract the label (second capture group)
etiqueta =busqueda.group(2)
return etiqueta
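# For example, given the source line "    BNE  LOOP", the pattern above captures
# the mnemonic "BNE" in group 1 and getEtiqueta returns the label "LOOP".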
def calcularEtiqueta(sustraendo:str,minuendo:str)-> str:
"""Resta la diferencia entre dos PC en hexadecimal
sustraendo - minuendo
- Si
- Sustraendo - minuendo
- En caso de error regresa 'e10' operando muy grande
Args:
sustraendo (str): Ejemplo '0x7'
minuendo (str): Ejemplo '0x1'
Returns:
str: Ejemplo '0x06'
"""
print(sustraendo)
print(minuendo)
sustraendo=int(sustraendo,16)
minuendo=int(minuendo,16)
resultado:int= sustraendo-minuendo
print(resultado)
if resultado <-127 or 128<resultado:
        return 'e10' #E10: the relative jump is too far
    # If the result is negative
elif resultado<0:
return convertirA2Hex(resultado)
    # if the result is positive
else:
return hex(resultado)
def bindigits(n:int, bits:int)->str:
"""Convierte a binario un numero de complemento A2 en caso de negativo, normal en caso de ser positivo
Args:
n (int): E.g 7
bits (int): eg 3
Returns:
str: E.g '001'
"""
s = bin(n & int("1"*bits, 2))[2:]
return ("{0:0>%s}" % (bits)).format(s)
def convertirA2Hex(numero:int)-> str:
"""Convierte un numero decimal a hexadecimal
- Si el número es decimal lo convierte a complemento A2
Args:
numero (int): Número decimal que se quiere convertir Eg. 07
Returns:
str: Eg. 0x07
"""
# cuantos bits ocupa el número hexadecimal
cuantosBits=(len(hex(numero))-2) *4 # el -2 es 0x, el 4 es porque 1 hex equivale a 4 bits
#numero convertido a binario
binario=bindigits(numero,cuantosBits)
return hex(int(binario, 2))
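# A small worked example of the relative-offset encoding implemented above:
#   calcularEtiqueta('0x7', '0x1') -> '0x6'   (forward jump of +6)
#   calcularEtiqueta('0x1', '0x7') -> '0xfa'  (-6 encoded as an 8-bit two's complement value)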
def precompilarPasada1(numLinea:int,modo:str,linea:str,pc: str)->precompilada:
    # global variables
    # Look for the mnemonic
pattern='\s+([a-z]{1,5})\s+([a-z]{1,24})'
busqueda=re.search(pattern,linea,re.IGNORECASE)
    # Extract the mnemonic and the label
mnemonico =busqueda.group(1)
etiqueta=busqueda.group(2)
    # Query the opcode database
consultaBd:BdRow = BaseDatos.bdSearch(mnemonico,6)
    # compute the current PC = pc + bytes used by the instruction
    pcActual=hex(int(pc,16) +2) # the +2 is because all relative-mode instructions use 2 bytes
    # Direct data
lineaPrecompilada=precompilada(numLinea,modo,pcActual,consultaBd.opcode,etiqueta,consultaBd.byte)
    # Derived data
lineaPrecompilada.bytesOcupados=consultaBd.byte
return lineaPrecompilada
def precompilarPasada2(lineaPrecompilada:precompilada,pcEtiqueta:str)->precompilada:
    # PC from which the relative offset is measured (recorded in pass 1)
    pcActual=hex(int(lineaPrecompilada.pcActual,16) )
    lineaPrecompilada1:precompilada
    # Compute the operand
    operandoPrecompilado=calcularEtiqueta(pcEtiqueta,pcActual)
    # Check that the relative jump is not too far
    if operandoPrecompilado=='e10': # error: the jump is too far away
        lineaPrecompilada1=precompilada(0,'','','','',0)
        lineaPrecompilada1.error='e10'
    else:
        operandoPrecompilado=operandoPrecompilado[2:]
        # make a copy
        lineaPrecompilada1=precompilada(lineaPrecompilada.numLinea,lineaPrecompilada.modo,hex(int(lineaPrecompilada.pcActual,16)-2),lineaPrecompilada.opcode,operandoPrecompilado,lineaPrecompilada.byte)
    print(operandoPrecompilado)
    return lineaPrecompilada1
| 29.595745
| 201
| 0.656602
|
import re
from Error import Error4,Error6,Error9
from DataBase import BaseDatos,BdRow
from .precompilada import precompilada
from typing import Pattern
def getEtiqueta(linea:str)->str:
    pattern=r'\s+([a-z]{1,5})\s+([a-z]{1,24})'
busqueda=re.search(pattern,linea,re.IGNORECASE)
etiqueta =busqueda.group(2)
return etiqueta
def calcularEtiqueta(sustraendo:str,minuendo:str)-> str:
print(sustraendo)
print(minuendo)
sustraendo=int(sustraendo,16)
minuendo=int(minuendo,16)
resultado:int= sustraendo-minuendo
print(resultado)
    if resultado <-128 or 127<resultado:
return 'e10'
elif resultado<0:
return convertirA2Hex(resultado)
else:
return hex(resultado)
def bindigits(n:int, bits:int)->str:
s = bin(n & int("1"*bits, 2))[2:]
return ("{0:0>%s}" % (bits)).format(s)
def convertirA2Hex(numero:int)-> str:
    cuantosBits=8
binario=bindigits(numero,cuantosBits)
return hex(int(binario, 2))
def precompilarPasada1(numLinea:int,modo:str,linea:str,pc: str)->precompilada:
    pattern=r'\s+([a-z]{1,5})\s+([a-z]{1,24})'
busqueda=re.search(pattern,linea,re.IGNORECASE)
mnemonico =busqueda.group(1)
etiqueta=busqueda.group(2)
consultaBd:BdRow = BaseDatos.bdSearch(mnemonico,6)
pcActual=hex(int(pc,16) +2)
lineaPrecompilada=precompilada(numLinea,modo,pcActual,consultaBd.opcode,etiqueta,consultaBd.byte)
lineaPrecompilada.bytesOcupados=consultaBd.byte
return lineaPrecompilada
def precompilarPasada2(lineaPrecompilada:precompilada,pcEtiqueta:str)->precompilada:
pcActual=hex(int(lineaPrecompilada.pcActual,16) )
lineaPrecompilada1:precompilada
operandoPrecompilado=calcularEtiqueta(pcEtiqueta,pcActual)
if operandoPrecompilado=='e10':
lineaPrecompilada1=precompilada(0,'','','','',0)
lineaPrecompilada1.error='e10'
else:
operandoPrecompilado=operandoPrecompilado[2:]
lineaPrecompilada1=precompilada(lineaPrecompilada.numLinea,lineaPrecompilada.modo,hex(int(lineaPrecompilada.pcActual,16)-2),lineaPrecompilada.opcode,operandoPrecompilado,lineaPrecompilada.byte)
print(operandoPrecompilado)
return lineaPrecompilada1
| true
| true
|
79053aea48e5eef7977740c6e769e2a55ed65589
| 1,121
|
py
|
Python
|
src/gamesbyexample/rainbow2.py
|
skinzor/PythonStdioGames
|
75f27af19d7f1d555b0fd85fbcf215f07660b93f
|
[
"MIT"
] | 1
|
2019-11-30T17:04:09.000Z
|
2019-11-30T17:04:09.000Z
|
src/gamesbyexample/rainbow2.py
|
skinzor/PythonStdioGames
|
75f27af19d7f1d555b0fd85fbcf215f07660b93f
|
[
"MIT"
] | null | null | null |
src/gamesbyexample/rainbow2.py
|
skinzor/PythonStdioGames
|
75f27af19d7f1d555b0fd85fbcf215f07660b93f
|
[
"MIT"
] | null | null | null |
# Rainbow 2, by Al Sweigart al@inventwithpython.com
# Shows a simple squiggle rainbow animation.
import time, random, sys
try:
import bext
except ImportError:
print("""This program requires the bext module, which you can install by
opening a Terminal window (on macOS & Linux) and running:
python3 -m pip install --user bext
or a Command Prompt window (on Windows) and running:
python -m pip install --user bext""")
sys.exit()
indent = 10 # How many spaces to indent.
while True:
print(' ' * indent, end='')
bext.fg('red')
print('##', end='')
bext.fg('yellow')
print('##', end='')
bext.fg('green')
print('##', end='')
bext.fg('blue')
print('##', end='')
bext.fg('cyan')
print('##', end='')
bext.fg('purple')
print('##')
if random.randint(0, 1) == 0:
# Increase the number of spaces:
indent = indent + 1
if indent > 20:
indent = 20
else:
# Decrease the number of spaces:
indent = indent - 1
if indent < 0:
indent = 0
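    # The indent value performs a clamped random walk over [0, 20]; an
    # equivalent one-liner (a sketch, not in the original) would be:
    #   indent = max(0, min(20, indent + random.choice((-1, 1))))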
time.sleep(0.05) # Add a slight pause.
| 22.877551
| 76
| 0.576271
|
import time, random, sys
try:
import bext
except ImportError:
print("""This program requires the bext module, which you can install by
opening a Terminal window (on macOS & Linux) and running:
python3 -m pip install --user bext
or a Command Prompt window (on Windows) and running:
python -m pip install --user bext""")
sys.exit()
indent = 10
while True:
print(' ' * indent, end='')
bext.fg('red')
print('##', end='')
bext.fg('yellow')
print('##', end='')
bext.fg('green')
print('##', end='')
bext.fg('blue')
print('##', end='')
bext.fg('cyan')
print('##', end='')
bext.fg('purple')
print('##')
if random.randint(0, 1) == 0:
indent = indent + 1
if indent > 20:
indent = 20
else:
indent = indent - 1
if indent < 0:
indent = 0
time.sleep(0.05)
| true
| true
|
79053b7736fa9fced53aabb1dc14c491955511d1
| 6,382
|
py
|
Python
|
struct2tensor/expression_impl/reroot.py
|
jay90099/struct2tensor
|
47d651757efa27586bf75f991b2174d8173a750b
|
[
"Apache-2.0"
] | 30
|
2019-10-07T21:31:44.000Z
|
2022-03-30T17:11:44.000Z
|
struct2tensor/expression_impl/reroot.py
|
jay90099/struct2tensor
|
47d651757efa27586bf75f991b2174d8173a750b
|
[
"Apache-2.0"
] | 2
|
2020-03-23T20:48:14.000Z
|
2021-04-16T15:05:33.000Z
|
struct2tensor/expression_impl/reroot.py
|
jay90099/struct2tensor
|
47d651757efa27586bf75f991b2174d8173a750b
|
[
"Apache-2.0"
] | 30
|
2019-07-16T13:01:53.000Z
|
2022-03-01T22:04:36.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reroot to a subtree, maintaining an input proto index.
reroot is similar to get_descendant_or_error. However, this method allows
you to call create_proto_index(...) later on, which gives you a reference to
the original proto.
"""
from typing import FrozenSet, Optional, Sequence
from struct2tensor import calculate_options
from struct2tensor import expression
from struct2tensor import expression_add
from struct2tensor import path
from struct2tensor import prensor
import tensorflow as tf
def reroot(root: expression.Expression,
source_path: path.Path) -> expression.Expression:
"""Reroot to a new path, maintaining a input proto index.
Similar to root.get_descendant_or_error(source_path): however, this
method retains the ability to get a map to the original index.
Args:
root: the original root.
source_path: the path to the new root.
Returns:
the new root.
"""
new_root = root
for step in source_path.field_list:
new_root = _RerootExpression(new_root, step)
return new_root
def create_proto_index_field(root: expression.Expression,
new_field_name: path.Step
) -> expression.Expression:
return expression_add.add_paths(
root, {path.Path([new_field_name]): _InputProtoIndexExpression(root)})
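# A hypothetical usage sketch (the expression and path names below are assumed
# for illustration; they are not defined in this module):
#
#   sessions = reroot(root_expr, path.Path(["event", "session"]))
#   sessions = create_proto_index_field(sessions, "proto_index")
#
# After calculation, the "proto_index" leaf maps each new root back to the
# index of the input proto it came from.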
class _RerootRootNodeTensor(prensor.RootNodeTensor):
"""The reroot root node.
This contains a map from a current index to the original index of a proto.
"""
def __init__(self, size: tf.Tensor, input_proto_index: tf.Tensor):
super().__init__(size)
self._input_proto_index = input_proto_index
@property
def input_proto_index(self):
return self._input_proto_index
def _get_proto_index_parent_index(node: prensor.RootNodeTensor):
return tf.range(node.size)
def _get_input_proto_index(node: prensor.RootNodeTensor):
if isinstance(node, _RerootRootNodeTensor):
return node.input_proto_index
return _get_proto_index_parent_index(node)
class _RerootExpression(expression.Expression):
"""Reroot to a new path, maintaining a input proto index."""
def __init__(self, original_root: expression.Expression,
field_name: path.Step):
super().__init__(True, None)
self._field_name = field_name
self._original_root = original_root
self._new_root = original_root.get_child_or_error(field_name)
if self._new_root.type is not None:
raise ValueError("New root must be a message type: {}".format(
str(self._field_name)))
# TODO(martinz): Check that the "original root source expression" has a type
# in (_RerootExpression, prensor._ProtoRootExpression)
# To do this, we need a general technique similar to
# expression_add._is_true_source_expression: however, this should also cover
# intermediate operations like "project".
# Since this check is not present, if it should have fired, there will be
# an error when calculate(...) is called.
def get_source_expressions(self) -> Sequence[expression.Expression]:
return [self._original_root, self._new_root]
def calculate(
self,
sources: Sequence[prensor.NodeTensor],
destinations: Sequence[expression.Expression],
options: calculate_options.Options,
side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:
[old_root_value, new_root_value] = sources
if isinstance(old_root_value, prensor.RootNodeTensor) and isinstance(
new_root_value, prensor.ChildNodeTensor):
old_input_proto_index = _get_input_proto_index(old_root_value)
# Notice that the "gather" operation is similar to promote.
return _RerootRootNodeTensor(
tf.size(new_root_value.parent_index, out_type=tf.int64),
tf.gather(old_input_proto_index, new_root_value.parent_index))
raise ValueError("Source types incorrect")
def calculation_is_identity(self) -> bool:
return False
def calculation_equal(self, expr: expression.Expression) -> bool:
    # Although path can vary, it is not used in the calculation, just to
    # identify the expression; equality therefore depends only on the type.
return isinstance(expr, _RerootExpression)
def _get_child_impl(self,
field_name: path.Step) -> Optional[expression.Expression]:
return self._new_root.get_child(field_name)
def known_field_names(self) -> FrozenSet[path.Step]:
return self._new_root.known_field_names()
class _InputProtoIndexExpression(expression.Leaf):
"""A proto index expression."""
def __init__(self, root: expression.Expression):
"""Constructor for proto index expression.
Args:
root: an expression that must return a RootNodeTensor.
"""
super().__init__(is_repeated=False, my_type=tf.int64)
self._root = root
def get_source_expressions(self) -> Sequence[expression.Expression]:
return [self._root]
def calculate(
self,
sources: Sequence[prensor.NodeTensor],
destinations: Sequence[expression.Expression],
options: calculate_options.Options,
side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:
[root_node] = sources
# The following check ensures not just that we can calculate the value,
# but that no "improper" reroots were done.
if isinstance(root_node, prensor.RootNodeTensor):
return prensor.LeafNodeTensor(
_get_proto_index_parent_index(root_node),
_get_input_proto_index(root_node),
is_repeated=False)
raise ValueError(
"Illegal operation: expected a true root node: got {}".format(
str(root_node)))
def calculation_is_identity(self) -> bool:
return False
def calculation_equal(self, expr: expression.Expression) -> bool:
    # Although path can vary, it is not used in the calculation, just to
    # identify the expression; equality therefore depends only on the type.
return isinstance(expr, _InputProtoIndexExpression)
| 36.056497
| 80
| 0.733469
|
from typing import FrozenSet, Optional, Sequence
from struct2tensor import calculate_options
from struct2tensor import expression
from struct2tensor import expression_add
from struct2tensor import path
from struct2tensor import prensor
import tensorflow as tf
def reroot(root: expression.Expression,
source_path: path.Path) -> expression.Expression:
new_root = root
for step in source_path.field_list:
new_root = _RerootExpression(new_root, step)
return new_root
def create_proto_index_field(root: expression.Expression,
new_field_name: path.Step
) -> expression.Expression:
return expression_add.add_paths(
root, {path.Path([new_field_name]): _InputProtoIndexExpression(root)})
class _RerootRootNodeTensor(prensor.RootNodeTensor):
def __init__(self, size: tf.Tensor, input_proto_index: tf.Tensor):
super().__init__(size)
self._input_proto_index = input_proto_index
@property
def input_proto_index(self):
return self._input_proto_index
def _get_proto_index_parent_index(node: prensor.RootNodeTensor):
return tf.range(node.size)
def _get_input_proto_index(node: prensor.RootNodeTensor):
if isinstance(node, _RerootRootNodeTensor):
return node.input_proto_index
return _get_proto_index_parent_index(node)
class _RerootExpression(expression.Expression):
def __init__(self, original_root: expression.Expression,
field_name: path.Step):
super().__init__(True, None)
self._field_name = field_name
self._original_root = original_root
self._new_root = original_root.get_child_or_error(field_name)
if self._new_root.type is not None:
raise ValueError("New root must be a message type: {}".format(
str(self._field_name)))
def get_source_expressions(self) -> Sequence[expression.Expression]:
return [self._original_root, self._new_root]
def calculate(
self,
sources: Sequence[prensor.NodeTensor],
destinations: Sequence[expression.Expression],
options: calculate_options.Options,
side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:
[old_root_value, new_root_value] = sources
if isinstance(old_root_value, prensor.RootNodeTensor) and isinstance(
new_root_value, prensor.ChildNodeTensor):
old_input_proto_index = _get_input_proto_index(old_root_value)
return _RerootRootNodeTensor(
tf.size(new_root_value.parent_index, out_type=tf.int64),
tf.gather(old_input_proto_index, new_root_value.parent_index))
raise ValueError("Source types incorrect")
def calculation_is_identity(self) -> bool:
return False
def calculation_equal(self, expr: expression.Expression) -> bool:
return isinstance(expr, _RerootExpression)
def _get_child_impl(self,
field_name: path.Step) -> Optional[expression.Expression]:
return self._new_root.get_child(field_name)
def known_field_names(self) -> FrozenSet[path.Step]:
return self._new_root.known_field_names()
class _InputProtoIndexExpression(expression.Leaf):
def __init__(self, root: expression.Expression):
super().__init__(is_repeated=False, my_type=tf.int64)
self._root = root
def get_source_expressions(self) -> Sequence[expression.Expression]:
return [self._root]
def calculate(
self,
sources: Sequence[prensor.NodeTensor],
destinations: Sequence[expression.Expression],
options: calculate_options.Options,
side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:
[root_node] = sources
if isinstance(root_node, prensor.RootNodeTensor):
return prensor.LeafNodeTensor(
_get_proto_index_parent_index(root_node),
_get_input_proto_index(root_node),
is_repeated=False)
raise ValueError(
"Illegal operation: expected a true root node: got {}".format(
str(root_node)))
def calculation_is_identity(self) -> bool:
return False
def calculation_equal(self, expr: expression.Expression) -> bool:
return isinstance(expr, _InputProtoIndexExpression)
| true
| true
|
79053bc19046bc5c09c273b68271b530847adeaa
| 1,800
|
py
|
Python
|
src/app.py
|
mtik00/wkhtmltopdf-service
|
742787d43d5a6b9b3d69c6471d60101ae8fde350
|
[
"MIT"
] | null | null | null |
src/app.py
|
mtik00/wkhtmltopdf-service
|
742787d43d5a6b9b3d69c6471d60101ae8fde350
|
[
"MIT"
] | null | null | null |
src/app.py
|
mtik00/wkhtmltopdf-service
|
742787d43d5a6b9b3d69c6471d60101ae8fde350
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file contains code to serve a web application to convert HTML to PDF.
This application uses a local install of the `wkhtmltopdf` binary for the conversion.
"""
import os
from subprocess import check_output
from tempfile import TemporaryDirectory
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Route
async def execute_wkhtmltopdf(uri: str) -> bytes:
"""Run wkhtmltopdf on the command-line and return the output."""
cmd = [
"wkhtmltopdf",
"--log-level",
"none",
uri,
"-",
]
return check_output(cmd)
async def convert_body(request: Request):
"""
It's just _way_ easier to deal with files rather than STDIN.
Take the body of the request, write it to a temporary file, then use
wkhtmltopdf to convert it.
"""
data = await request.body()
if not data:
return Response("ERROR: No body", status_code=400)
with TemporaryDirectory() as tmpdirname:
outfile = os.path.join(tmpdirname, "out.html")
with open(outfile, "w") as fh:
fh.write(data.decode("utf-8"))
        pdf_bytes = await execute_wkhtmltopdf(outfile)
    return Response(pdf_bytes, media_type="application/pdf")
async def convert_uri(request: Request):
data = await request.json()
if "uri" not in data:
return Response("Invalid JSON in request", status_code=400)
bytes = await execute_wkhtmltopdf(data["uri"])
return Response(bytes, media_type="application/pdf")
app = Starlette(
debug=True,
routes=[
Route("/uri", convert_uri, methods=["POST"]),
Route("/data", convert_body, methods=["POST"]),
],
)
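# Example requests (assuming the service is reachable at localhost:8000; the
# host and port are illustrative, not configured in this file):
#   curl -X POST --data-binary @page.html http://localhost:8000/data -o out.pdf
#   curl -X POST -d '{"uri": "https://example.com"}' http://localhost:8000/uri -o out.pdf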
| 25.714286
| 85
| 0.669444
|
import os
from subprocess import check_output
from tempfile import TemporaryDirectory
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Route
async def execute_wkhtmltopdf(uri: str) -> bytes:
cmd = [
"wkhtmltopdf",
"--log-level",
"none",
uri,
"-",
]
return check_output(cmd)
async def convert_body(request: Request):
data = await request.body()
if not data:
return Response("ERROR: No body", status_code=400)
with TemporaryDirectory() as tmpdirname:
outfile = os.path.join(tmpdirname, "out.html")
with open(outfile, "w") as fh:
fh.write(data.decode("utf-8"))
        pdf_bytes = await execute_wkhtmltopdf(outfile)
    return Response(pdf_bytes, media_type="application/pdf")
async def convert_uri(request: Request):
data = await request.json()
if "uri" not in data:
return Response("Invalid JSON in request", status_code=400)
bytes = await execute_wkhtmltopdf(data["uri"])
return Response(bytes, media_type="application/pdf")
app = Starlette(
debug=True,
routes=[
Route("/uri", convert_uri, methods=["POST"]),
Route("/data", convert_body, methods=["POST"]),
],
)
| true
| true
|
79053c712fcad4880d69f2f62072734d673dd75f
| 6,913
|
py
|
Python
|
homeassistant/components/netgear/config_flow.py
|
eyager1/core
|
c0ae31d86c841107930cf471fd60d65b5c163f16
|
[
"Apache-2.0"
] | 1
|
2022-02-19T14:13:50.000Z
|
2022-02-19T14:13:50.000Z
|
homeassistant/components/netgear/config_flow.py
|
eyager1/core
|
c0ae31d86c841107930cf471fd60d65b5c163f16
|
[
"Apache-2.0"
] | 17
|
2021-11-24T06:24:25.000Z
|
2022-03-31T06:23:29.000Z
|
homeassistant/components/netgear/config_flow.py
|
eyager1/core
|
c0ae31d86c841107930cf471fd60d65b5c163f16
|
[
"Apache-2.0"
] | null | null | null |
"""Config flow to configure the Netgear integration."""
from __future__ import annotations
import logging
from typing import cast
from urllib.parse import urlparse
from pynetgear import DEFAULT_HOST, DEFAULT_PORT, DEFAULT_USER
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.util.network import is_ipv4_address
from .const import (
CONF_CONSIDER_HOME,
DEFAULT_CONSIDER_HOME,
DEFAULT_NAME,
DOMAIN,
MODELS_PORT_80,
MODELS_PORT_5555,
PORT_80,
PORT_5555,
)
from .errors import CannotLoginException
from .router import get_api
_LOGGER = logging.getLogger(__name__)
def _discovery_schema_with_defaults(discovery_info):
return vol.Schema(_ordered_shared_schema(discovery_info))
def _user_schema_with_defaults(user_input):
user_schema = {vol.Optional(CONF_HOST, default=user_input.get(CONF_HOST, "")): str}
user_schema.update(_ordered_shared_schema(user_input))
return vol.Schema(user_schema)
def _ordered_shared_schema(schema_input):
return {
vol.Optional(CONF_USERNAME, default=schema_input.get(CONF_USERNAME, "")): str,
vol.Required(CONF_PASSWORD, default=schema_input.get(CONF_PASSWORD, "")): str,
}
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Options for the component."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Init object."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
settings_schema = vol.Schema(
{
vol.Optional(
CONF_CONSIDER_HOME,
default=self.config_entry.options.get(
CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME.total_seconds()
),
): int,
}
)
return self.async_show_form(step_id="init", data_schema=settings_schema)
class NetgearFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
def __init__(self):
"""Initialize the netgear config flow."""
self.placeholders = {
CONF_HOST: DEFAULT_HOST,
CONF_PORT: DEFAULT_PORT,
CONF_USERNAME: DEFAULT_USER,
CONF_SSL: False,
}
self.discovered = False
@staticmethod
@callback
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> OptionsFlowHandler:
"""Get the options flow."""
return OptionsFlowHandler(config_entry)
async def _show_setup_form(self, user_input=None, errors=None):
"""Show the setup form to the user."""
if not user_input:
user_input = {}
if self.discovered:
data_schema = _discovery_schema_with_defaults(user_input)
else:
data_schema = _user_schema_with_defaults(user_input)
return self.async_show_form(
step_id="user",
data_schema=data_schema,
errors=errors or {},
description_placeholders=self.placeholders,
)
async def async_step_ssdp(self, discovery_info: ssdp.SsdpServiceInfo) -> FlowResult:
"""Initialize flow from ssdp."""
updated_data: dict[str, str | int | bool] = {}
device_url = urlparse(discovery_info.ssdp_location)
if hostname := device_url.hostname:
hostname = cast(str, hostname)
updated_data[CONF_HOST] = hostname
if not is_ipv4_address(str(hostname)):
return self.async_abort(reason="not_ipv4_address")
_LOGGER.debug("Netgear ssdp discovery info: %s", discovery_info)
await self.async_set_unique_id(discovery_info.upnp[ssdp.ATTR_UPNP_SERIAL])
self._abort_if_unique_id_configured(updates=updated_data)
if device_url.scheme == "https":
updated_data[CONF_SSL] = True
else:
updated_data[CONF_SSL] = False
updated_data[CONF_PORT] = DEFAULT_PORT
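        # Some models expose the SOAP API on port 80, others on port 5555 with
        # SSL (per the MODELS_PORT_80 / MODELS_PORT_5555 lists); match the
        # advertised model number or model name by prefix.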
for model in MODELS_PORT_80:
if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
model
) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
model
):
updated_data[CONF_PORT] = PORT_80
for model in MODELS_PORT_5555:
if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
model
) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
model
):
updated_data[CONF_PORT] = PORT_5555
updated_data[CONF_SSL] = True
self.placeholders.update(updated_data)
self.discovered = True
return await self.async_step_user()
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
errors = {}
if user_input is None:
return await self._show_setup_form()
host = user_input.get(CONF_HOST, self.placeholders[CONF_HOST])
port = self.placeholders[CONF_PORT]
ssl = self.placeholders[CONF_SSL]
username = user_input.get(CONF_USERNAME, self.placeholders[CONF_USERNAME])
password = user_input[CONF_PASSWORD]
if not username:
username = self.placeholders[CONF_USERNAME]
# Open connection and check authentication
try:
api = await self.hass.async_add_executor_job(
get_api, password, host, username, port, ssl
)
except CannotLoginException:
errors["base"] = "config"
if errors:
return await self._show_setup_form(user_input, errors)
# Check if already configured
info = await self.hass.async_add_executor_job(api.get_info)
await self.async_set_unique_id(info["SerialNumber"], raise_on_progress=False)
self._abort_if_unique_id_configured()
config_data = {
CONF_USERNAME: username,
CONF_PASSWORD: password,
CONF_HOST: host,
CONF_PORT: api.port,
CONF_SSL: api.ssl,
}
if info.get("ModelName") is not None and info.get("DeviceName") is not None:
name = f"{info['ModelName']} - {info['DeviceName']}"
else:
name = info.get("ModelName", DEFAULT_NAME)
return self.async_create_entry(
title=name,
data=config_data,
)
| 32.00463
| 88
| 0.645885
|
from __future__ import annotations
import logging
from typing import cast
from urllib.parse import urlparse
from pynetgear import DEFAULT_HOST, DEFAULT_PORT, DEFAULT_USER
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.util.network import is_ipv4_address
from .const import (
CONF_CONSIDER_HOME,
DEFAULT_CONSIDER_HOME,
DEFAULT_NAME,
DOMAIN,
MODELS_PORT_80,
MODELS_PORT_5555,
PORT_80,
PORT_5555,
)
from .errors import CannotLoginException
from .router import get_api
_LOGGER = logging.getLogger(__name__)
def _discovery_schema_with_defaults(discovery_info):
return vol.Schema(_ordered_shared_schema(discovery_info))
def _user_schema_with_defaults(user_input):
user_schema = {vol.Optional(CONF_HOST, default=user_input.get(CONF_HOST, "")): str}
user_schema.update(_ordered_shared_schema(user_input))
return vol.Schema(user_schema)
def _ordered_shared_schema(schema_input):
return {
vol.Optional(CONF_USERNAME, default=schema_input.get(CONF_USERNAME, "")): str,
vol.Required(CONF_PASSWORD, default=schema_input.get(CONF_PASSWORD, "")): str,
}
class OptionsFlowHandler(config_entries.OptionsFlow):
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
settings_schema = vol.Schema(
{
vol.Optional(
CONF_CONSIDER_HOME,
default=self.config_entry.options.get(
CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME.total_seconds()
),
): int,
}
)
return self.async_show_form(step_id="init", data_schema=settings_schema)
class NetgearFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
VERSION = 1
def __init__(self):
self.placeholders = {
CONF_HOST: DEFAULT_HOST,
CONF_PORT: DEFAULT_PORT,
CONF_USERNAME: DEFAULT_USER,
CONF_SSL: False,
}
self.discovered = False
@staticmethod
@callback
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> OptionsFlowHandler:
return OptionsFlowHandler(config_entry)
async def _show_setup_form(self, user_input=None, errors=None):
if not user_input:
user_input = {}
if self.discovered:
data_schema = _discovery_schema_with_defaults(user_input)
else:
data_schema = _user_schema_with_defaults(user_input)
return self.async_show_form(
step_id="user",
data_schema=data_schema,
errors=errors or {},
description_placeholders=self.placeholders,
)
async def async_step_ssdp(self, discovery_info: ssdp.SsdpServiceInfo) -> FlowResult:
updated_data: dict[str, str | int | bool] = {}
device_url = urlparse(discovery_info.ssdp_location)
if hostname := device_url.hostname:
hostname = cast(str, hostname)
updated_data[CONF_HOST] = hostname
if not is_ipv4_address(str(hostname)):
return self.async_abort(reason="not_ipv4_address")
_LOGGER.debug("Netgear ssdp discovery info: %s", discovery_info)
await self.async_set_unique_id(discovery_info.upnp[ssdp.ATTR_UPNP_SERIAL])
self._abort_if_unique_id_configured(updates=updated_data)
if device_url.scheme == "https":
updated_data[CONF_SSL] = True
else:
updated_data[CONF_SSL] = False
updated_data[CONF_PORT] = DEFAULT_PORT
for model in MODELS_PORT_80:
if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
model
) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
model
):
updated_data[CONF_PORT] = PORT_80
for model in MODELS_PORT_5555:
if discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NUMBER, "").startswith(
model
) or discovery_info.upnp.get(ssdp.ATTR_UPNP_MODEL_NAME, "").startswith(
model
):
updated_data[CONF_PORT] = PORT_5555
updated_data[CONF_SSL] = True
self.placeholders.update(updated_data)
self.discovered = True
return await self.async_step_user()
async def async_step_user(self, user_input=None):
errors = {}
if user_input is None:
return await self._show_setup_form()
host = user_input.get(CONF_HOST, self.placeholders[CONF_HOST])
port = self.placeholders[CONF_PORT]
ssl = self.placeholders[CONF_SSL]
username = user_input.get(CONF_USERNAME, self.placeholders[CONF_USERNAME])
password = user_input[CONF_PASSWORD]
if not username:
username = self.placeholders[CONF_USERNAME]
try:
api = await self.hass.async_add_executor_job(
get_api, password, host, username, port, ssl
)
except CannotLoginException:
errors["base"] = "config"
if errors:
return await self._show_setup_form(user_input, errors)
info = await self.hass.async_add_executor_job(api.get_info)
await self.async_set_unique_id(info["SerialNumber"], raise_on_progress=False)
self._abort_if_unique_id_configured()
config_data = {
CONF_USERNAME: username,
CONF_PASSWORD: password,
CONF_HOST: host,
CONF_PORT: api.port,
CONF_SSL: api.ssl,
}
if info.get("ModelName") is not None and info.get("DeviceName") is not None:
name = f"{info['ModelName']} - {info['DeviceName']}"
else:
name = info.get("ModelName", DEFAULT_NAME)
return self.async_create_entry(
title=name,
data=config_data,
)
| true
| true
|
79053e05c8a875a25eec2528239836dc98fe8a9e
| 616
|
py
|
Python
|
TPS_dice_roller_bot/core/parse.py
|
PumaConcolor/TPS-dice-roller-bot
|
4ffb2498cdb3411d4d1b2a33eda828d174e997cb
|
[
"MIT"
] | 4
|
2020-10-06T14:47:17.000Z
|
2022-02-24T17:24:26.000Z
|
TPS_dice_roller_bot/core/parse.py
|
PumaConcolor/TPS-dice-roller-bot
|
4ffb2498cdb3411d4d1b2a33eda828d174e997cb
|
[
"MIT"
] | null | null | null |
TPS_dice_roller_bot/core/parse.py
|
PumaConcolor/TPS-dice-roller-bot
|
4ffb2498cdb3411d4d1b2a33eda828d174e997cb
|
[
"MIT"
] | 1
|
2020-10-06T14:47:18.000Z
|
2020-10-06T14:47:18.000Z
|
import re
### parse_text_regex(text, regex)
# takes a string and a regex, returns a tuple with the matching groups
def parse_text_regex(text, regex):
    try:
        compiled_regex = re.compile(regex)
    except (TypeError, re.error) as e:
        raise Exception(e)
    # re.compile never returns None, so the no-match case is detected on the
    # match object instead
    match = compiled_regex.match(text)
    if match is None:
        raise Exception(f"String {text} doesn't match {regex}")
    return match.groups()
def clean_string_with_regex(text, regex):
cleaned_string = re.sub(regex, '', text)
cleaned_string = cleaned_string.strip()
return cleaned_string
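# A minimal usage sketch (the regex and inputs below are illustrative, not from
# the project); running this file directly exercises both helpers:
if __name__ == "__main__":
    print(parse_text_regex("roll 2d6", r"roll (\d+)d(\d+)"))   # ('2', '6')
    print(clean_string_with_regex("roll 2d6", r"\s*roll\s*"))  # '2d6'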
| 24.64
| 67
| 0.676948
|
import re
def parse_text_regex(text, regex):
    try:
        compiled_regex = re.compile(regex)
    except (TypeError, re.error) as e:
        raise Exception(e)
    match = compiled_regex.match(text)
    if match is None:
        raise Exception(f"String {text} doesn't match {regex}")
    return match.groups()
def clean_string_with_regex(text, regex):
cleaned_string = re.sub(regex, '', text)
cleaned_string = cleaned_string.strip()
return cleaned_string
| true
| true
|
79053f51d42e7f64ff65013039b18d3f557b30c8
| 1,676
|
py
|
Python
|
aulas/05-06/variaveis_aleatorias.py
|
thiago9864/introducao_modelagem
|
7ec90d266e1bbae7f942f2c600c4ea1d88d17614
|
[
"MIT"
] | 1
|
2019-05-09T15:42:43.000Z
|
2019-05-09T15:42:43.000Z
|
aulas/05-06/variaveis_aleatorias.py
|
thiago9864/introducao_modelagem
|
7ec90d266e1bbae7f942f2c600c4ea1d88d17614
|
[
"MIT"
] | null | null | null |
aulas/05-06/variaveis_aleatorias.py
|
thiago9864/introducao_modelagem
|
7ec90d266e1bbae7f942f2c600c4ea1d88d17614
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 5 08:32:13 2019
@author: Thiago
"""
import numpy as np
import pylab as pl
#%%
# Simulating a discrete random variable (va)
def va_estoque():
    p=np.array([0.1, 0.2, 0.6, 0.1])
    x=np.random.rand()
    # compare x against the cumulative probabilities of p
    if 0 <= x <= p[0]:
        return 1
    elif p[0] < x <= p[0]+p[1]:
        return 2
    elif p[0]+p[1] < x <= p[0]+p[1]+p[2]:
        return 3
    elif p[0]+p[1]+p[2] < x <= 1.0:
        return 4
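# Equivalent single draw using numpy directly (a sketch, not in the original):
#   np.random.choice([1, 2, 3, 4], p=[0.1, 0.2, 0.6, 0.1])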
v = [va_estoque() for i in range(100000)]
pl.hist(v,)
pl.show()
#%%
# Inventory simulation (estoque = stock, lucro = profit)
M, T, estoque, lucro = 3, 3, 10, 0
R = 10000
for i in range(R):
    Y=va_estoque()
    lucro += 20*min(estoque, Y)   # revenue: sell min(stock, demand)
    estoque = max(0, estoque-Y)   # stock left after the sale
    lucro -= 5*estoque            # holding cost
    if estoque<M:                 # restock T units when below the threshold M
        estoque += T
        lucro -= 10*T
lucro /= R
print(M, T, lucro, estoque)
#%%
# Ehrenfest urn simulation
N, s = 100, []
for j in range(1000):
v = [True for i in range(N)]
for i in range(1000):
k=np.random.choice(N)
v[k] = not v[k]
x = sum(v) / N
s.append(x)
pl.hist(s)
#%%
# Law of large numbers
np.random.seed(0)
S = [1, 2, 3, 4, 5, 6]
n_vals = np.logspace(1, 5, num=200)
s=[]
for val in n_vals:
np.random.seed(0)
n = int(val)
x = np.random.choice(S,n)
p=sum(x==3)/n
s.append([n,p])
s=np.array(s)
pl.semilogx(s[:,0], s[:,1])  # empirical frequency of rolling a 3 vs. sample size n
pl.axhline(1./len(S),c='r')
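# The red line marks the theoretical value P(X = 3) = 1/6 ≈ 0.1667 for a fair
# die; by the law of large numbers the empirical frequency p converges to it
# as n grows.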
#%%
# Ergodic processes
#%%
# Central-limit-theorem sketch, left disabled (would require: import seaborn as sns)
'''
s = 3000
for n in [1,2,3,5,10,50,100,200,400,1000]:
    z=np.zeros(s)
    for k in range(n):
        x = np.random.uniform(-1, 1, s)
        z+=x
    x = z/np.sqrt(n)
    pl.figure(n)
    sns.distplot(x, bins=12, rug=True)
    pl.title('N = ' + str(n))
'''
| 15.099099
| 42
| 0.50716
|
import numpy as np
import pylab as pl
def va_estoque():
p=np.array([0.1, 0.2, 0.6, 0.1])
x=np.random.rand()
    if 0 <= x <= p[0]:
return 1
elif p[0] < x <= p[0]+p[1]:
return 2
elif p[0]+p[1] < x <= p[0]+p[1]+p[2]:
return 3
elif p[0]+p[1]+p[2] < x <= 1.0:
return 4
v = [va_estoque() for i in range(100000)]
pl.hist(v,)
pl.show()
M, T, estoque, lucro = 3, 3, 10, 0
R = 10000
for i in range(R):
Y=va_estoque()
lucro += 20*min(estoque, Y)
    estoque = max(0, estoque-Y)
lucro -= 5*estoque
if estoque<M:
estoque += T
lucro -= 10*T
lucro /= R
print(M, T, lucro, estoque)
N, s = 100, []
for j in range(1000):
v = [True for i in range(N)]
for i in range(1000):
k=np.random.choice(N)
v[k] = not v[k]
x = sum(v) / N
s.append(x)
pl.hist(s)
np.random.seed(0)
S = [1, 2, 3, 4, 5, 6]
n_vals = np.logspace(1, 5, num=200)
s=[]
for val in n_vals:
np.random.seed(0)
n = int(val)
x = np.random.choice(S,n)
p=sum(x==3)/n
s.append([n,p])
s=np.array(s)
pl.semilogx(s[:,0], s[:,1])
pl.axhline(1./len(S),c='r')
| true
| true
|
79053f9ebf194c6de312509e2a055a7bbcf84e4f
| 2,993
|
py
|
Python
|
setup.py
|
stephanecollot/popmon
|
332ac3f79df1dd1c39b764c6d967e20f28ac124c
|
[
"MIT"
] | null | null | null |
setup.py
|
stephanecollot/popmon
|
332ac3f79df1dd1c39b764c6d967e20f28ac124c
|
[
"MIT"
] | null | null | null |
setup.py
|
stephanecollot/popmon
|
332ac3f79df1dd1c39b764c6d967e20f28ac124c
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
NAME = "popmon"
MAJOR = 0
REVISION = 3
PATCH = 8
DEV = False
# NOTE: also update version at: README.rst
with open("requirements.txt") as f:
REQUIREMENTS = f.read().splitlines()
# read the contents of abstract file
with open("README.rst", encoding="utf-8") as f:
long_description = f.read()
VERSION = "{major}.{revision}.{patch}".format(
major=MAJOR, revision=REVISION, patch=PATCH
)
FULL_VERSION = VERSION
if DEV:
FULL_VERSION += ".dev"
with open("requirements-test.txt") as f:
REQUIREMENTS += f.read().splitlines()
def write_version_py(filename: str = "popmon/version.py") -> None:
"""Write package version to version.py.
    This ensures that the version in version.py stays in sync with this setup script.
    :param filename: The version.py file to write to.
:type filename: str
"""
# Do not modify the indentation of version_str!
version_str = """\"\"\"THIS FILE IS AUTO-GENERATED BY SETUP.PY.\"\"\"
name = \"{name!s}\"
version = \"{version!s}\"
full_version = \"{full_version!s}\"
release = {is_release!s}
"""
with open(filename, "w") as version_file:
version_file.write(
version_str.format(
name=NAME.lower(),
version=VERSION,
full_version=FULL_VERSION,
is_release=not DEV,
)
)
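# With the constants above, the generated popmon/version.py would read as
# follows (a rendering of the template, shown for illustration):
#
#   """THIS FILE IS AUTO-GENERATED BY SETUP.PY."""
#   name = "popmon"
#   version = "0.3.8"
#   full_version = "0.3.8"
#   release = True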
def setup_package() -> None:
"""The main setup method.
It is responsible for setting up and installing the package.
"""
write_version_py()
setup(
name=NAME,
version=VERSION,
url="https://github.com/ing-bank/popmon",
license="MIT",
author="ING Wholesale Banking Advanced Analytics",
description="Monitor the stability of a pandas or spark dataset",
keywords="pandas spark data-science data-analysis monitoring statistics python jupyter ipython",
long_description=long_description,
long_description_content_type="text/x-rst",
python_requires=">=3.6",
packages=find_packages(),
install_requires=REQUIREMENTS,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
# files to be shipped with the installation, under: popmon/popmon/
# after installation, these can be found with the functions in resources.py
package_data=dict(
popmon=[
"visualization/templates/*.html",
"visualization/templates/assets/css/*.css",
"visualization/templates/assets/js/*.js",
"test_data/*.csv.gz",
"test_data/*.json*",
"notebooks/popmon*tutorial*.ipynb",
]
),
entry_points={
"console_scripts": ["popmon_run = popmon.pipeline.amazing_pipeline:run"]
},
)
if __name__ == "__main__":
setup_package()
| 29.633663
| 104
| 0.610758
|
from setuptools import find_packages, setup
NAME = "popmon"
MAJOR = 0
REVISION = 3
PATCH = 8
DEV = False
with open("requirements.txt") as f:
REQUIREMENTS = f.read().splitlines()
with open("README.rst", encoding="utf-8") as f:
long_description = f.read()
VERSION = "{major}.{revision}.{patch}".format(
major=MAJOR, revision=REVISION, patch=PATCH
)
FULL_VERSION = VERSION
if DEV:
FULL_VERSION += ".dev"
with open("requirements-test.txt") as f:
REQUIREMENTS += f.read().splitlines()
def write_version_py(filename: str = "popmon/version.py") -> None:
version_str = """\"\"\"THIS FILE IS AUTO-GENERATED BY SETUP.PY.\"\"\"
name = \"{name!s}\"
version = \"{version!s}\"
full_version = \"{full_version!s}\"
release = {is_release!s}
"""
with open(filename, "w") as version_file:
version_file.write(
version_str.format(
name=NAME.lower(),
version=VERSION,
full_version=FULL_VERSION,
is_release=not DEV,
)
)
def setup_package() -> None:
write_version_py()
setup(
name=NAME,
version=VERSION,
url="https://github.com/ing-bank/popmon",
license="MIT",
author="ING Wholesale Banking Advanced Analytics",
description="Monitor the stability of a pandas or spark dataset",
keywords="pandas spark data-science data-analysis monitoring statistics python jupyter ipython",
long_description=long_description,
long_description_content_type="text/x-rst",
python_requires=">=3.6",
packages=find_packages(),
install_requires=REQUIREMENTS,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_data=dict(
popmon=[
"visualization/templates/*.html",
"visualization/templates/assets/css/*.css",
"visualization/templates/assets/js/*.js",
"test_data/*.csv.gz",
"test_data/*.json*",
"notebooks/popmon*tutorial*.ipynb",
]
),
entry_points={
"console_scripts": ["popmon_run = popmon.pipeline.amazing_pipeline:run"]
},
)
if __name__ == "__main__":
setup_package()
| true
| true
|
790540aa671555a0ad89ba5876743c20e9324324
| 7,849
|
py
|
Python
|
hpvm/projects/hpvm-profiler/hpvm_profiler/__init__.py
|
vzyrianov/hpvm-autograd
|
521cc3b684531548aea75f9fe3cc673aaa4a2e90
|
[
"Apache-2.0"
] | null | null | null |
hpvm/projects/hpvm-profiler/hpvm_profiler/__init__.py
|
vzyrianov/hpvm-autograd
|
521cc3b684531548aea75f9fe3cc673aaa4a2e90
|
[
"Apache-2.0"
] | null | null | null |
hpvm/projects/hpvm-profiler/hpvm_profiler/__init__.py
|
vzyrianov/hpvm-autograd
|
521cc3b684531548aea75f9fe3cc673aaa4a2e90
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
from subprocess import PIPE, CalledProcessError
from typing import Iterable, List, Tuple, Union
import matplotlib.pyplot as plt
PathLike = Union[Path, str]
conf_opening, conf_closing = "+++++", "-----"
def profile_config_file(
binary_path: PathLike,
config_path: PathLike,
output_config_path: PathLike,
progress_bar: bool = True,
profile_filename: str = "profile_info.txt",
qos_filename: str = "final_accuracy",
) -> None:
r"""Profile an HPVM configuration file with an HPVM binary,
and write the updated configuration file to a given location.
The configuration file must have the baseline as the first configuration.
:param binary_path: Path to binary to be executed in profiling.
:param config_path: Path to config file (HPVM configuration format)
with configs to enumerate for profiling.
:param output_config_path: Path where the output configs are written.
The output config file has the same configs as the input `config_path` file,
but the performance and energy readings are updated.
:param progress_bar: If `True`, show a progress bar for number of configs already profiled.
:param profile_filename: Name of profile file generated by the binary (in current directory).
This defaults to "profile_info.txt" and should not be changed for HPVM binaries.
:param qos_filename: Name of QoS file generated by the binary (in current directory).
It contains a single float number as the QoS of this run.
This defaults to "final_accuracy" and should not be changed for HPVM binaries.
"""
# Read first line ("the float") and configs in config file
header, configs = read_hpvm_configs(Path(config_path))
if not configs:
raise ValueError("Config file with no configs is unsupported.")
# Modifies configs in place.
profile_configs(
binary_path,
configs[1:],
configs[0],
progress_bar,
profile_filename,
qos_filename,
)
write_hpvm_configs(header, configs, Path(output_config_path))
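# A hypothetical invocation (the binary and file names are illustrative only):
#
#   profile_config_file("./lenet_binary", "confs.txt", "confs_profiled.txt")
#
# The first config in confs.txt is treated as the baseline; every other
# config's speedup is measured against it and written to confs_profiled.txt.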
def profile_configs(
binary_path: PathLike,
configs: Iterable["Config"],
baseline_config: "Config",
progress_bar: bool = True,
profile_filename: str = "profile_info.txt",
qos_filename: str = "final_accuracy",
) -> None:
"""Profile a sequence of HPVM configs.
This function modifies argument `configs` in place."""
from tqdm import tqdm
    baseline_time, baseline_acc = measure_config(
        binary_path, baseline_config, profile_filename, qos_filename
    )
iterable = tqdm(configs, desc="Configs profiled") if progress_bar else configs
for config in iterable:
time, acc = measure_config(binary_path, config, profile_filename, qos_filename)
speedup = baseline_time / time
config.update_profile_results(speedup, acc, baseline_acc)
def measure_config(
binary_path: PathLike,
config: "Config",
profile_filename: str = "profile_info.txt",
qos_filename: str = "final_accuracy",
):
from subprocess import check_call
from tempfile import NamedTemporaryFile
import os
temp_file = NamedTemporaryFile("w")
write_hpvm_configs("0.0", [config], Path(temp_file.name))
# Run binary_path binary,
# which generates `profile_filename` and `qos_filename` file in cwd.
try:
with open(os.devnull, "w") as f:
check_call([str(binary_path), "-c", str(temp_file.name)], stdout=f)
except CalledProcessError as e:
print("Output from the program:")
print(e.output)
raise e
time = _read_profile_file(Path(profile_filename))
acc = _read_qos_file(Path(qos_filename))
temp_file.close()
return time, acc
def plot_hpvm_configs(
config_path: PathLike,
save_to: PathLike = None,
show_qos_loss: bool = True,
**fig_kwargs,
) -> plt.Figure:
"""
Plot the QoS-speedup information in an HPVM configuration file.
It is recommended to profile the config file first (using `profile_configs`)
to obtain real speedup numbers.
This function creates a `matplotlib.pyplot.Figure`, plots on it, and returns it.
:param config_path: Path to the config file (HPVM configuration format).
:param save_to: File to save figure into. Default is None: don't save figure (just return it).
:param show_qos_loss: Show the loss of QoS on x axis of the figure. Defaults to True.
If False, will use (absolute) QoS instead of QoS loss.
:param fig_kwargs: Arguments to pass to `plt.subplots`.
"""
import numpy as np
_, configs = read_hpvm_configs(config_path)
get_qos = lambda c: c.qos_loss if show_qos_loss else c.qos
qos_speedup = np.array([(get_qos(c), c.speedup) for c in configs])
qoses, speedups = qos_speedup.T
fig, ax = plt.subplots(**fig_kwargs)
ax.scatter(qoses, speedups)
ax.set_xlabel("QoS Loss")
ax.set_ylabel("Speedup (X)")
if save_to:
fig.savefig(save_to, dpi=300)
return fig
class Config:
def __init__(
self,
conf_name: str,
speedup: float,
energy: float,
qos: float,
qos_loss: float,
config_body: List[str],
):
self.conf_name = conf_name
self.speedup = speedup
self.energy = energy
self.qos = qos
self.qos_loss = qos_loss
# We don't care about the information in this part, and we don't parse this.
self.config_body = config_body
def update_profile_results(self, speedup: float, qos: float, base_qos: float):
recorded_base_qos = self.qos + self.qos_loss
if abs(recorded_base_qos - base_qos) > 0.025:
raise ValueError(
f"Baseline QoS mismatch. Original: {recorded_base_qos}, measured: {base_qos}"
)
self.speedup = speedup
self.qos = qos
self.qos_loss = base_qos - qos
def __repr__(self) -> str:
header_fields = [
self.conf_name,
self.speedup,
self.energy,
self.qos,
self.qos_loss,
]
header = " ".join(str(field) for field in header_fields)
lines = [conf_opening, header, *self.config_body, conf_closing]
return "\n".join(lines)
__str__ = __repr__
def read_hpvm_configs(config_file: PathLike) -> Tuple[str, List[Config]]:
ret_configs = []
with open(config_file) as f:
text = f.read()
# There's 1 float sitting on the first line of config file.
# We don't use it, but want to keep that intact.
header, *configs = text.split(conf_opening)
header = header.strip()
for config_text in configs:
config_text = config_text.replace(conf_closing, "").strip()
config_header, *config_body = config_text.splitlines()
conf_name, *number_fields = config_header.split(" ")
speedup, energy, qos, qos_drop = [float(s) for s in number_fields]
ret_configs.append(
Config(conf_name, speedup, energy, qos, qos_drop, config_body)
)
return header, ret_configs
def write_hpvm_configs(header: str, configs: Iterable[Config], to_file: PathLike):
text_segs = [header] + [str(config) for config in configs]
with open(to_file, "w") as f:
f.write("\n".join(text_segs))
f.flush()
def _read_profile_file(profile_file_path: Path):
with profile_file_path.open() as f:
target_lines = [line.strip() for line in f if "Total Time" in line]
if len(target_lines) != 1:
raise RuntimeError(f"Profile {profile_file_path} malformed")
(target_line,) = target_lines
return float(target_line.split()[3])
def _read_qos_file(qos_file_path: Path):
with qos_file_path.open() as f:
return float(f.read().strip())
| 36.170507
| 98
| 0.673844
|
from pathlib import Path
from subprocess import PIPE, CalledProcessError
from typing import Iterable, List, Tuple, Union
import matplotlib.pyplot as plt
PathLike = Union[Path, str]
conf_opening, conf_closing = "+++++", "-----"
def profile_config_file(
binary_path: PathLike,
config_path: PathLike,
output_config_path: PathLike,
progress_bar: bool = True,
profile_filename: str = "profile_info.txt",
qos_filename: str = "final_accuracy",
) -> None:
header, configs = read_hpvm_configs(Path(config_path))
if not configs:
raise ValueError("Config file with no configs is unsupported.")
profile_configs(
binary_path,
configs[1:],
configs[0],
progress_bar,
profile_filename,
qos_filename,
)
write_hpvm_configs(header, configs, Path(output_config_path))
def profile_configs(
binary_path: PathLike,
configs: Iterable["Config"],
baseline_config: "Config",
progress_bar: bool = True,
profile_filename: str = "profile_info.txt",
qos_filename: str = "final_accuracy",
) -> None:
from tqdm import tqdm
    baseline_time, baseline_acc = measure_config(
        binary_path, baseline_config, profile_filename, qos_filename
    )
iterable = tqdm(configs, desc="Configs profiled") if progress_bar else configs
for config in iterable:
time, acc = measure_config(binary_path, config, profile_filename, qos_filename)
speedup = baseline_time / time
config.update_profile_results(speedup, acc, baseline_acc)
def measure_config(
binary_path: PathLike,
config: "Config",
profile_filename: str = "profile_info.txt",
qos_filename: str = "final_accuracy",
):
from subprocess import check_call
from tempfile import NamedTemporaryFile
import os
temp_file = NamedTemporaryFile("w")
write_hpvm_configs("0.0", [config], Path(temp_file.name))
try:
with open(os.devnull, "w") as f:
check_call([str(binary_path), "-c", str(temp_file.name)], stdout=f)
except CalledProcessError as e:
print("Output from the program:")
print(e.output)
raise e
time = _read_profile_file(Path(profile_filename))
acc = _read_qos_file(Path(qos_filename))
temp_file.close()
return time, acc
def plot_hpvm_configs(
config_path: PathLike,
save_to: PathLike = None,
show_qos_loss: bool = True,
**fig_kwargs,
) -> plt.Figure:
import numpy as np
_, configs = read_hpvm_configs(config_path)
get_qos = lambda c: c.qos_loss if show_qos_loss else c.qos
qos_speedup = np.array([(get_qos(c), c.speedup) for c in configs])
qoses, speedups = qos_speedup.T
fig, ax = plt.subplots(**fig_kwargs)
ax.scatter(qoses, speedups)
ax.set_xlabel("QoS Loss")
ax.set_ylabel("Speedup (X)")
if save_to:
fig.savefig(save_to, dpi=300)
return fig
class Config:
def __init__(
self,
conf_name: str,
speedup: float,
energy: float,
qos: float,
qos_loss: float,
config_body: List[str],
):
self.conf_name = conf_name
self.speedup = speedup
self.energy = energy
self.qos = qos
self.qos_loss = qos_loss
self.config_body = config_body
def update_profile_results(self, speedup: float, qos: float, base_qos: float):
recorded_base_qos = self.qos + self.qos_loss
if abs(recorded_base_qos - base_qos) > 0.025:
raise ValueError(
f"Baseline QoS mismatch. Original: {recorded_base_qos}, measured: {base_qos}"
)
self.speedup = speedup
self.qos = qos
self.qos_loss = base_qos - qos
def __repr__(self) -> str:
header_fields = [
self.conf_name,
self.speedup,
self.energy,
self.qos,
self.qos_loss,
]
header = " ".join(str(field) for field in header_fields)
lines = [conf_opening, header, *self.config_body, conf_closing]
return "\n".join(lines)
__str__ = __repr__
def read_hpvm_configs(config_file: PathLike) -> Tuple[str, List[Config]]:
ret_configs = []
with open(config_file) as f:
text = f.read()
# We don't use it, but want to keep that intact.
header, *configs = text.split(conf_opening)
header = header.strip()
for config_text in configs:
config_text = config_text.replace(conf_closing, "").strip()
config_header, *config_body = config_text.splitlines()
conf_name, *number_fields = config_header.split(" ")
speedup, energy, qos, qos_drop = [float(s) for s in number_fields]
ret_configs.append(
Config(conf_name, speedup, energy, qos, qos_drop, config_body)
)
return header, ret_configs
def write_hpvm_configs(header: str, configs: Iterable[Config], to_file: PathLike):
text_segs = [header] + [str(config) for config in configs]
with open(to_file, "w") as f:
f.write("\n".join(text_segs))
f.flush()
def _read_profile_file(profile_file_path: Path):
with profile_file_path.open() as f:
target_lines = [line.strip() for line in f if "Total Time" in line]
if len(target_lines) != 1:
raise RuntimeError(f"Profile {profile_file_path} malformed")
(target_line,) = target_lines
return float(target_line.split()[3])
def _read_qos_file(qos_file_path: Path):
with qos_file_path.open() as f:
return float(f.read().strip())
| true
| true
|
790540ed21f59c903af9ace6954238fa467d6ce8
| 4,275
|
py
|
Python
|
src/lib/detectors/ctdet.py
|
Wastoon/XinTong_CenterNet
|
e4436d61b01a74fbc54bd33c4948ec932940661a
|
[
"MIT"
] | null | null | null |
src/lib/detectors/ctdet.py
|
Wastoon/XinTong_CenterNet
|
e4436d61b01a74fbc54bd33c4948ec932940661a
|
[
"MIT"
] | null | null | null |
src/lib/detectors/ctdet.py
|
Wastoon/XinTong_CenterNet
|
e4436d61b01a74fbc54bd33c4948ec932940661a
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
import os
try:
from external.nms import soft_nms
except:
print('NMS not imported! If you need it,'
' do \n cd $CenterNet_ROOT/src/lib/external \n make')
from models.decode import ctdet_decode
from models.utils import flip_tensor
from utils.image import get_affine_transform
from utils.post_process import ctdet_post_process
from utils.debugger import Debugger
from .base_detector import BaseDetector
class CtdetDetector(BaseDetector):
def __init__(self, opt):
super(CtdetDetector, self).__init__(opt)
def process(self, images, return_time=False):
with torch.no_grad():
output = self.model(images)[-1]
hm = output['hm'].sigmoid_()
wh = output['wh']
reg = output['reg'] if self.opt.reg_offset else None
if self.opt.flip_test:
hm = (hm[0:1] + flip_tensor(hm[1:2])) / 2
wh = (wh[0:1] + flip_tensor(wh[1:2])) / 2
reg = reg[0:1] if reg is not None else None
torch.cuda.synchronize()
forward_time = time.time()
dets = ctdet_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
if return_time:
return output, dets, forward_time
else:
return output, dets
def post_process(self, dets, meta, scale=1):
dets = dets.detach().cpu().numpy()
dets = dets.reshape(1, -1, dets.shape[2])
dets = ctdet_post_process(
dets.copy(), [meta['c']], [meta['s']],
meta['out_height'], meta['out_width'], self.opt.num_classes)
for j in range(1, self.num_classes + 1):
dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
dets[0][j][:, :4] /= scale
return dets[0]
def merge_outputs(self, detections):
results = {}
for j in range(1, self.num_classes + 1):
results[j] = np.concatenate(
[detection[j] for detection in detections], axis=0).astype(np.float32)
if len(self.scales) > 1 or self.opt.nms:
soft_nms(results[j], Nt=0.5, method=2)
scores = np.hstack(
[results[j][:, 4] for j in range(1, self.num_classes + 1)])
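    # Keep only the top `max_per_image` detections across all classes:
    # np.partition places the kth-smallest score at index `kth`, making
    # `thresh` the cutoff below which boxes are dropped.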
if len(scores) > self.max_per_image:
kth = len(scores) - self.max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, self.num_classes + 1):
keep_inds = (results[j][:, 4] >= thresh)
results[j] = results[j][keep_inds]
return results
def debug(self, debugger, images, dets, output, scale=1):
detection = dets.detach().cpu().numpy().copy()
detection[:, :, :4] *= self.opt.down_ratio
for i in range(1):
img = images[i].detach().cpu().numpy().transpose(1, 2, 0)
img = ((img * self.std + self.mean) * 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm_{:.1f}'.format(scale))
debugger.add_img(img, img_id='out_pred_{:.1f}'.format(scale))
for k in range(len(dets[i])):
if detection[i, k, 4] > self.opt.center_thresh:
debugger.add_coco_bbox(detection[i, k, :4], detection[i, k, -1],
detection[i, k, 4],
img_id='out_pred_{:.1f}'.format(scale))
def show_results(self, debugger, image, results):
debugger.add_img(image, img_id='ctdet')
for j in range(1, self.num_classes + 1):
for bbox in results[j]:
if bbox[4] > self.opt.vis_thresh:
debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='ctdet')
debugger.show_all_imgs(pause=self.pause)
def save_results_only(self, debugger, image, results, image_name):
debugger.add_img(image, img_id='ctdet')
for j in range(1, self.num_classes + 1):
for bbox in results[j]:
if bbox[4] > self.opt.vis_thresh:
debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='ctdet')
prefix = image_name.split('.')[0]
path = os.path.dirname(self.opt.det_output_path) + '/img'
debugger.save_all_imgs(path, prefix)
| 38.513514
| 90
| 0.638363
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
import os
try:
from external.nms import soft_nms
except:
print('NMS not imported! If you need it,'
' do \n cd $CenterNet_ROOT/src/lib/external \n make')
from models.decode import ctdet_decode
from models.utils import flip_tensor
from utils.image import get_affine_transform
from utils.post_process import ctdet_post_process
from utils.debugger import Debugger
from .base_detector import BaseDetector
class CtdetDetector(BaseDetector):
def __init__(self, opt):
super(CtdetDetector, self).__init__(opt)
def process(self, images, return_time=False):
with torch.no_grad():
output = self.model(images)[-1]
hm = output['hm'].sigmoid_()
wh = output['wh']
reg = output['reg'] if self.opt.reg_offset else None
if self.opt.flip_test:
hm = (hm[0:1] + flip_tensor(hm[1:2])) / 2
wh = (wh[0:1] + flip_tensor(wh[1:2])) / 2
reg = reg[0:1] if reg is not None else None
torch.cuda.synchronize()
forward_time = time.time()
dets = ctdet_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
if return_time:
return output, dets, forward_time
else:
return output, dets
def post_process(self, dets, meta, scale=1):
dets = dets.detach().cpu().numpy()
dets = dets.reshape(1, -1, dets.shape[2])
dets = ctdet_post_process(
dets.copy(), [meta['c']], [meta['s']],
meta['out_height'], meta['out_width'], self.opt.num_classes)
for j in range(1, self.num_classes + 1):
dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
dets[0][j][:, :4] /= scale
return dets[0]
def merge_outputs(self, detections):
results = {}
for j in range(1, self.num_classes + 1):
results[j] = np.concatenate(
[detection[j] for detection in detections], axis=0).astype(np.float32)
if len(self.scales) > 1 or self.opt.nms:
soft_nms(results[j], Nt=0.5, method=2)
scores = np.hstack(
[results[j][:, 4] for j in range(1, self.num_classes + 1)])
if len(scores) > self.max_per_image:
kth = len(scores) - self.max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, self.num_classes + 1):
keep_inds = (results[j][:, 4] >= thresh)
results[j] = results[j][keep_inds]
return results
def debug(self, debugger, images, dets, output, scale=1):
detection = dets.detach().cpu().numpy().copy()
detection[:, :, :4] *= self.opt.down_ratio
for i in range(1):
img = images[i].detach().cpu().numpy().transpose(1, 2, 0)
img = ((img * self.std + self.mean) * 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm_{:.1f}'.format(scale))
debugger.add_img(img, img_id='out_pred_{:.1f}'.format(scale))
for k in range(len(dets[i])):
if detection[i, k, 4] > self.opt.center_thresh:
debugger.add_coco_bbox(detection[i, k, :4], detection[i, k, -1],
detection[i, k, 4],
img_id='out_pred_{:.1f}'.format(scale))
def show_results(self, debugger, image, results):
debugger.add_img(image, img_id='ctdet')
for j in range(1, self.num_classes + 1):
for bbox in results[j]:
if bbox[4] > self.opt.vis_thresh:
debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='ctdet')
debugger.show_all_imgs(pause=self.pause)
def save_results_only(self, debugger, image, results, image_name):
debugger.add_img(image, img_id='ctdet')
for j in range(1, self.num_classes + 1):
for bbox in results[j]:
if bbox[4] > self.opt.vis_thresh:
debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='ctdet')
prefix = image_name.split('.')[0]
path = os.path.dirname(self.opt.det_output_path) + '/img'
debugger.save_all_imgs(path, prefix)
| true
| true
|
7905417162a2b2cbb3342bd2ad072af2d82cbc6c
| 2,975
|
py
|
Python
|
scripts/pyqtgraph-develop/pyqtgraph/widgets/GradientWidget.py
|
kuldeepaman/tf-pose
|
8050912c52a7b4f3c8a2656f267d47ba21d093f6
|
[
"Apache-2.0"
] | null | null | null |
scripts/pyqtgraph-develop/pyqtgraph/widgets/GradientWidget.py
|
kuldeepaman/tf-pose
|
8050912c52a7b4f3c8a2656f267d47ba21d093f6
|
[
"Apache-2.0"
] | null | null | null |
scripts/pyqtgraph-develop/pyqtgraph/widgets/GradientWidget.py
|
kuldeepaman/tf-pose
|
8050912c52a7b4f3c8a2656f267d47ba21d093f6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from ..Qt import QtGui, QtCore
from .GraphicsView import GraphicsView
from ..graphicsItems.GradientEditorItem import GradientEditorItem
import weakref
import numpy as np
__all__ = ['GradientWidget']
class GradientWidget(GraphicsView):
"""
Widget displaying an editable color gradient. The user may add, move, recolor,
or remove colors from the gradient. Additionally, a context menu allows the
user to select from pre-defined gradients.
"""
sigGradientChanged = QtCore.Signal(object)
sigGradientChangeFinished = QtCore.Signal(object)
def __init__(self, parent=None, orientation='bottom', *args, **kargs):
"""
The *orientation* argument may be 'bottom', 'top', 'left', or 'right'
indicating whether the gradient is displayed horizontally (top, bottom)
or vertically (left, right) and on what side of the gradient the editable
ticks will appear.
All other arguments are passed to
:func:`GradientEditorItem.__init__ <pyqtgraph.GradientEditorItem.__init__>`.
Note: For convenience, this class wraps methods from
:class:`GradientEditorItem <pyqtgraph.GradientEditorItem>`.
"""
GraphicsView.__init__(self, parent, useOpenGL=False, background=None)
self.maxDim = 31
kargs['tickPen'] = 'k'
self.item = GradientEditorItem(*args, **kargs)
self.item.sigGradientChanged.connect(self.sigGradientChanged)
self.item.sigGradientChangeFinished.connect(self.sigGradientChangeFinished)
self.setCentralItem(self.item)
self.setOrientation(orientation)
self.setCacheMode(self.CacheNone)
self.setRenderHints(QtGui.QPainter.Antialiasing | QtGui.QPainter.TextAntialiasing)
self.setFrameStyle(QtGui.QFrame.NoFrame | QtGui.QFrame.Plain)
#self.setBackgroundRole(QtGui.QPalette.NoRole)
#self.setBackgroundBrush(QtGui.QBrush(QtCore.Qt.NoBrush))
#self.setAutoFillBackground(False)
#self.setAttribute(QtCore.Qt.WA_PaintOnScreen, False)
#self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent, True)
def setOrientation(self, ort):
"""Set the orientation of the widget. May be one of 'bottom', 'top',
'left', or 'right'."""
self.item.setOrientation(ort)
self.orientation = ort
self.setMaxDim()
def setMaxDim(self, mx=None):
if mx is None:
mx = self.maxDim
else:
self.maxDim = mx
if self.orientation in ['bottom', 'top']:
self.setFixedHeight(mx)
self.setMaximumWidth(16777215)
else:
self.setFixedWidth(mx)
self.setMaximumHeight(16777215)
def __getattr__(self, attr):
### wrap methods from GradientEditorItem
return getattr(self.item, attr)
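# A hedged usage sketch (not part of this module; the application scaffolding is
# an assumption): embed a GradientWidget and react when the user finishes
# editing. The signal forwards the GradientEditorItem, whose methods (such as
# getLookupTable) are also reachable on the widget through __getattr__ above.
if __name__ == '__main__':
    import pyqtgraph as pg
    from pyqtgraph.Qt import QtGui

    app = QtGui.QApplication([])
    w = pg.GradientWidget(orientation='bottom')
    w.sigGradientChangeFinished.connect(lambda item: print(item.getLookupTable(8)))
    w.show()
    app.exec_()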
| 39.666667
| 91
| 0.647059
|
from ..Qt import QtGui, QtCore
from .GraphicsView import GraphicsView
from ..graphicsItems.GradientEditorItem import GradientEditorItem
import weakref
import numpy as np
__all__ = ['GradientWidget']
class GradientWidget(GraphicsView):
sigGradientChanged = QtCore.Signal(object)
sigGradientChangeFinished = QtCore.Signal(object)
def __init__(self, parent=None, orientation='bottom', *args, **kargs):
GraphicsView.__init__(self, parent, useOpenGL=False, background=None)
self.maxDim = 31
kargs['tickPen'] = 'k'
self.item = GradientEditorItem(*args, **kargs)
self.item.sigGradientChanged.connect(self.sigGradientChanged)
self.item.sigGradientChangeFinished.connect(self.sigGradientChangeFinished)
self.setCentralItem(self.item)
self.setOrientation(orientation)
self.setCacheMode(self.CacheNone)
self.setRenderHints(QtGui.QPainter.Antialiasing | QtGui.QPainter.TextAntialiasing)
self.setFrameStyle(QtGui.QFrame.NoFrame | QtGui.QFrame.Plain)
def setOrientation(self, ort):
self.item.setOrientation(ort)
self.orientation = ort
self.setMaxDim()
def setMaxDim(self, mx=None):
if mx is None:
mx = self.maxDim
else:
self.maxDim = mx
if self.orientation in ['bottom', 'top']:
self.setFixedHeight(mx)
self.setMaximumWidth(16777215)
else:
self.setFixedWidth(mx)
self.setMaximumHeight(16777215)
def __getattr__(self, attr):
| true
| true
|
790542462fd8d3a7f1cc790da9a7f959c3a24912
| 21,034
|
py
|
Python
|
hdbscan/prediction.py
|
johnfischbeck/hdbscan
|
7499b53f9edca09c6a674a93e3d32bbbaf655b5a
|
[
"BSD-3-Clause"
] | null | null | null |
hdbscan/prediction.py
|
johnfischbeck/hdbscan
|
7499b53f9edca09c6a674a93e3d32bbbaf655b5a
|
[
"BSD-3-Clause"
] | null | null | null |
hdbscan/prediction.py
|
johnfischbeck/hdbscan
|
7499b53f9edca09c6a674a93e3d32bbbaf655b5a
|
[
"BSD-3-Clause"
] | null | null | null |
# Support various prediction methods for predicting cluster membership
# of new or unseen points. There are several ways to interpret how
# to do this correctly, so we provide several methods for
# the different use cases that may arise.
import numpy as np
from sklearn.neighbors import KDTree, BallTree
from .dist_metrics import DistanceMetric
from ._hdbscan_tree import compute_stability, labelling_at_cut, recurse_leaf_dfs
from ._prediction_utils import (get_tree_row_with_child,
dist_membership_vector,
outlier_membership_vector,
prob_in_some_cluster,
all_points_dist_membership_vector,
all_points_outlier_membership_vector,
all_points_prob_in_some_cluster)
from warnings import warn
class PredictionData(object):
"""
Extra data that allows for faster prediction if cached.
Parameters
----------
data : array (n_samples, n_features)
The original data set that was clustered
condensed_tree : CondensedTree
The condensed tree object created by a clustering
min_samples : int
The min_samples value used in clustering
tree_type : string, optional
Which type of space tree to use for core distance computation.
One of:
* ``kdtree``
* ``balltree``
metric : string, optional
The metric used to determine distance for the clustering.
This is the metric that will be used for the space tree to determine
core distances etc.
**kwargs :
Any further arguments to the metric.
Attributes
----------
raw_data : array (n_samples, n_features)
The original data set that was clustered
tree : KDTree or BallTree
A space partitioning tree that can be queried for nearest neighbors.
core_distances : array (n_samples,)
The core distances for every point in the original data set.
cluster_map : dict
A dictionary mapping cluster numbers in the condensed tree to labels
in the final selected clustering.
cluster_tree : structured array
A version of the condensed tree that only contains clusters, not
individual points.
max_lambdas : dict
A dictionary mapping cluster numbers in the condensed tree to the
maximum lambda value seen in that cluster.
"""
_tree_type_map = {'kdtree': KDTree, 'balltree': BallTree}
def _clusters_below(self, cluster):
result = []
to_process = [cluster]
while to_process:
result.extend(to_process)
to_process = \
self.cluster_tree['child'][np.in1d(self.cluster_tree['parent'],
to_process)]
to_process = to_process.tolist()
return result
def _recurse_leaf_dfs(self, current_node):
children = self.cluster_tree[self.cluster_tree['parent'] ==
current_node]['child']
if len(children) == 0:
return [current_node, ]
else:
return sum(
[recurse_leaf_dfs(self.cluster_tree, child) for child in children], [])
def __init__(self, data, condensed_tree, min_samples,
tree_type='kdtree', metric='euclidean', **kwargs):
self.raw_data = data
self.tree = self._tree_type_map[tree_type](self.raw_data,
metric=metric, **kwargs)
self.core_distances = self.tree.query(data, k=min_samples)[0][:, -1]
self.dist_metric = DistanceMetric.get_metric(metric, **kwargs)
selected_clusters = condensed_tree._select_clusters()
# raw_condensed_tree = condensed_tree.to_numpy()
raw_condensed_tree = condensed_tree._raw_tree
self.cluster_map = {c: n for n, c in enumerate(sorted(list(selected_clusters)))}
self.reverse_cluster_map = {n: c for c, n in self.cluster_map.items()}
self.cluster_tree = raw_condensed_tree[raw_condensed_tree['child_size']
> 1]
self.max_lambdas = {}
self.leaf_max_lambdas = {}
self.exemplars = []
all_clusters = set(np.hstack([self.cluster_tree['parent'],
self.cluster_tree['child']]))
for cluster in all_clusters:
self.leaf_max_lambdas[cluster] = raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == cluster].max()
for cluster in selected_clusters:
self.max_lambdas[cluster] = \
raw_condensed_tree['lambda_val'][raw_condensed_tree['parent']
== cluster].max()
for sub_cluster in self._clusters_below(cluster):
self.cluster_map[sub_cluster] = self.cluster_map[cluster]
self.max_lambdas[sub_cluster] = self.max_lambdas[cluster]
cluster_exemplars = np.array([], dtype=np.int64)
for leaf in self._recurse_leaf_dfs(cluster):
leaf_max_lambda = raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == leaf].max()
points = raw_condensed_tree['child'][
(raw_condensed_tree['parent'] == leaf) &
(raw_condensed_tree['lambda_val'] == leaf_max_lambda)]
cluster_exemplars = np.hstack([cluster_exemplars, points])
self.exemplars.append(self.raw_data[cluster_exemplars])
def _find_neighbor_and_lambda(neighbor_indices, neighbor_distances,
core_distances, min_samples):
"""
Find the nearest mutual reachability neighbor of a point, and compute
the associated lambda value for the point, given the mutual reachability
distance to a nearest neighbor.
Parameters
----------
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
neighbor : int
The index into the full raw data set of the nearest mutual reachability
distance neighbor of the point.
lambda_ : float
The lambda value at which this point joins/merges with `neighbor`.
"""
neighbor_core_distances = core_distances[neighbor_indices]
point_core_distances = neighbor_distances[min_samples] * np.ones(
neighbor_indices.shape[0])
mr_distances = np.vstack((
neighbor_core_distances,
point_core_distances,
neighbor_distances
)).max(axis=0)
nn_index = mr_distances.argmin()
nearest_neighbor = neighbor_indices[nn_index]
if mr_distances[nn_index] > 0.0:
lambda_ = 1. / mr_distances[nn_index]
else:
lambda_ = np.finfo(np.double).max
return nearest_neighbor, lambda_
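# A small worked example (assumed values, not part of this module) of the logic
# above: mr(x, y) = max(core(x), core(y), d(x, y)), and the query point joins
# its nearest mutual reachability neighbor at lambda = 1 / mr.
import numpy as np

neighbor_core = np.array([0.5, 1.2, 0.8])   # core distances of three neighbors
raw_dist = np.array([0.4, 0.6, 1.5])        # raw distances from the query point
query_core = np.full(3, 0.7)                # query point's own core distance
mr = np.vstack((neighbor_core, query_core, raw_dist)).max(axis=0)
nn = mr.argmin()                            # index 0, since mr == [0.7, 1.2, 1.5]
lambda_ = 1.0 / mr[nn]                      # 1 / 0.7, roughly 1.43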
def _extend_condensed_tree(tree, neighbor_indices, neighbor_distances,
core_distances, min_samples):
"""
Create a new condensed tree with an additional point added, allowing for
computations as if this point had been part of the original tree. Note
that this makes as little change to the tree as possible, with no
re-optimizing/re-condensing so that the selected clusters remain
effectively unchanged.
Parameters
----------
tree : structured array
The raw format condensed tree to update.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
new_tree : structured array
The original tree with an extra row providing the parent cluster
and lambda information for a new point given index -1.
"""
tree_root = tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] <= lambda_:
# New point departs with the old
new_tree_row = (potential_cluster, -1, 1,
neighbor_tree_row['lambda_val'])
else:
# Find appropriate cluster based on lambda of new point
while potential_cluster > tree_root and \
tree[tree['child'] ==
potential_cluster]['lambda_val'] >= lambda_:
potential_cluster = tree['parent'][tree['child']
== potential_cluster][0]
new_tree_row = (potential_cluster, -1, 1, lambda_)
return np.append(tree, new_tree_row)
def _find_cluster_and_probability(tree, cluster_tree, neighbor_indices,
neighbor_distances, core_distances,
cluster_map, max_lambdas,
min_samples):
"""
Return the cluster label (of the original clustering) and membership
probability of a new data point.
Parameters
----------
tree : CondensedTree
The condensed tree associated with the clustering.
cluster_tree : structured_array
The raw form of the condensed tree with only cluster information (no
data on individual points). This is significantly more compact.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
cluster_map : dict
A dictionary mapping cluster numbers in the condensed tree to labels
in the final selected clustering.
max_lambdas : dict
A dictionary mapping cluster numbers in the condensed tree to the
maximum lambda value seen in that cluster.
min_samples : int
The min_samples value used to generate core distances.
"""
raw_tree = tree._raw_tree
tree_root = cluster_tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(raw_tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] > lambda_:
# Find appropriate cluster based on lambda of new point
while potential_cluster > tree_root and \
cluster_tree['lambda_val'][cluster_tree['child']
== potential_cluster] >= lambda_:
potential_cluster = cluster_tree['parent'][cluster_tree['child']
== potential_cluster][0]
if potential_cluster in cluster_map:
cluster_label = cluster_map[potential_cluster]
else:
cluster_label = -1
if cluster_label >= 0:
max_lambda = max_lambdas[potential_cluster]
if max_lambda > 0.0:
lambda_ = min(max_lambda, lambda_)
prob = (lambda_ / max_lambda)
else:
prob = 1.0
else:
prob = 0.0
return cluster_label, prob
def approximate_predict(clusterer, points_to_predict):
"""Predict the cluster label of new points. The returned labels
will be those of the original clustering found by ``clusterer``,
and therefore are not (necessarily) the cluster labels that would
be found by clustering the original data combined with
``points_to_predict``, hence the 'approximate' label.
If you simply wish to assign new points to an existing clustering
in the 'best' way possible, this is the function to use. If you
want to predict how ``points_to_predict`` would cluster with
the original data under HDBSCAN the most efficient existing approach
is to simply recluster with the new point(s) added to the original dataset.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
Returns
-------
labels : array (n_samples,)
The predicted labels of the ``points_to_predict``
probabilities : array (n_samples,)
The soft cluster scores for each of the ``points_to_predict``
See Also
--------
:py:func:`hdbscan.predict.membership_vector`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
if clusterer.prediction_data_ is None:
raise ValueError('Clusterer does not have prediction data!'
' Try fitting with prediction_data=True set,'
' or run generate_prediction_data on the clusterer')
points_to_predict = np.asarray(points_to_predict)
if points_to_predict.shape[1] != \
clusterer.prediction_data_.raw_data.shape[1]:
raise ValueError('New points dimension does not match fit data!')
if clusterer.prediction_data_.cluster_tree.shape[0] == 0:
warn('Clusterer does not have any defined clusters, new data'
' will be automatically predicted as noise.')
labels = -1 * np.ones(points_to_predict.shape[0], dtype=np.int32)
probabilities = np.zeros(points_to_predict.shape[0], dtype=np.float32)
return labels, probabilities
    labels = np.empty(points_to_predict.shape[0], dtype=np.intp)
probabilities = np.empty(points_to_predict.shape[0], dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
label, prob = _find_cluster_and_probability(
clusterer.condensed_tree_,
clusterer.prediction_data_.cluster_tree,
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
clusterer.prediction_data_.cluster_map,
clusterer.prediction_data_.max_lambdas,
min_samples
)
labels[i] = label
probabilities[i] = prob
return labels, probabilities
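# A minimal end-to-end sketch of the intended call pattern (synthetic data,
# assumed parameter values): fit with prediction_data=True, then label unseen
# points without reclustering.
import numpy as np
import hdbscan

train = np.random.RandomState(0).normal(size=(200, 2))
clusterer = hdbscan.HDBSCAN(min_cluster_size=10, prediction_data=True).fit(train)
labels, probs = hdbscan.approximate_predict(clusterer, np.array([[0.0, 0.0]]))
# labels[i] == -1 marks noise; probs[i] is the soft membership score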
def membership_vector(clusterer, points_to_predict):
"""Predict soft cluster membership. The result produces a vector
for each point in ``points_to_predict`` that gives a probability that
the given point is a member of a cluster for each of the selected clusters
of the ``clusterer``.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` is a member of cluster ``j`` is
in ``membership_vectors[i, j]``.
See Also
--------
:py:func:`hdbscan.predict.predict`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
clusters = np.array(
sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
result = np.empty((points_to_predict.shape[0], clusters.shape[0]),
dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
# We need to find where in the tree the new point would go
# for the purposes of outlier membership approximation
nearest_neighbor, lambda_ = \
_find_neighbor_and_lambda(
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
min_samples)
neighbor_tree_row = get_tree_row_with_child(
clusterer.condensed_tree_._raw_tree, nearest_neighbor)
if neighbor_tree_row['lambda_val'] <= lambda_:
lambda_ = neighbor_tree_row['lambda_val']
distance_vec = dist_membership_vector(
points_to_predict[i],
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vec = outlier_membership_vector(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result[i] = distance_vec ** 0.5 * outlier_vec ** 2.0
result[i] /= result[i].sum()
result[i] *= prob_in_some_cluster(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
return result
def all_points_membership_vectors(clusterer):
"""Predict soft cluster membership vectors for all points in the
original dataset the clusterer was trained on. This function is more
efficient by making use of the fact that all points are already in the
condensed tree, and processing in bulk.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
This method does not work if the clusterer was trained
with ``metric='precomputed'``.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` of the original dataset is a member of
cluster ``j`` is in ``membership_vectors[i, j]``.
See Also
--------
:py:func:`hdbscan.predict.predict`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
clusters = np.array(sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
all_points = clusterer.prediction_data_.raw_data
# When no clusters found, return array of 0's
if clusters.size == 0:
return np.zeros(all_points.shape[0])
distance_vecs = all_points_dist_membership_vector(
all_points,
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vecs = all_points_outlier_membership_vector(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
in_cluster_probs = all_points_prob_in_some_cluster(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result = distance_vecs * outlier_vecs
row_sums = result.sum(axis=1)
result = result / row_sums[:, np.newaxis]
result *= in_cluster_probs[:, np.newaxis]
return result
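# Companion sketch (synthetic data, assumed parameter values): soft membership
# for new points via membership_vector, and for every training point in bulk
# via all_points_membership_vectors.
import numpy as np
import hdbscan

data = np.random.RandomState(0).normal(size=(200, 2))
clusterer = hdbscan.HDBSCAN(min_cluster_size=10, prediction_data=True).fit(data)
new_vecs = hdbscan.membership_vector(clusterer, np.array([[0.0, 0.0]]))
all_vecs = hdbscan.all_points_membership_vectors(clusterer)
# each result has one row per point and one column per selected cluster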
| 37.967509
| 99
| 0.636256
|
import numpy as np
from sklearn.neighbors import KDTree, BallTree
from .dist_metrics import DistanceMetric
from ._hdbscan_tree import compute_stability, labelling_at_cut, recurse_leaf_dfs
from ._prediction_utils import (get_tree_row_with_child,
dist_membership_vector,
outlier_membership_vector,
prob_in_some_cluster,
all_points_dist_membership_vector,
all_points_outlier_membership_vector,
all_points_prob_in_some_cluster)
from warnings import warn
class PredictionData(object):
_tree_type_map = {'kdtree': KDTree, 'balltree': BallTree}
def _clusters_below(self, cluster):
result = []
to_process = [cluster]
while to_process:
result.extend(to_process)
to_process = \
self.cluster_tree['child'][np.in1d(self.cluster_tree['parent'],
to_process)]
to_process = to_process.tolist()
return result
def _recurse_leaf_dfs(self, current_node):
children = self.cluster_tree[self.cluster_tree['parent'] ==
current_node]['child']
if len(children) == 0:
return [current_node, ]
else:
return sum(
[recurse_leaf_dfs(self.cluster_tree, child) for child in children], [])
def __init__(self, data, condensed_tree, min_samples,
tree_type='kdtree', metric='euclidean', **kwargs):
self.raw_data = data
self.tree = self._tree_type_map[tree_type](self.raw_data,
metric=metric, **kwargs)
self.core_distances = self.tree.query(data, k=min_samples)[0][:, -1]
self.dist_metric = DistanceMetric.get_metric(metric, **kwargs)
selected_clusters = condensed_tree._select_clusters()
raw_condensed_tree = condensed_tree._raw_tree
self.cluster_map = {c: n for n, c in enumerate(sorted(list(selected_clusters)))}
self.reverse_cluster_map = {n: c for c, n in self.cluster_map.items()}
self.cluster_tree = raw_condensed_tree[raw_condensed_tree['child_size']
> 1]
self.max_lambdas = {}
self.leaf_max_lambdas = {}
self.exemplars = []
all_clusters = set(np.hstack([self.cluster_tree['parent'],
self.cluster_tree['child']]))
for cluster in all_clusters:
self.leaf_max_lambdas[cluster] = raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == cluster].max()
for cluster in selected_clusters:
self.max_lambdas[cluster] = \
raw_condensed_tree['lambda_val'][raw_condensed_tree['parent']
== cluster].max()
for sub_cluster in self._clusters_below(cluster):
self.cluster_map[sub_cluster] = self.cluster_map[cluster]
self.max_lambdas[sub_cluster] = self.max_lambdas[cluster]
cluster_exemplars = np.array([], dtype=np.int64)
for leaf in self._recurse_leaf_dfs(cluster):
leaf_max_lambda = raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == leaf].max()
points = raw_condensed_tree['child'][
(raw_condensed_tree['parent'] == leaf) &
(raw_condensed_tree['lambda_val'] == leaf_max_lambda)]
cluster_exemplars = np.hstack([cluster_exemplars, points])
self.exemplars.append(self.raw_data[cluster_exemplars])
def _find_neighbor_and_lambda(neighbor_indices, neighbor_distances,
core_distances, min_samples):
neighbor_core_distances = core_distances[neighbor_indices]
point_core_distances = neighbor_distances[min_samples] * np.ones(
neighbor_indices.shape[0])
mr_distances = np.vstack((
neighbor_core_distances,
point_core_distances,
neighbor_distances
)).max(axis=0)
nn_index = mr_distances.argmin()
nearest_neighbor = neighbor_indices[nn_index]
if mr_distances[nn_index] > 0.0:
lambda_ = 1. / mr_distances[nn_index]
else:
lambda_ = np.finfo(np.double).max
return nearest_neighbor, lambda_
def _extend_condensed_tree(tree, neighbor_indices, neighbor_distances,
core_distances, min_samples):
tree_root = tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] <= lambda_:
new_tree_row = (potential_cluster, -1, 1,
neighbor_tree_row['lambda_val'])
else:
while potential_cluster > tree_root and \
tree[tree['child'] ==
potential_cluster]['lambda_val'] >= lambda_:
potential_cluster = tree['parent'][tree['child']
== potential_cluster][0]
new_tree_row = (potential_cluster, -1, 1, lambda_)
return np.append(tree, new_tree_row)
def _find_cluster_and_probability(tree, cluster_tree, neighbor_indices,
neighbor_distances, core_distances,
cluster_map, max_lambdas,
min_samples):
raw_tree = tree._raw_tree
tree_root = cluster_tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(raw_tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] > lambda_:
while potential_cluster > tree_root and \
cluster_tree['lambda_val'][cluster_tree['child']
== potential_cluster] >= lambda_:
potential_cluster = cluster_tree['parent'][cluster_tree['child']
== potential_cluster][0]
if potential_cluster in cluster_map:
cluster_label = cluster_map[potential_cluster]
else:
cluster_label = -1
if cluster_label >= 0:
max_lambda = max_lambdas[potential_cluster]
if max_lambda > 0.0:
lambda_ = min(max_lambda, lambda_)
prob = (lambda_ / max_lambda)
else:
prob = 1.0
else:
prob = 0.0
return cluster_label, prob
def approximate_predict(clusterer, points_to_predict):
if clusterer.prediction_data_ is None:
raise ValueError('Clusterer does not have prediction data!'
' Try fitting with prediction_data=True set,'
' or run generate_prediction_data on the clusterer')
points_to_predict = np.asarray(points_to_predict)
if points_to_predict.shape[1] != \
clusterer.prediction_data_.raw_data.shape[1]:
raise ValueError('New points dimension does not match fit data!')
if clusterer.prediction_data_.cluster_tree.shape[0] == 0:
warn('Clusterer does not have any defined clusters, new data'
' will be automatically predicted as noise.')
labels = -1 * np.ones(points_to_predict.shape[0], dtype=np.int32)
probabilities = np.zeros(points_to_predict.shape[0], dtype=np.float32)
return labels, probabilities
    labels = np.empty(points_to_predict.shape[0], dtype=np.intp)
probabilities = np.empty(points_to_predict.shape[0], dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
label, prob = _find_cluster_and_probability(
clusterer.condensed_tree_,
clusterer.prediction_data_.cluster_tree,
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
clusterer.prediction_data_.cluster_map,
clusterer.prediction_data_.max_lambdas,
min_samples
)
labels[i] = label
probabilities[i] = prob
return labels, probabilities
def membership_vector(clusterer, points_to_predict):
clusters = np.array(
sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
result = np.empty((points_to_predict.shape[0], clusters.shape[0]),
dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
nearest_neighbor, lambda_ = \
_find_neighbor_and_lambda(
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
min_samples)
neighbor_tree_row = get_tree_row_with_child(
clusterer.condensed_tree_._raw_tree, nearest_neighbor)
if neighbor_tree_row['lambda_val'] <= lambda_:
lambda_ = neighbor_tree_row['lambda_val']
distance_vec = dist_membership_vector(
points_to_predict[i],
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vec = outlier_membership_vector(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result[i] = distance_vec ** 0.5 * outlier_vec ** 2.0
result[i] /= result[i].sum()
result[i] *= prob_in_some_cluster(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
return result
def all_points_membership_vectors(clusterer):
clusters = np.array(sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
all_points = clusterer.prediction_data_.raw_data
if clusters.size == 0:
return np.zeros(all_points.shape[0])
distance_vecs = all_points_dist_membership_vector(
all_points,
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vecs = all_points_outlier_membership_vector(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
in_cluster_probs = all_points_prob_in_some_cluster(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result = distance_vecs * outlier_vecs
row_sums = result.sum(axis=1)
result = result / row_sums[:, np.newaxis]
result *= in_cluster_probs[:, np.newaxis]
return result
| true
| true
|
790543d4a494b513f69af154e5eff999ab5252b1
| 1,659
|
py
|
Python
|
imagersite/imager_images/models.py
|
Loaye/django-imager-group
|
a4bdd285b8063d2553f59f2a78aaef0fcfd0c95d
|
[
"MIT"
] | null | null | null |
imagersite/imager_images/models.py
|
Loaye/django-imager-group
|
a4bdd285b8063d2553f59f2a78aaef0fcfd0c95d
|
[
"MIT"
] | null | null | null |
imagersite/imager_images/models.py
|
Loaye/django-imager-group
|
a4bdd285b8063d2553f59f2a78aaef0fcfd0c95d
|
[
"MIT"
] | 2
|
2017-11-29T23:33:53.000Z
|
2017-12-05T22:36:09.000Z
|
"""Model module for images."""
from django.db import models
from django.contrib.auth.models import User
from imager_profile.models import ImagerProfile
# Create your models here.
class ImageBaseClass(models.Model):
"""Base class for Photo and Album classes."""
PRIVATE = 'PRVT'
SHARED = 'SHRD'
PUBLIC = 'PBLC'
PUBLISHED = ((PRIVATE, 'private'),
(SHARED, 'shared'),
(PUBLIC, 'public'))
title = models.CharField(max_length=180)
description = models.TextField(max_length=500, blank=True, null=True)
date_modified = models.DateField(auto_now=True)
date_published = models.DateField(blank=True, null=True)
published = models.CharField(choices=PUBLISHED, max_length=8)
class Meta:
"""Meta."""
abstract = True
class Photo(ImageBaseClass):
"""Photo model."""
user = models.ForeignKey(User, on_delete=models.CASCADE,
related_name='photo')
image = models.ImageField(upload_to='images')
date_uploaded = models.DateField(editable=False, auto_now_add=True)
def __str__(self):
"""Print function displays username."""
return self.title
class Album(ImageBaseClass):
"""Album model."""
user = models.ForeignKey(User, on_delete=models.CASCADE,
related_name='album')
cover = models.ImageField(upload_to='images')
date_created = models.DateField(editable=False, auto_now_add=True)
photos = models.ManyToManyField(Photo, related_name='albums', blank=True)
def __str__(self):
"""Print function displays username."""
return self.title
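# A hedged usage sketch (hypothetical objects, not part of this module): create
# a photo and an album for an existing user and link them through the
# ManyToMany field. Assumes a saved User and valid image paths.
from django.contrib.auth.models import User
from imager_images.models import Photo, Album

user = User.objects.first()
photo = Photo.objects.create(user=user, title='Sunset',
                             image='images/sunset.jpg', published=Photo.PUBLIC)
album = Album.objects.create(user=user, title='Sky',
                             cover='images/sunset.jpg', published=Album.PRIVATE)
album.photos.add(photo)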
| 29.625
| 77
| 0.65642
|
from django.db import models
from django.contrib.auth.models import User
from imager_profile.models import ImagerProfile
class ImageBaseClass(models.Model):
PRIVATE = 'PRVT'
SHARED = 'SHRD'
PUBLIC = 'PBLC'
PUBLISHED = ((PRIVATE, 'private'),
(SHARED, 'shared'),
(PUBLIC, 'public'))
title = models.CharField(max_length=180)
description = models.TextField(max_length=500, blank=True, null=True)
date_modified = models.DateField(auto_now=True)
date_published = models.DateField(blank=True, null=True)
published = models.CharField(choices=PUBLISHED, max_length=8)
class Meta:
abstract = True
class Photo(ImageBaseClass):
user = models.ForeignKey(User, on_delete=models.CASCADE,
related_name='photo')
image = models.ImageField(upload_to='images')
date_uploaded = models.DateField(editable=False, auto_now_add=True)
def __str__(self):
return self.title
class Album(ImageBaseClass):
user = models.ForeignKey(User, on_delete=models.CASCADE,
related_name='album')
cover = models.ImageField(upload_to='images')
date_created = models.DateField(editable=False, auto_now_add=True)
photos = models.ManyToManyField(Photo, related_name='albums', blank=True)
def __str__(self):
return self.title
| true
| true
|
79054540a63ad226d77b45a013285b272dde3277
| 3,996
|
py
|
Python
|
Model/lookalike-model/lookalike_model/trainer/lookalike_trainer_tfrecords.py
|
rangaswamymr/incubator-bluemarlin
|
6cb60b2a41edc6509377f9eacb7660d199a9485b
|
[
"Apache-2.0"
] | 21
|
2019-10-08T16:23:44.000Z
|
2020-04-08T23:14:36.000Z
|
Model/lookalike-model/lookalike_model/trainer/lookalike_trainer_tfrecords.py
|
rangaswamymr/incubator-bluemarlin
|
6cb60b2a41edc6509377f9eacb7660d199a9485b
|
[
"Apache-2.0"
] | 162
|
2019-10-26T05:30:04.000Z
|
2022-03-30T12:44:41.000Z
|
Model/lookalike-model/lookalike_model/trainer/lookalike_trainer_tfrecords.py
|
rangaswamymr/incubator-bluemarlin
|
6cb60b2a41edc6509377f9eacb7660d199a9485b
|
[
"Apache-2.0"
] | 33
|
2019-10-09T01:31:12.000Z
|
2022-03-29T08:00:36.000Z
|
import numpy as np
import os, time
import random
import tensorflow as tf
from lookalike_model.trainer.model_new import Model
import argparse
random.seed(1234)
# adding arguments for tfrecord directory and the checkpoint directory
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, help="input data tfrecords dir location")
parser.add_argument("--check_point_dir", type=str, help="Check Point dir location")
args, unknown = parser.parse_known_args()
if len(unknown) != 0:
print("unknown args:%s", unknown)
# tfrecord location and the check point directory location
tfrecord_location = args.data_dir + "/tf_records_lookalike_data_08july"
output = args.check_point_dir
def __data_parser(serialized_example):
features = tf.parse_single_example(serialized_example,
features={'keywords_list': tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
'ucdoc': tf.FixedLenFeature([], tf.int64),
'keyword': tf.FixedLenFeature([], tf.int64),
'is_click': tf.FixedLenFeature([], tf.float32),
'sl': tf.FixedLenFeature([], tf.int64),
'lr': tf.FixedLenFeature([], tf.float32)
})
keywords_list = tf.cast(features['keywords_list'], tf.int32)
ucdoc = tf.cast(features['ucdoc'], tf.int32)
keyword = tf.cast(features['keyword'], tf.int32)
is_click = tf.cast(features['is_click'], tf.float32)
sl = tf.cast(features['sl'], tf.int32)
lr = tf.cast(features['lr'], tf.float32)
return ucdoc, keyword, keywords_list, is_click,sl,lr
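# A hedged sketch (assumed field values) of writing one record that the parser
# above reads back: keywords_list round-trips through a variable-length
# Int64List, the remaining fields are single-element features. Relies on the
# `tf` imported above; write with tf.python_io.TFRecordWriter on TF1.
def make_example(ucdoc, keyword, keywords_list, is_click, sl, lr):
    return tf.train.Example(features=tf.train.Features(feature={
        'keywords_list': tf.train.Feature(int64_list=tf.train.Int64List(value=keywords_list)),
        'ucdoc': tf.train.Feature(int64_list=tf.train.Int64List(value=[ucdoc])),
        'keyword': tf.train.Feature(int64_list=tf.train.Int64List(value=[keyword])),
        'is_click': tf.train.Feature(float_list=tf.train.FloatList(value=[is_click])),
        'sl': tf.train.Feature(int64_list=tf.train.Int64List(value=[sl])),
        'lr': tf.train.Feature(float_list=tf.train.FloatList(value=[lr])),
    }))
# e.g. make_example(1, 7, [7, 9, 11], 1.0, 3, 0.5).SerializeToString()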
names = []
for file in os.listdir(tfrecord_location):
if file.startswith("part"):
names.append(file)
file_paths = [os.path.join(tfrecord_location, name) for name in names]
dataset = tf.data.TFRecordDataset(file_paths)
shuffle_value = 2000
repeat_value = 10
batch_size = 1000
prefetch_buffer = 2000
dataset = dataset.map(__data_parser)
dataset = dataset.repeat(repeat_value).shuffle(shuffle_value).prefetch(buffer_size=prefetch_buffer).batch(batch_size)
iterator = dataset.make_one_shot_iterator()
tf_ucdoc, tf_keyword, tf_keywords_list, tf_is_click, tf_sl, tf_lr = iterator.get_next()
unique_keywords = 811
cate_list = np.array([x for x in range(unique_keywords)])
user_count = 1349500103
item_count, cate_count = unique_keywords, unique_keywords
predict_batch_size = 5000
predict_ads_num = 30
epoch = repeat_value  # number of passes over the data, matching the dataset repeat count
total_iterations = int((user_count * epoch) // batch_size)
print('total iterations = {}'.format(total_iterations))
max_epochs = 500
model = Model(user_count, item_count, cate_count, cate_list, predict_batch_size, predict_ads_num,tf_ucdoc,tf_keyword,tf_is_click,tf_keywords_list,tf_sl)
gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
start_time = time.time()
count_epoch = 0
last_100_loss = []
print('shuffle = {}, epochs = {}, batch_size = {}, predict_batch_size = {}'.format(shuffle_value, epoch, batch_size, predict_batch_size))
for i in range(max_epochs*500):
loss, _,sl = sess.run([model.loss, model.train_op, tf_sl])
loss = round(loss, 2)
last_100_loss.append(loss)
if len(last_100_loss) == 101:
del last_100_loss[0]
if i%500==0:
print('Epoch {} DONE Iteration: {} Cost time: {} Model Loss: {} Average Loss: {}'.format(count_epoch, i, time.time()-start_time, loss,
round(sum(last_100_loss)/100, 2)))
model.save(sess, output)
count_epoch += 1
# print("i: ",i," loss: ",loss)
model.save(sess, output)
| 42.510638
| 152
| 0.654905
|
import numpy as np
import os, time
import random
import tensorflow as tf
from lookalike_model.trainer.model_new import Model
import argparse
random.seed(1234)
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, help="input data tfrecords dir location")
parser.add_argument("--check_point_dir", type=str, help="Check Point dir location")
args, unknown = parser.parse_known_args()
if len(unknown) != 0:
print("unknown args:%s", unknown)
tfrecord_location = args.data_dir + "/tf_records_lookalike_data_08july"
output = args.check_point_dir
def __data_parser(serialized_example):
features = tf.parse_single_example(serialized_example,
features={'keywords_list': tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
'ucdoc': tf.FixedLenFeature([], tf.int64),
'keyword': tf.FixedLenFeature([], tf.int64),
'is_click': tf.FixedLenFeature([], tf.float32),
'sl': tf.FixedLenFeature([], tf.int64),
'lr': tf.FixedLenFeature([], tf.float32)
})
keywords_list = tf.cast(features['keywords_list'], tf.int32)
ucdoc = tf.cast(features['ucdoc'], tf.int32)
keyword = tf.cast(features['keyword'], tf.int32)
is_click = tf.cast(features['is_click'], tf.float32)
sl = tf.cast(features['sl'], tf.int32)
lr = tf.cast(features['lr'], tf.float32)
return ucdoc, keyword, keywords_list, is_click,sl,lr
names = []
for file in os.listdir(tfrecord_location):
if file.startswith("part"):
names.append(file)
file_paths = [os.path.join(tfrecord_location, name) for name in names]
dataset = tf.data.TFRecordDataset(file_paths)
shuffle_value = 2000
repeat_value = 10
batch_size = 1000
prefetch_buffer = 2000
dataset = dataset.map(__data_parser)
dataset = dataset.repeat(repeat_value).shuffle(shuffle_value).prefetch(buffer_size=prefetch_buffer).batch(batch_size)
iterator = dataset.make_one_shot_iterator()
tf_ucdoc, tf_keyword, tf_keywords_list, tf_is_click, tf_sl, tf_lr = iterator.get_next()
unique_keywords = 811
cate_list = np.array([x for x in range(unique_keywords)])
user_count = 1349500103
item_count, cate_count = unique_keywords, unique_keywords
predict_batch_size = 5000
predict_ads_num = 30
epoch = repeat_value
total_iterations = int((user_count * epoch) // batch_size)
print('total iterations = {}'.format(total_iterations))
max_epochs = 500
model = Model(user_count, item_count, cate_count, cate_list, predict_batch_size, predict_ads_num,tf_ucdoc,tf_keyword,tf_is_click,tf_keywords_list,tf_sl)
gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
start_time = time.time()
count_epoch = 0
last_100_loss = []
print('shuffle = {}, epochs = {}, batch_size = {}, predict_batch_size = {}'.format(shuffle_value, epoch, batch_size, predict_batch_size))
for i in range(max_epochs*500):
loss, _,sl = sess.run([model.loss, model.train_op, tf_sl])
loss = round(loss, 2)
last_100_loss.append(loss)
if len(last_100_loss) == 101:
del last_100_loss[0]
if i%500==0:
print('Epoch {} DONE Iteration: {} Cost time: {} Model Loss: {} Average Loss: {}'.format(count_epoch, i, time.time()-start_time, loss,
round(sum(last_100_loss)/100, 2)))
model.save(sess, output)
count_epoch += 1
model.save(sess, output)
| true
| true
|
790546acdd4c4c652c4d84bc19aef2e2d1738c9a
| 3,439
|
py
|
Python
|
openslides_backend/action/actions/organization/initial_import.py
|
MJJojo97/openslides-backend
|
af0d1edb0070e352d46f285a1ba0bbe3702d49ae
|
[
"MIT"
] | null | null | null |
openslides_backend/action/actions/organization/initial_import.py
|
MJJojo97/openslides-backend
|
af0d1edb0070e352d46f285a1ba0bbe3702d49ae
|
[
"MIT"
] | null | null | null |
openslides_backend/action/actions/organization/initial_import.py
|
MJJojo97/openslides-backend
|
af0d1edb0070e352d46f285a1ba0bbe3702d49ae
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict, Iterable, Optional, Tuple
from datastore.shared.util import DeletedModelsBehaviour
from ....models.checker import Checker, CheckException
from ....models.models import Organization
from ....shared.exceptions import ActionException
from ....shared.filters import FilterOperator
from ....shared.interfaces.event import EventType
from ....shared.interfaces.write_request import WriteRequest
from ....shared.patterns import Collection, FullQualifiedId
from ....shared.util import INITIAL_DATA_FILE, get_initial_data_file
from ...action import Action
from ...mixins.singular_action_mixin import SingularActionMixin
from ...util.action_type import ActionType
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from ...util.typing import ActionData, ActionResults
@register_action("organization.initial_import", action_type=ActionType.STACK_INTERNAL)
class OrganizationInitialImport(SingularActionMixin, Action):
"""
Action to import an initial-data.json in an empty datastore.
Should be callable from the management service.
"""
model = Organization()
schema = DefaultSchema(Organization()).get_default_schema(
additional_required_fields={"data": {"type": "object"}},
title="Import initial data.",
description="Import an initial data json in an empty datastore.",
)
def perform(
self, action_data: ActionData, user_id: int, internal: bool = False
) -> Tuple[Optional[WriteRequest], Optional[ActionResults]]:
"""
Simplified entrypoint to perform the action.
"""
self.user_id = user_id
self.index = 0
instance = next(iter(action_data))
self.validate_instance(instance)
instance = self.update_instance(instance)
self.write_requests.extend(self.create_write_requests(instance))
final_write_request = self.process_write_requests()
return (final_write_request, [None])
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
data = instance["data"]
self.check_empty_datastore()
if not data:
data = get_initial_data_file(INITIAL_DATA_FILE)
instance["data"] = data
        # validate the imported data
checker = Checker(data=data, mode="all")
try:
checker.run_check()
except CheckException as ce:
raise ActionException(str(ce))
return instance
def check_empty_datastore(self) -> None:
filter_ = FilterOperator("id", ">=", 1)
if self.datastore.exists(
Collection("organization"),
filter_,
DeletedModelsBehaviour.ALL_MODELS,
False,
):
raise ActionException("Datastore is not empty.")
def create_write_requests(self, instance: Dict[str, Any]) -> Iterable[WriteRequest]:
json_data = instance["data"]
write_requests = []
for collection in json_data:
for entry in json_data[collection].values():
fqid = FullQualifiedId(Collection(collection), entry["id"])
write_requests.append(
self.build_write_request(
EventType.Create,
fqid,
"initial import",
entry,
)
)
return write_requests
| 36.978495
| 88
| 0.655714
|
from typing import Any, Dict, Iterable, Optional, Tuple
from datastore.shared.util import DeletedModelsBehaviour
from ....models.checker import Checker, CheckException
from ....models.models import Organization
from ....shared.exceptions import ActionException
from ....shared.filters import FilterOperator
from ....shared.interfaces.event import EventType
from ....shared.interfaces.write_request import WriteRequest
from ....shared.patterns import Collection, FullQualifiedId
from ....shared.util import INITIAL_DATA_FILE, get_initial_data_file
from ...action import Action
from ...mixins.singular_action_mixin import SingularActionMixin
from ...util.action_type import ActionType
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from ...util.typing import ActionData, ActionResults
@register_action("organization.initial_import", action_type=ActionType.STACK_INTERNAL)
class OrganizationInitialImport(SingularActionMixin, Action):
model = Organization()
schema = DefaultSchema(Organization()).get_default_schema(
additional_required_fields={"data": {"type": "object"}},
title="Import initial data.",
description="Import an initial data json in an empty datastore.",
)
def perform(
self, action_data: ActionData, user_id: int, internal: bool = False
) -> Tuple[Optional[WriteRequest], Optional[ActionResults]]:
self.user_id = user_id
self.index = 0
instance = next(iter(action_data))
self.validate_instance(instance)
instance = self.update_instance(instance)
self.write_requests.extend(self.create_write_requests(instance))
final_write_request = self.process_write_requests()
return (final_write_request, [None])
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
data = instance["data"]
self.check_empty_datastore()
if not data:
data = get_initial_data_file(INITIAL_DATA_FILE)
instance["data"] = data
checker = Checker(data=data, mode="all")
try:
checker.run_check()
except CheckException as ce:
raise ActionException(str(ce))
return instance
def check_empty_datastore(self) -> None:
filter_ = FilterOperator("id", ">=", 1)
if self.datastore.exists(
Collection("organization"),
filter_,
DeletedModelsBehaviour.ALL_MODELS,
False,
):
raise ActionException("Datastore is not empty.")
def create_write_requests(self, instance: Dict[str, Any]) -> Iterable[WriteRequest]:
json_data = instance["data"]
write_requests = []
for collection in json_data:
for entry in json_data[collection].values():
fqid = FullQualifiedId(Collection(collection), entry["id"])
write_requests.append(
self.build_write_request(
EventType.Create,
fqid,
"initial import",
entry,
)
)
return write_requests
| true
| true
|
79054726c0312f6763070ed44dc85c4d890ac147
| 6,480
|
py
|
Python
|
tests/helpers/test_service.py
|
don66/home-assistant
|
a277470363c0758bb305410aad49c257ff8bac40
|
[
"Apache-2.0"
] | 37
|
2018-05-22T07:17:26.000Z
|
2022-03-03T13:14:46.000Z
|
tests/helpers/test_service.py
|
sara0871/https-wakatime.com-android-studio
|
5a15b2c036b332c17d5f6a06664378e9273d684f
|
[
"Apache-2.0"
] | 34
|
2018-05-22T07:19:40.000Z
|
2022-03-11T23:21:03.000Z
|
tests/helpers/test_service.py
|
sara0871/https-wakatime.com-android-studio
|
5a15b2c036b332c17d5f6a06664378e9273d684f
|
[
"Apache-2.0"
] | 8
|
2018-05-30T20:05:26.000Z
|
2021-02-19T14:17:05.000Z
|
"""Test service helpers."""
import asyncio
from copy import deepcopy
import unittest
from unittest.mock import patch
# To prevent circular import when running just this file
import homeassistant.components # noqa
from homeassistant import core as ha, loader
from homeassistant.const import STATE_ON, STATE_OFF, ATTR_ENTITY_ID
from homeassistant.helpers import service, template
from homeassistant.setup import async_setup_component
import homeassistant.helpers.config_validation as cv
from tests.common import get_test_home_assistant, mock_service
class TestServiceHelpers(unittest.TestCase):
"""Test the Home Assistant service helpers."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.calls = mock_service(self.hass, 'test_domain', 'test_service')
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_template_service_call(self):
"""Test service call with templating."""
config = {
'service_template': '{{ \'test_domain.test_service\' }}',
'entity_id': 'hello.world',
'data_template': {
'hello': '{{ \'goodbye\' }}',
'data': {
'value': '{{ \'complex\' }}',
'simple': 'simple'
},
'list': ['{{ \'list\' }}', '2'],
},
}
service.call_from_config(self.hass, config)
self.hass.block_till_done()
self.assertEqual('goodbye', self.calls[0].data['hello'])
self.assertEqual('complex', self.calls[0].data['data']['value'])
self.assertEqual('simple', self.calls[0].data['data']['simple'])
self.assertEqual('list', self.calls[0].data['list'][0])
def test_passing_variables_to_templates(self):
"""Test passing variables to templates."""
config = {
'service_template': '{{ var_service }}',
'entity_id': 'hello.world',
'data_template': {
'hello': '{{ var_data }}',
},
}
service.call_from_config(self.hass, config, variables={
'var_service': 'test_domain.test_service',
'var_data': 'goodbye',
})
self.hass.block_till_done()
self.assertEqual('goodbye', self.calls[0].data['hello'])
def test_bad_template(self):
"""Test passing bad template."""
config = {
'service_template': '{{ var_service }}',
'entity_id': 'hello.world',
'data_template': {
'hello': '{{ states + unknown_var }}'
}
}
service.call_from_config(self.hass, config, variables={
'var_service': 'test_domain.test_service',
'var_data': 'goodbye',
})
self.hass.block_till_done()
self.assertEqual(len(self.calls), 0)
def test_split_entity_string(self):
"""Test splitting of entity string."""
service.call_from_config(self.hass, {
'service': 'test_domain.test_service',
'entity_id': 'hello.world, sensor.beer'
})
self.hass.block_till_done()
self.assertEqual(['hello.world', 'sensor.beer'],
self.calls[-1].data.get('entity_id'))
def test_not_mutate_input(self):
"""Test for immutable input."""
config = cv.SERVICE_SCHEMA({
'service': 'test_domain.test_service',
'entity_id': 'hello.world, sensor.beer',
'data': {
'hello': 1,
},
'data_template': {
'nested': {
'value': '{{ 1 + 1 }}'
}
}
})
orig = deepcopy(config)
# Only change after call is each template getting hass attached
template.attach(self.hass, orig)
service.call_from_config(self.hass, config, validate_config=False)
assert orig == config
@patch('homeassistant.helpers.service._LOGGER.error')
def test_fail_silently_if_no_service(self, mock_log):
"""Test failing if service is missing."""
service.call_from_config(self.hass, None)
self.assertEqual(1, mock_log.call_count)
service.call_from_config(self.hass, {})
self.assertEqual(2, mock_log.call_count)
service.call_from_config(self.hass, {
'service': 'invalid'
})
self.assertEqual(3, mock_log.call_count)
def test_extract_entity_ids(self):
"""Test extract_entity_ids method."""
self.hass.states.set('light.Bowl', STATE_ON)
self.hass.states.set('light.Ceiling', STATE_OFF)
self.hass.states.set('light.Kitchen', STATE_OFF)
loader.get_component(self.hass, 'group').Group.create_group(
self.hass, 'test', ['light.Ceiling', 'light.Kitchen'])
call = ha.ServiceCall('light', 'turn_on',
{ATTR_ENTITY_ID: 'light.Bowl'})
self.assertEqual(['light.bowl'],
service.extract_entity_ids(self.hass, call))
call = ha.ServiceCall('light', 'turn_on',
{ATTR_ENTITY_ID: 'group.test'})
self.assertEqual(['light.ceiling', 'light.kitchen'],
service.extract_entity_ids(self.hass, call))
self.assertEqual(['group.test'], service.extract_entity_ids(
self.hass, call, expand_group=False))
@asyncio.coroutine
def test_async_get_all_descriptions(hass):
"""Test async_get_all_descriptions."""
group = loader.get_component(hass, 'group')
group_config = {group.DOMAIN: {}}
yield from async_setup_component(hass, group.DOMAIN, group_config)
descriptions = yield from service.async_get_all_descriptions(hass)
assert len(descriptions) == 1
assert 'description' in descriptions['group']['reload']
assert 'fields' in descriptions['group']['reload']
logger = loader.get_component(hass, 'logger')
logger_config = {logger.DOMAIN: {}}
yield from async_setup_component(hass, logger.DOMAIN, logger_config)
descriptions = yield from service.async_get_all_descriptions(hass)
assert len(descriptions) == 2
assert 'description' in descriptions[logger.DOMAIN]['set_level']
assert 'fields' in descriptions[logger.DOMAIN]['set_level']
| 35.604396
| 75
| 0.603395
|
import asyncio
from copy import deepcopy
import unittest
from unittest.mock import patch
import homeassistant.components
from homeassistant import core as ha, loader
from homeassistant.const import STATE_ON, STATE_OFF, ATTR_ENTITY_ID
from homeassistant.helpers import service, template
from homeassistant.setup import async_setup_component
import homeassistant.helpers.config_validation as cv
from tests.common import get_test_home_assistant, mock_service
class TestServiceHelpers(unittest.TestCase):
def setUp(self):
self.hass = get_test_home_assistant()
self.calls = mock_service(self.hass, 'test_domain', 'test_service')
def tearDown(self):
self.hass.stop()
def test_template_service_call(self):
config = {
'service_template': '{{ \'test_domain.test_service\' }}',
'entity_id': 'hello.world',
'data_template': {
'hello': '{{ \'goodbye\' }}',
'data': {
'value': '{{ \'complex\' }}',
'simple': 'simple'
},
'list': ['{{ \'list\' }}', '2'],
},
}
service.call_from_config(self.hass, config)
self.hass.block_till_done()
self.assertEqual('goodbye', self.calls[0].data['hello'])
self.assertEqual('complex', self.calls[0].data['data']['value'])
self.assertEqual('simple', self.calls[0].data['data']['simple'])
self.assertEqual('list', self.calls[0].data['list'][0])
def test_passing_variables_to_templates(self):
config = {
'service_template': '{{ var_service }}',
'entity_id': 'hello.world',
'data_template': {
'hello': '{{ var_data }}',
},
}
service.call_from_config(self.hass, config, variables={
'var_service': 'test_domain.test_service',
'var_data': 'goodbye',
})
self.hass.block_till_done()
self.assertEqual('goodbye', self.calls[0].data['hello'])
def test_bad_template(self):
config = {
'service_template': '{{ var_service }}',
'entity_id': 'hello.world',
'data_template': {
'hello': '{{ states + unknown_var }}'
}
}
service.call_from_config(self.hass, config, variables={
'var_service': 'test_domain.test_service',
'var_data': 'goodbye',
})
self.hass.block_till_done()
self.assertEqual(len(self.calls), 0)
def test_split_entity_string(self):
service.call_from_config(self.hass, {
'service': 'test_domain.test_service',
'entity_id': 'hello.world, sensor.beer'
})
self.hass.block_till_done()
self.assertEqual(['hello.world', 'sensor.beer'],
self.calls[-1].data.get('entity_id'))
def test_not_mutate_input(self):
config = cv.SERVICE_SCHEMA({
'service': 'test_domain.test_service',
'entity_id': 'hello.world, sensor.beer',
'data': {
'hello': 1,
},
'data_template': {
'nested': {
'value': '{{ 1 + 1 }}'
}
}
})
orig = deepcopy(config)
template.attach(self.hass, orig)
service.call_from_config(self.hass, config, validate_config=False)
assert orig == config
@patch('homeassistant.helpers.service._LOGGER.error')
def test_fail_silently_if_no_service(self, mock_log):
service.call_from_config(self.hass, None)
self.assertEqual(1, mock_log.call_count)
service.call_from_config(self.hass, {})
self.assertEqual(2, mock_log.call_count)
service.call_from_config(self.hass, {
'service': 'invalid'
})
self.assertEqual(3, mock_log.call_count)
def test_extract_entity_ids(self):
self.hass.states.set('light.Bowl', STATE_ON)
self.hass.states.set('light.Ceiling', STATE_OFF)
self.hass.states.set('light.Kitchen', STATE_OFF)
loader.get_component(self.hass, 'group').Group.create_group(
self.hass, 'test', ['light.Ceiling', 'light.Kitchen'])
call = ha.ServiceCall('light', 'turn_on',
{ATTR_ENTITY_ID: 'light.Bowl'})
self.assertEqual(['light.bowl'],
service.extract_entity_ids(self.hass, call))
call = ha.ServiceCall('light', 'turn_on',
{ATTR_ENTITY_ID: 'group.test'})
self.assertEqual(['light.ceiling', 'light.kitchen'],
service.extract_entity_ids(self.hass, call))
self.assertEqual(['group.test'], service.extract_entity_ids(
self.hass, call, expand_group=False))
@asyncio.coroutine
def test_async_get_all_descriptions(hass):
group = loader.get_component(hass, 'group')
group_config = {group.DOMAIN: {}}
yield from async_setup_component(hass, group.DOMAIN, group_config)
descriptions = yield from service.async_get_all_descriptions(hass)
assert len(descriptions) == 1
assert 'description' in descriptions['group']['reload']
assert 'fields' in descriptions['group']['reload']
logger = loader.get_component(hass, 'logger')
logger_config = {logger.DOMAIN: {}}
yield from async_setup_component(hass, logger.DOMAIN, logger_config)
descriptions = yield from service.async_get_all_descriptions(hass)
assert len(descriptions) == 2
assert 'description' in descriptions[logger.DOMAIN]['set_level']
assert 'fields' in descriptions[logger.DOMAIN]['set_level']
| true
| true
|
7905476d1185959676b2530810eb2ba349ef8e5d
| 1,946
|
py
|
Python
|
docs/conf.py
|
saltstack/rend
|
cfbe365aff9f2177873eac8a38a3b650f704d363
|
[
"Apache-2.0"
] | 2
|
2019-09-05T15:14:37.000Z
|
2020-04-13T07:40:08.000Z
|
docs/conf.py
|
saltstack/rend
|
cfbe365aff9f2177873eac8a38a3b650f704d363
|
[
"Apache-2.0"
] | 1
|
2019-10-11T21:06:01.000Z
|
2019-10-11T21:06:01.000Z
|
docs/conf.py
|
saltstack/rend
|
cfbe365aff9f2177873eac8a38a3b650f704d363
|
[
"Apache-2.0"
] | 1
|
2019-10-01T19:08:17.000Z
|
2019-10-01T19:08:17.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
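#
# For example, assuming the package being documented lives one level above the
# docs directory (an illustration only, not this project's actual layout):
# sys.path.insert(0, os.path.abspath('..'))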
# -- Project information -----------------------------------------------------
project = 'rend'
copyright = '2020, Thomas S Hatch'
author = 'Thomas S Hatch'
# The full version, including alpha/beta/rc tags
release = '4.1'
# -- General configuration ---------------------------------------------------
master_doc = 'index'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
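# For example, 'sphinx.ext.autodoc' and 'sphinx.ext.viewcode' are commonly added here.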
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 33.551724
| 79
| 0.661871
|
project = 'rend'
copyright = '2020, Thomas S Hatch'
author = 'Thomas S Hatch'
release = '4.1'
master_doc = 'index'
extensions = [
]
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
html_theme = 'alabaster'
html_static_path = ['_static']
| true
| true
|
790549de632a3856f9a97e8555d1b3c969fa7280
| 4,345
|
py
|
Python
|
redash/__init__.py
|
bmaeser/redash
|
cf5c2c5ba2359fce0b331dc242f63c917464ed55
|
[
"BSD-2-Clause"
] | 1
|
2019-03-24T03:38:32.000Z
|
2019-03-24T03:38:32.000Z
|
redash/__init__.py
|
bmaeser/redash
|
cf5c2c5ba2359fce0b331dc242f63c917464ed55
|
[
"BSD-2-Clause"
] | null | null | null |
redash/__init__.py
|
bmaeser/redash
|
cf5c2c5ba2359fce0b331dc242f63c917464ed55
|
[
"BSD-2-Clause"
] | 1
|
2019-03-20T09:22:43.000Z
|
2019-03-20T09:22:43.000Z
|
import sys
import logging
import urlparse
import urllib
import redis
from flask import Flask, current_app
from flask_sslify import SSLify
from werkzeug.contrib.fixers import ProxyFix
from werkzeug.routing import BaseConverter
from statsd import StatsClient
from flask_mail import Mail
from flask_limiter import Limiter
from flask_limiter.util import get_ipaddr
from flask_migrate import Migrate
from redash import settings
from redash.query_runner import import_query_runners
from redash.destinations import import_destinations
__version__ = '7.0.0-beta'
import os
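# When the REMOTE_DEBUG environment variable is set, open a ptvsd listener on
# port 5678 so a remote debugger can attach to the running process.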
if os.environ.get("REMOTE_DEBUG"):
import ptvsd
ptvsd.enable_attach(address=('0.0.0.0', 5678))
def setup_logging():
handler = logging.StreamHandler(sys.stdout if settings.LOG_STDOUT else sys.stderr)
formatter = logging.Formatter(settings.LOG_FORMAT)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(settings.LOG_LEVEL)
# Make noisy libraries less noisy
if settings.LOG_LEVEL != "DEBUG":
logging.getLogger("passlib").setLevel("ERROR")
logging.getLogger("requests.packages.urllib3").setLevel("ERROR")
logging.getLogger("snowflake.connector").setLevel("ERROR")
logging.getLogger('apiclient').setLevel("ERROR")
def create_redis_connection():
logging.debug("Creating Redis connection (%s)", settings.REDIS_URL)
redis_url = urlparse.urlparse(settings.REDIS_URL)
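    # redis+socket URLs reach Redis over a Unix domain socket; the database
    # index, if any, is passed in the 'virtual_host' query parameter.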
if redis_url.scheme == 'redis+socket':
qs = urlparse.parse_qs(redis_url.query)
if 'virtual_host' in qs:
db = qs['virtual_host'][0]
else:
db = 0
client = redis.StrictRedis(unix_socket_path=redis_url.path, db=db)
else:
if redis_url.path:
redis_db = redis_url.path[1]
else:
redis_db = 0
# Redis passwords might be quoted with special characters
redis_password = redis_url.password and urllib.unquote(redis_url.password)
client = redis.StrictRedis(host=redis_url.hostname, port=redis_url.port, db=redis_db, password=redis_password)
return client
setup_logging()
redis_connection = create_redis_connection()
mail = Mail()
migrate = Migrate()
mail.init_mail(settings.all_settings())
statsd_client = StatsClient(host=settings.STATSD_HOST, port=settings.STATSD_PORT, prefix=settings.STATSD_PREFIX)
limiter = Limiter(key_func=get_ipaddr, storage_uri=settings.LIMITER_STORAGE)
import_query_runners(settings.QUERY_RUNNERS)
import_destinations(settings.DESTINATIONS)
from redash.version_check import reset_new_version_status
reset_new_version_status()
class SlugConverter(BaseConverter):
def to_python(self, value):
        # This is a workaround for when we enable multi-org and some files are being called by the index rule:
# for path in settings.STATIC_ASSETS_PATHS:
# full_path = safe_join(path, value)
# if os.path.isfile(full_path):
# raise ValidationError()
return value
def to_url(self, value):
return value
def create_app():
from redash import authentication, extensions, handlers
from redash.handlers.webpack import configure_webpack
from redash.handlers import chrome_logger
from redash.models import db, users
from redash.metrics.request import provision_app
from redash.utils import sentry
sentry.init()
app = Flask(__name__,
template_folder=settings.STATIC_ASSETS_PATH,
static_folder=settings.STATIC_ASSETS_PATH,
static_path='/static')
# Make sure we get the right referral address even behind proxies like nginx.
app.wsgi_app = ProxyFix(app.wsgi_app, settings.PROXIES_COUNT)
app.url_map.converters['org_slug'] = SlugConverter
if settings.ENFORCE_HTTPS:
SSLify(app, skips=['ping'])
# configure our database
app.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI
app.config.update(settings.all_settings())
provision_app(app)
db.init_app(app)
migrate.init_app(app, db)
mail.init_app(app)
authentication.init_app(app)
limiter.init_app(app)
handlers.init_app(app)
configure_webpack(app)
extensions.init_extensions(app)
chrome_logger.init_app(app)
users.init_app(app)
return app
| 31.258993
| 118
| 0.729574
|
import sys
import logging
import urlparse
import urllib
import redis
from flask import Flask, current_app
from flask_sslify import SSLify
from werkzeug.contrib.fixers import ProxyFix
from werkzeug.routing import BaseConverter
from statsd import StatsClient
from flask_mail import Mail
from flask_limiter import Limiter
from flask_limiter.util import get_ipaddr
from flask_migrate import Migrate
from redash import settings
from redash.query_runner import import_query_runners
from redash.destinations import import_destinations
__version__ = '7.0.0-beta'
import os
if os.environ.get("REMOTE_DEBUG"):
import ptvsd
ptvsd.enable_attach(address=('0.0.0.0', 5678))
def setup_logging():
handler = logging.StreamHandler(sys.stdout if settings.LOG_STDOUT else sys.stderr)
formatter = logging.Formatter(settings.LOG_FORMAT)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(settings.LOG_LEVEL)
if settings.LOG_LEVEL != "DEBUG":
logging.getLogger("passlib").setLevel("ERROR")
logging.getLogger("requests.packages.urllib3").setLevel("ERROR")
logging.getLogger("snowflake.connector").setLevel("ERROR")
logging.getLogger('apiclient').setLevel("ERROR")
def create_redis_connection():
logging.debug("Creating Redis connection (%s)", settings.REDIS_URL)
redis_url = urlparse.urlparse(settings.REDIS_URL)
if redis_url.scheme == 'redis+socket':
qs = urlparse.parse_qs(redis_url.query)
if 'virtual_host' in qs:
db = qs['virtual_host'][0]
else:
db = 0
client = redis.StrictRedis(unix_socket_path=redis_url.path, db=db)
else:
if redis_url.path:
redis_db = redis_url.path[1]
else:
redis_db = 0
redis_password = redis_url.password and urllib.unquote(redis_url.password)
client = redis.StrictRedis(host=redis_url.hostname, port=redis_url.port, db=redis_db, password=redis_password)
return client
setup_logging()
redis_connection = create_redis_connection()
mail = Mail()
migrate = Migrate()
mail.init_mail(settings.all_settings())
statsd_client = StatsClient(host=settings.STATSD_HOST, port=settings.STATSD_PORT, prefix=settings.STATSD_PREFIX)
limiter = Limiter(key_func=get_ipaddr, storage_uri=settings.LIMITER_STORAGE)
import_query_runners(settings.QUERY_RUNNERS)
import_destinations(settings.DESTINATIONS)
from redash.version_check import reset_new_version_status
reset_new_version_status()
class SlugConverter(BaseConverter):
def to_python(self, value):
return value
def to_url(self, value):
return value
def create_app():
from redash import authentication, extensions, handlers
from redash.handlers.webpack import configure_webpack
from redash.handlers import chrome_logger
from redash.models import db, users
from redash.metrics.request import provision_app
from redash.utils import sentry
sentry.init()
app = Flask(__name__,
template_folder=settings.STATIC_ASSETS_PATH,
static_folder=settings.STATIC_ASSETS_PATH,
static_path='/static')
app.wsgi_app = ProxyFix(app.wsgi_app, settings.PROXIES_COUNT)
app.url_map.converters['org_slug'] = SlugConverter
if settings.ENFORCE_HTTPS:
SSLify(app, skips=['ping'])
app.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI
app.config.update(settings.all_settings())
provision_app(app)
db.init_app(app)
migrate.init_app(app, db)
mail.init_app(app)
authentication.init_app(app)
limiter.init_app(app)
handlers.init_app(app)
configure_webpack(app)
extensions.init_extensions(app)
chrome_logger.init_app(app)
users.init_app(app)
return app
| true
| true
|
79054a9f5cf29c63018ceead1b2180a90661739d
| 1,402
|
py
|
Python
|
nemo/package_info.py
|
btarjan/NeMo
|
6a2bb4d19524b0bff198b3d9bbd116f82486a36e
|
[
"Apache-2.0"
] | null | null | null |
nemo/package_info.py
|
btarjan/NeMo
|
6a2bb4d19524b0bff198b3d9bbd116f82486a36e
|
[
"Apache-2.0"
] | null | null | null |
nemo/package_info.py
|
btarjan/NeMo
|
6a2bb4d19524b0bff198b3d9bbd116f82486a36e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
MAJOR = 1
MINOR = 7
PATCH = 0
PRE_RELEASE = 'rc'
# Use the following formatting: (major, minor, patch, pre-release)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
__shortversion__ = '.'.join(map(str, VERSION[:3]))
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
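# With the values above, __shortversion__ evaluates to '1.7.0' and __version__ to '1.7.0rc'.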
__package_name__ = 'nemo_toolkit'
__contact_names__ = 'NVIDIA'
__contact_emails__ = 'nemo-toolkit@nvidia.com'
__homepage__ = 'https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/'
__repository_url__ = 'https://github.com/nvidia/nemo'
__download_url__ = 'https://github.com/NVIDIA/NeMo/releases'
__description__ = 'NeMo - a toolkit for Conversational AI'
__license__ = 'Apache2'
__keywords__ = 'deep learning, machine learning, gpu, NLP, NeMo, nvidia, pytorch, torch, tts, speech, language'
| 38.944444
| 111
| 0.74679
|
MAJOR = 1
MINOR = 7
PATCH = 0
PRE_RELEASE = 'rc'
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
__shortversion__ = '.'.join(map(str, VERSION[:3]))
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
__package_name__ = 'nemo_toolkit'
__contact_names__ = 'NVIDIA'
__contact_emails__ = 'nemo-toolkit@nvidia.com'
__homepage__ = 'https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/'
__repository_url__ = 'https://github.com/nvidia/nemo'
__download_url__ = 'https://github.com/NVIDIA/NeMo/releases'
__description__ = 'NeMo - a toolkit for Conversational AI'
__license__ = 'Apache2'
__keywords__ = 'deep learning, machine learning, gpu, NLP, NeMo, nvidia, pytorch, torch, tts, speech, language'
| true
| true
|