code stringlengths 101 5.91M |
|---|
def main(opts):
    """Segment AMI meeting recordings into per-utterance wav files.

    For every meeting listed in ``opts.ami_meeting_ids`` this:
      1. mirrors the source folder layout into ``opts.out_root``,
      2. runs (or reuses a cached) VAD pass over the four close-talk
         headset channels, producing a ``<meeting>.Headset.vad`` list,
      3. cuts each headset signal into the VAD segments and writes them
         as individual wav files,
      4. optionally cuts the same segments out of the distant (SDM)
         microphone channels selected by ``opts.map_ihm2sdm``,
      5. saves an utt2spk-style dict mapping produced wav names to their
         source channel basename as a ``.npy`` file.

    Relies on helpers defined elsewhere in this project:
    ``copy_folder``, ``mk_mic_path``, ``segment_signal``,
    ``handle_multichannel_wav``.
    """
    copy_folder(opts.data_root, opts.out_root)
    # AMI provides 4 individual headset microphones (ihm) per meeting
    headsets = [0, 1, 2, 3]
    meetings = []
    with open(opts.ami_meeting_ids, 'r') as f:
        for meetid in f:
            meetings.append(meetid.strip())
    assert (len(meetings) > 0), 'Looks like meeting list is empty'
    sdms = []
    if (len(opts.map_ihm2sdm) > 0):
        sdms = opts.map_ihm2sdm.split(',')
    for sdm in sdms:
        assert (sdm in ['0', '1', '2', '3', '4', '5', '6', '7']), 'There are only 8 distant mics in AMI (0...7)Pick one of them instead {}'.format(sdm)
    print('Preparing AMI for {} meetings, headset plus {} sdms channels'.format(len(meetings), len(sdms)))
    # maps produced segment wav name -> source channel basename (speaker proxy)
    file2spkidx = {}
    for meeting in meetings:
        print('Processing meeting {}'.format(meeting))
        file_out = '{}/{}.Headset.vad'.format(opts.out_root, meeting)
        if (not os.path.exists(file_out)):
            # no cached VAD list yet: run VAD over all headset channels
            print('VADing signals to build {} list...'.format(file_out))
            with open(file_out, 'w') as f:
                wav_lst = []
                for headset in headsets:
                    (meetpath, headset_file) = mk_mic_path(meeting, headset, 'ihm')
                    wav_lst.append((opts.data_root, meetpath, headset_file))
                for wav_entry in wav_lst:
                    annotations = segment_signal(wav_entry)
                    for annotation in annotations:
                        f.write(annotation)
        else:
            print('[!] Found existing {} file, proceeding with it'.format(file_out))
        with open(file_out, 'r') as f:
            # each line: "<wav_file> <beg_samp> <end_samp> <seg_id>"
            fnames = [l.rstrip() for l in f]
        print('Producing segments out of VAD list for ihms...')
        beg_t = timer()
        for headset in headsets:
            (meetpath, headset_file) = mk_mic_path(meeting, headset, 'ihm')
            print('Working on {}'.format(headset_file))
            (signal, fs) = sf.read(os.path.join(opts.data_root, meetpath, headset_file))
            (signal, side) = handle_multichannel_wav(signal, opts.channel)
            # peak-normalize before cutting
            signal = (signal / np.max(np.abs(signal)))
            for (li, line) in tqdm.tqdm(enumerate(fnames, start=1), total=len(fnames)):
                (wav_file, beg_samp, end_samp, seg_id) = line.split(' ')
                # the VAD list covers all headsets; keep this channel's rows
                if (wav_file != headset_file):
                    continue
                segment = signal[int(float(beg_samp)):int(float(end_samp))]
                out_wav = wav_file.replace('.wav', (('-' + str(seg_id)) + '.wav'))
                path_out = os.path.join(opts.out_root, meetpath, out_wav)
                sf.write(path_out, segment, fs)
                file2spkidx[out_wav] = wav_file.replace('.wav', '')
        if (len(sdms) > 0):
            print('Producing segments out of VAD list for sdms...')
            for sdm in sdms:
                (meetpath, sdm_file) = mk_mic_path(meeting, sdm, 'sdm')
                path_in = os.path.join(opts.data_root, meetpath, sdm_file)
                if (not os.path.exists(path_in)):
                    print('File {} not found. Skipping.'.format(path_in))
                    continue
                (signal, fs) = sf.read(path_in)
                signal = (signal / np.max(np.abs(signal)))
                # cut SDM channels at the headset-derived VAD boundaries
                for (li, line) in tqdm.tqdm(enumerate(fnames, start=1), total=len(fnames)):
                    (wav_file, beg_samp, end_samp, seg_id) = line.split(' ')
                    segment = signal[int(float(beg_samp)):int(float(end_samp))]
                    wav_file_basename = wav_file.replace('.wav', '')
                    wav_out = '{}-{}.Arr1-0{}.wav'.format(wav_file_basename, seg_id, sdm)
                    path_out = os.path.join(opts.out_root, meetpath, wav_out)
                    sf.write(path_out, segment, fs)
                    file2spkidx[wav_out] = wav_file_basename
        end_t = timer()
        print('Finalized segments production for meeting : {}'.format(meeting))
        print('Production time: {:.1f} s'.format((end_t - beg_t)))
    np.save(os.path.join(opts.out_root, opts.utt2spk_dict), file2spkidx, allow_pickle=True)
    print('Finished all stuff')
class FortranIntrinsics():
    """Maps Fortran intrinsic procedures to their DaCe/AST replacements.

    ``IMPLEMENTATIONS_AST`` lists intrinsics handled by dedicated
    replacement classes, ``DIRECT_REPLACEMENTS`` maps already-renamed
    helper functions to their handlers, and
    ``EXEMPTED_FROM_CALL_EXTRACTION`` lists handlers whose calls must
    not be extracted into temporaries.
    """
    IMPLEMENTATIONS_AST = {
        'SELECTED_INT_KIND': SelectedKind,
        'SELECTED_REAL_KIND': SelectedKind,
        'SUM': Sum,
        'PRODUCT': Product,
        'ANY': Any,
        'COUNT': Count,
        'ALL': All,
        'MINVAL': MinVal,
        'MAXVAL': MaxVal,
        'MERGE': Merge,
    }
    DIRECT_REPLACEMENTS = {
        '__dace_selected_int_kind': SelectedKind,
        '__dace_selected_real_kind': SelectedKind,
    }
    EXEMPTED_FROM_CALL_EXTRACTION = [Merge]

    def __init__(self):
        # transformations registered lazily while replacing function names
        self._transformations_to_run = set()

    def transformations(self) -> Set[Type[NodeTransformer]]:
        """Return the transformations scheduled by previous replacements."""
        return self._transformations_to_run

    # BUGFIX: these two take no ``self`` and were undecorated, so calling
    # them on an instance raised TypeError; mark them as static methods.
    @staticmethod
    def function_names() -> List[str]:
        """All DaCe-side names of loop-based intrinsic replacements."""
        return list(LoopBasedReplacement.INTRINSIC_TO_DACE.values())

    @staticmethod
    def call_extraction_exemptions() -> List[str]:
        """Function names whose calls must not be extracted."""
        return [func.Transformation.func_name() for func in FortranIntrinsics.EXEMPTED_FROM_CALL_EXTRACTION]

    def replace_function_name(self, node: FASTNode) -> ast_internal_classes.Name_Node:
        """Map an intrinsic name node to its replacement Name_Node.

        Simple intrinsics are renamed via a lookup table; the rest are
        delegated to their IMPLEMENTATIONS_AST handler, registering its
        transformation when one exists.
        """
        func_name = node.string
        # NOTE: the original table listed 'EXP' twice; duplicate removed.
        replacements = {'INT': '__dace_int', 'DBLE': '__dace_dble', 'SQRT': 'sqrt', 'COSH': 'cosh', 'ABS': 'abs', 'MIN': 'min', 'MAX': 'max', 'EXP': 'exp', 'EPSILON': '__dace_epsilon', 'TANH': 'tanh', 'SIGN': '__dace_sign'}
        if func_name in replacements:
            return ast_internal_classes.Name_Node(name=replacements[func_name])
        if self.IMPLEMENTATIONS_AST[func_name].has_transformation():
            self._transformations_to_run.add(self.IMPLEMENTATIONS_AST[func_name].Transformation)
        return ast_internal_classes.Name_Node(name=self.IMPLEMENTATIONS_AST[func_name].replaced_name(func_name))

    def replace_function_reference(self, name: ast_internal_classes.Name_Node, args: ast_internal_classes.Arg_List_Node, line):
        """Build the Call_Expr_Node for an (already renamed) intrinsic call."""
        func_types = {'__dace_int': 'INT', '__dace_dble': 'DOUBLE', 'sqrt': 'DOUBLE', 'cosh': 'DOUBLE', 'abs': 'DOUBLE', 'min': 'DOUBLE', 'max': 'DOUBLE', 'exp': 'DOUBLE', '__dace_epsilon': 'DOUBLE', 'tanh': 'DOUBLE', '__dace_sign': 'DOUBLE'}
        if name.name in func_types:
            # known scalar intrinsic with a fixed result type
            return ast_internal_classes.Call_Expr_Node(name=name, type=func_types[name.name], args=args.args, line_number=line)
        elif name.name in self.DIRECT_REPLACEMENTS:
            return self.DIRECT_REPLACEMENTS[name.name].replace(name, args, line)
        else:
            # fall back to an untyped call
            return ast_internal_classes.Call_Expr_Node(name=name, type='VOID', args=args.args, line_number=line)
(reuse_venv=True)
def clean(session):
    """Remove generated build artifacts selected via ``session.posargs``.

    With no posargs every category is cleaned; otherwise only the
    requested ``--headers`` / ``--signatures`` / ``--tests`` / ``--docs``
    categories are removed via ``remove_if_found``.
    """
    arg_parser = argparse.ArgumentParser()
    for flag in ('--headers', '--signatures', '--tests', '--docs'):
        arg_parser.add_argument(flag, action='store_true')
    options = arg_parser.parse_args(session.posargs)
    # an empty posargs list means "clean everything"
    everything = not session.posargs
    if everything or options.headers:
        remove_if_found(
            pathlib.Path('awkward-cpp', 'header-only'),
            pathlib.Path('src', 'awkward', '_connect', 'header-only'),
        )
    if everything or options.signatures:
        remove_if_found(
            pathlib.Path('awkward-cpp', 'include', 'awkward', 'kernels.h'),
            pathlib.Path('awkward-cpp', 'src', 'awkward_cpp', '_kernel_signatures.py'),
            pathlib.Path('src', 'awkward', '_connect', 'cuda', '_kernel_signatures.py'),
        )
    if everything or options.tests:
        remove_if_found(
            pathlib.Path('awkward-cpp', 'tests-spec'),
            pathlib.Path('awkward-cpp', 'tests-spec-explicit'),
            pathlib.Path('awkward-cpp', 'tests-cpu-kernels'),
            pathlib.Path('tests-cuda-kernels'),
        )
    if everything or options.docs:
        remove_if_found(pathlib.Path('docs', 'reference', 'generated', 'kernels.rst'))
def clean_segmentations(mask_path, output_path):
    """Post-process capsule/region segmentation masks.

    For every mask file under ``mask_path/capsule``, keeps only the
    largest contour of the capsule mask (re-drawn as a filled binary
    image) and copies the matching region mask through unchanged.
    Results go to ``output_path/capsule`` and ``output_path/regions``.
    """
    cap_img_path = os.path.join(mask_path, 'capsule')
    reg_img_path = os.path.join(mask_path, 'regions')
    maybe_mkdir(os.path.join(output_path, 'capsule'))
    maybe_mkdir(os.path.join(output_path, 'regions'))
    for f in os.listdir(cap_img_path):
        cap_img = cv2.imread(os.path.join(cap_img_path, f))
        reg_img = cv2.imread(os.path.join(reg_img_path, f))
        cap_img = cv2.cvtColor(cap_img, cv2.COLOR_BGR2GRAY)
        (contours, hierarchy) = cv2.findContours(cap_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        new_cap_img = np.zeros(cap_img.shape)
        if contours:
            # keep only the largest contour; smaller blobs are noise
            biggest_contour = max(contours, key=cv2.contourArea)
            cv2.fillPoly(new_cap_img, pts=[biggest_contour], color=1)
        # (removed a no-op ``reg_img = reg_img`` self-assignment)
        cv2.imwrite(os.path.join(output_path, 'capsule', f), new_cap_img)
        cv2.imwrite(os.path.join(output_path, 'regions', f), reg_img)
def convert_sst_general(paths, dataset_name, version):
    """Read the SST train/dev/test phrase splits and write one dataset.

    ``paths`` supplies SENTIMENT_BASE (input root) and
    SENTIMENT_DATA_DIR (output root); ``version`` selects the phrase
    extraction variant used by process_sst.
    """
    sst_dir = os.path.join(paths['SENTIMENT_BASE'], 'sentiment-treebank')
    splits = [
        process_sst.get_phrases(version, split_file, sst_dir)
        for split_file in ('train.txt', 'dev.txt', 'test.txt')
    ]
    process_utils.write_dataset(splits, paths['SENTIMENT_DATA_DIR'], dataset_name)
def Toroidal6RegularGrid2dGraph(p, q):
    """Return the 6-regular toroidal grid graph on ``p`` x ``q`` vertices.

    Starts from the toroidal 2D grid and adds one wrap-around diagonal
    edge per vertex, which makes every vertex 6-regular.  Both ``p`` and
    ``q`` must be integers greater than 3.
    """
    if p <= 3 or q <= 3:
        raise ValueError('parameters p and q must be integers larger than 3')
    graph = ToroidalGrid2dGraph(p, q)
    # add the diagonal neighbour (x+1, y+1), wrapping around the torus
    for (x, y) in graph:
        graph.add_edge((x, y), ((x + 1) % p, (y + 1) % q))
    graph.name('Toroidal Hexagonal Grid graph on {}x{} elements'.format(p, q))
    return graph
def folds_label_combination_pairs_without_evidence(y, folds, order):
    """Sum, over folds, the count of order-wise label combinations that
    occur somewhere in ``y`` but nowhere inside the given fold."""
    per_row = get_combination_wise_output_matrix(y, order)
    universe = get_unique_combinations(per_row)
    missing_counts = [
        len(universe.difference(get_unique_combinations(per_row[[fold]])))
        for fold in folds
    ]
    return np.sum(missing_counts)
def fix_prefix_quotations(summary_content):
    """Re-attach stray leading quotation marks to the previous sentence.

    Sentence splitters sometimes emit a closing quote (optionally
    followed by the start of the next sentence) as its own "sentence".
    This walks the sentence list and merges such fragments back:

    * a lone ``"`` or ``'``            -> appended to the previous sentence
    * ``" lowercase...`` continuation  -> appended to the previous sentence
    * ``" Uppercase...`` new sentence  -> the quote joins the previous
      sentence; the remainder starts a new sentence

    Args:
        summary_content: list of sentence strings.

    Returns:
        The fixed list of sentence strings.
    """
    ix = 0
    fixed_content = []
    while ix < len(summary_content):
        sentence = summary_content[ix]
        if fixed_content and re.match('^[\\"\\\']$', sentence):
            # bare quote: glue it onto the previous sentence
            fixed_content[-1] = fixed_content[-1] + sentence
            ix += 1
        elif fixed_content and re.match('^[\\"\\\'] [a-z]', sentence):
            # quote followed by a lowercase word: continuation of previous
            fixed_content[-1] = fixed_content[-1] + sentence
            ix += 1
        elif fixed_content and re.match('^[\\"\\\'] [A-Z]', sentence):
            # quote then a capitalized word: close the previous sentence
            # with the quote and start a new sentence with the rest.
            # BUGFIX: guard on fixed_content - the original indexed
            # fixed_content[-1] here and raised IndexError when the very
            # first sentence started with a stray quote.
            sent_split = sentence.split(' ')
            fixed_content[-1] = fixed_content[-1] + sent_split[0].strip()
            fixed_content.append(' '.join(sent_split[1:]).strip())
            ix += 1
        else:
            fixed_content.append(sentence)
            ix += 1
    return fixed_content
class TransformerMockingjay(TransformerInitModel):
    """Mockingjay-style Transformer encoder over acoustic features.

    Wraps an optional input-projection module and a stack of Transformer
    encoder layers; ``forward`` returns encoded hidden states (and
    attention maps when ``output_attentions`` is set).
    """

    def __init__(self, config, input_dim, output_attentions=False, keep_multihead_output=False, with_input_module=True):
        """
        Args:
            config: model configuration (provides num_hidden_layers etc.).
            input_dim: dimensionality of the input acoustic features.
            output_attentions: if True, forward() also returns attentions.
            keep_multihead_output: keep per-head outputs (for pruning).
            with_input_module: project inputs before the encoder.
        """
        super(TransformerMockingjay, self).__init__(config, output_attentions)
        self.with_input_module = with_input_module
        if self.with_input_module:
            self.input_representations = TransformerInputRepresentations(config, input_dim)
        self.encoder = TransformerEncoder(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        self.apply(self.init_Transformer_weights)
        self.input_size = input_dim

    def prune_heads(self, heads_to_prune):
        """Prune attention heads; ``heads_to_prune`` maps layer index -> head list."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def get_multihead_outputs(self):
        """Return each encoder layer's kept multi-head attention output."""
        return [layer.attention.self.multihead_output for layer in self.encoder.layer]

    def forward(self, spec_input, pos_enc=None, attention_mask=None, output_all_encoded_layers=False, head_mask=None):
        """Encode ``spec_input``.

        Returns an ``Output`` with ``hidden_states`` (last layer, or all
        layers when output_all_encoded_layers) and, when
        ``self.output_attentions`` is set, the attention maps as well.
        """
        if attention_mask is None:
            attention_mask = torch.ones_like(spec_input)
        # additive mask broadcastable over heads: 1 -> 0.0 (keep), 0 -> -1e4 (drop)
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(dtype=spec_input.dtype)
        extended_attention_mask = (1.0 - extended_attention_mask) * (-10000.0)
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                # BUGFIX: was head_mask.expand_as(num_layers, -1, ...);
                # expand_as() takes a tensor argument - expand() takes sizes.
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
            head_mask = head_mask.to(dtype=spec_input.dtype)
        else:
            head_mask = [None] * self.config.num_hidden_layers
        if self.with_input_module:
            input_representations = self.input_representations(spec_input, pos_enc)
        else:
            input_representations = spec_input
        encoded_layers = self.encoder(input_representations, extended_attention_mask, output_all_encoded_layers=output_all_encoded_layers, head_mask=head_mask)
        if self.output_attentions:
            (all_attentions, encoded_layers) = encoded_layers
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        if self.output_attentions:
            return Output(output=all_attentions, hidden_states=encoded_layers)
        return Output(hidden_states=encoded_layers)
def CheckSpacingForFunctionCall(filename, line, linenum, error):
    """Checks for the correctness of various spacing around function calls.

    Args:
      filename: The name of the current file.
      line: The text of the line to check.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    # Since function calls often occur inside if/for/while/switch
    # expressions - which have their own, more liberal conventions - we
    # first narrow down to the parenthesized expression inside such a
    # construct, and apply the stricter function-call rules to that.
    fncall = line
    for pattern in ('\\bif\\s*\\((.*)\\)\\s*{', '\\bfor\\s*\\((.*)\\)\\s*{', '\\bwhile\\s*\\((.*)\\)\\s*[{;]', '\\bswitch\\s*\\((.*)\\)\\s*{'):
        match = Search(pattern, line)
        if match:
            fncall = match.group(1)
            break
    # Skip control-flow keywords, and make exceptions for casts through
    # a parenthesized type followed by a call or an array subscript.
    if ((not Search('\\b(if|for|while|switch|return|new|delete|catch|sizeof)\\b', fncall)) and (not Search(' \\([^)]+\\)\\([^)]*(\\)|,$)', fncall)) and (not Search(' \\([^)]+\\)\\[[^\\]]+\\]', fncall))):
        # space right after the opening paren of a call (unless a
        # line-continuation backslash follows)
        if Search('\\w\\s*\\(\\s(?!\\s*\\\\$)', fncall):
            error(filename, linenum, 'whitespace/parens', 4, 'Extra space after ( in function call')
        elif Search('\\(\\s+(?!(\\s*\\\\)|\\()', fncall):
            error(filename, linenum, 'whitespace/parens', 2, 'Extra space after (')
        # space between a name and its opening paren - excluding macro
        # definitions, typedefs and function-pointer declarations
        if (Search('\\w\\s+\\(', fncall) and (not Search('#\\s*define|typedef', fncall)) and (not Search('\\w\\s+\\((\\w+::)*\\*\\w+\\)\\(', fncall))):
            error(filename, linenum, 'whitespace/parens', 4, 'Extra space before ( in function call')
        if Search('[^)]\\s+\\)\\s*[^{\\s]', fncall):
            # whitespace before a closing paren: either the paren belongs
            # on the previous line, or it is simply extra space
            if Search('^\\s+\\)', fncall):
                error(filename, linenum, 'whitespace/parens', 2, 'Closing ) should be moved to the previous line')
            else:
                error(filename, linenum, 'whitespace/parens', 2, 'Extra space before )')
def pad_and_stack(tensors, pad_size=None, value=0):
    """Pad 2-D tensors along dim 0 to a common length and stack them.

    Each tensor is truncated/padded (with ``value``) to ``pad_size`` rows
    (defaulting to the maximum input length), then all are stacked into a
    single (batch, pad_size, feat) tensor.

    Returns:
        (stacked_tensor, original_lengths)
    """
    lengths = [t.shape[0] for t in tensors]
    if not pad_size:
        pad_size = max(lengths)
    rows = []
    for tensor, length in zip(tensors, lengths):
        bottom_pad = max(0, pad_size - length)
        rows.append(F.pad(input=tensor[:pad_size], pad=(0, 0, 0, bottom_pad), value=value))
    return (torch.stack(rows, dim=0), lengths)
def _JDUTC_to_BJDTDB(JDUTC, ra=0.0, dec=0.0, epoch=2451545.0, pmra=0.0, pmdec=0.0, px=0.0, rv=0.0, loc=None, ephemeris='de430', leap_dir=os.path.join(os.path.dirname(__file__), 'data'), leap_update=True):
    """Convert a JD(UTC) epoch to BJD(TDB) for a target at (ra, dec).

    Combines three corrections: the TDB-UTC clock correction, the
    geometric (light-travel / Romer) delay projected on the target
    direction - including the target's proper motion and radial velocity
    between ``epoch`` and the observation date - and the Einstein
    correction for the observer's position.

    Args:
        JDUTC: Julian date in UTC (astropy-Time-like; exposes ``.jd``).
        ra, dec: target coordinates in degrees.
        epoch: catalog epoch (JD) of the coordinates.
        pmra, pmdec: proper motion components.
        px: parallax.
        rv: radial velocity.
        loc: observer location, passed to PINT for the ITRF->GCRS step.
        ephemeris: solar-system ephemeris name.
        leap_dir, leap_update: NOTE(review): accepted but unused in this
            body - presumably consumed by JDUTC_to_JDTDB elsewhere; confirm.

    Returns:
        (bjd_tdb, warning, error) with warning/error propagated from
        JDUTC_to_JDTDB.
    """
    (JDTDB, JDTT, warning, error) = JDUTC_to_JDTDB(JDUTC)
    # clock correction, in seconds
    clock_corr = ((JDTDB.jd - JDUTC.jd) * 86400.0)
    # observer geocentric position/velocity from PINT (ITRF -> GCRS)
    (r_pint, v_pint) = PINT.gcrs_posvel_from_itrf(loc, JDUTC, JDTT)
    r_eci = r_pint[0]
    v_eci = v_pint[0]
    # barycentric position/velocity of the geocenter (km -> m, /day -> /s)
    earth_geo = get_body_barycentric_posvel('earth', JDTDB, ephemeris=ephemeris)
    r_obs = (r_eci + (earth_geo[0].xyz.value * 1000.0))
    v_geo = ((earth_geo[1].xyz.value * 1000.0) / 86400.0)
    # relativistic addition of observer and geocenter velocities
    v_obs = ((v_eci + v_geo) / (1.0 + ((v_eci * v_geo) / (c ** 2))))
    einstein_corr = (np.sum((r_eci * v_geo)) / (c * c))
    # unit vector towards the catalog position (ra/dec in degrees)
    r0hat = np.array([(math.cos(((ra * np.pi) / 180.0)) * math.cos(((dec * np.pi) / 180.0))), (math.sin(((ra * np.pi) / 180.0)) * math.cos(((dec * np.pi) / 180.0))), math.sin(((dec * np.pi) / 180.0))])
    # local tangent-plane basis (east, north) at the target direction
    up = [0.0, 0.0, 1.0]
    east = np.cross(up, r0hat)
    east = (east / math.sqrt(sum((east * east))))
    north = np.cross(r0hat, east)
    # tangential proper-motion vector
    mu = ((((pmra * east) + (pmdec * north)) / pctoau) / 1000)
    # years elapsed between catalog epoch and observation
    epoch0 = (2000.0 + ((epoch - 2451545.0) / 365.25))
    yearnow = (2000.0 + ((JDTDB.jd - 2451545.0) / 365.25))
    T = (yearnow - epoch0)
    # radial component of the space motion
    vpi = (((rv / 1000.0) * kmstoauyr) * ((px / 1000.0) / pctoau))
    vel = (mu + (vpi * r0hat))
    # propagate the direction vector to the observation date and re-normalize
    r = (r0hat + (vel * T))
    rhat = (r / math.sqrt(sum((r * r))))
    # geometric (Romer) delay: observer position projected on target direction
    geo_corr = (np.sum((r_obs * rhat)) / c)
    delta_t = ((geo_corr + clock_corr) + einstein_corr)
    result = (JDUTC.jd + (delta_t / 86400.0))
    return (result, warning, error)
def main():
    """Train and/or test a Chinese spelling-correction (CSC) model.

    Builds the tokenizer, data loaders and the model selected by
    ``cfg.MODEL.NAME`` ('softmaskedbert4csc' or 'macbert4csc'),
    optionally warm-starts from ``cfg.MODEL.WEIGHTS``, trains with
    PyTorch Lightning, exports the best checkpoint in HuggingFace layout
    (``pytorch_model.bin`` + tokenizer files), and finally runs the test
    loop when requested by ``cfg.MODE``.
    """
    cfg = args_parse()
    logger.info(f'load model, model arch: {cfg.MODEL.NAME}')
    tokenizer = BertTokenizerFast.from_pretrained(cfg.MODEL.BERT_CKPT)
    collator = DataCollator(tokenizer=tokenizer)
    (train_loader, valid_loader, test_loader) = make_loaders(collator, train_path=cfg.DATASETS.TRAIN, valid_path=cfg.DATASETS.VALID, test_path=cfg.DATASETS.TEST, batch_size=cfg.SOLVER.BATCH_SIZE, num_workers=4)
    if (cfg.MODEL.NAME == 'softmaskedbert4csc'):
        model = SoftMaskedBert4Csc(cfg, tokenizer)
    elif (cfg.MODEL.NAME == 'macbert4csc'):
        model = MacBert4Csc(cfg, tokenizer)
    else:
        raise ValueError('model not found.')
    # warm-start from previous weights when a checkpoint file exists
    if (cfg.MODEL.WEIGHTS and os.path.exists(cfg.MODEL.WEIGHTS)):
        model.load_from_checkpoint(checkpoint_path=cfg.MODEL.WEIGHTS, cfg=cfg, map_location=device, tokenizer=tokenizer)
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    # keep only the single best checkpoint by validation loss
    ckpt_callback = ModelCheckpoint(monitor='val_loss', dirpath=cfg.OUTPUT_DIR, filename='{epoch:02d}-{val_loss:.2f}', save_top_k=1, mode='min')
    logger.info('train model ...')
    trainer = pl.Trainer(max_epochs=cfg.SOLVER.MAX_EPOCHS, gpus=(None if (device == torch.device('cpu')) else cfg.MODEL.GPU_IDS), accumulate_grad_batches=cfg.SOLVER.ACCUMULATE_GRAD_BATCHES, callbacks=[ckpt_callback])
    torch.autograd.set_detect_anomaly(True)
    if (('train' in cfg.MODE) and train_loader and (len(train_loader) > 0)):
        if (valid_loader and (len(valid_loader) > 0)):
            trainer.fit(model, train_loader, valid_loader)
        else:
            # no validation set: fit on training data only
            trainer.fit(model, train_loader)
        logger.info('train model done.')
    # prefer the freshly trained best checkpoint; fall back to cfg weights
    if (ckpt_callback and (len(ckpt_callback.best_model_path) > 0)):
        ckpt_path = ckpt_callback.best_model_path
    elif (cfg.MODEL.WEIGHTS and os.path.exists(cfg.MODEL.WEIGHTS)):
        ckpt_path = cfg.MODEL.WEIGHTS
    else:
        ckpt_path = ''
    logger.info(f'ckpt_path: {ckpt_path}')
    if (ckpt_path and os.path.exists(ckpt_path)):
        model.load_state_dict(torch.load(ckpt_path)['state_dict'])
        tokenizer.save_pretrained(cfg.OUTPUT_DIR)
        bert = BertForMaskedLM.from_pretrained(cfg.MODEL.BERT_CKPT)
        bert.save_pretrained(cfg.OUTPUT_DIR)
        state_dict = torch.load(ckpt_path)['state_dict']
        new_state_dict = OrderedDict()
        if (cfg.MODEL.NAME in ['macbert4csc']):
            # strip the 'bert.' prefix so HF can load the weights directly
            for (k, v) in state_dict.items():
                if k.startswith('bert.'):
                    new_state_dict[k[5:]] = v
        else:
            new_state_dict = state_dict
        torch.save(new_state_dict, os.path.join(cfg.OUTPUT_DIR, 'pytorch_model.bin'))
    if (('test' in cfg.MODE) and test_loader and (len(test_loader) > 0)):
        trainer.test(model, test_loader)
def merge_workload(dataset: str, version: str, workload: str, count: int=10) -> None:
    """Merge ``count`` partial workloads (<workload>_0 .. _{count-1}) into one.

    Concatenates the train/valid/test querysets and labels of every part
    and dumps the merged workload under the base ``workload`` name.
    """
    merged_queries = {'train': [], 'valid': [], 'test': []}
    merged_labels = {'train': [], 'valid': [], 'test': []}
    for part in range(count):
        L.info(f'Merge querset {workload}_{part}...')
        part_queries = load_queryset(dataset, f'{workload}_{part}')
        part_labels = load_labels(dataset, version, f'{workload}_{part}')
        for split in merged_queries.keys():
            merged_queries[split] += part_queries[split]
            merged_labels[split] += part_labels[split]
    for split in merged_queries.keys():
        L.info(f'Final queryset has {len(merged_queries[split])} queries with {len(merged_labels[split])} labels')
    L.info('Dump queryset and labels...')
    dump_queryset(dataset, workload, merged_queries)
    dump_labels(dataset, version, workload, merged_labels)
    L.info(f'Done, run: rm data/{dataset}/workload/{workload}_[0-9]* to remove temporary files')
def get_condition(filename):
    """Load the timbre-model test condition array for ``filename``.

    Reads ``data/timbre_model/test/condition/<filename>_condi.npy`` and
    returns it as a float Tensor transposed from (time, features) to
    (features, time).
    """
    c_path = ('data/timbre_model/test/condition/' + filename) + '_condi.npy'
    # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # plain ``float`` (== float64) is the drop-in equivalent.
    condition = np.load(c_path).astype(float)
    return torch.Tensor(condition).transpose(0, 1)
class Modulation(Enum):
    """Radio modulation scheme: FSK, or LoRa with spreading factor 6-12.

    LoRa members' integer values equal their spreading factor.
    """
    FSK = 0
    SF6 = 6
    SF7 = 7
    SF8 = 8
    SF9 = 9
    SF10 = 10
    SF11 = 11
    SF12 = 12

    def __str__(self):
        """Human-readable description of the modulation."""
        if self == Modulation.FSK:
            return 'Modulation: FSK'
        # BUGFIX: self.value is an int; the original concatenated it to a
        # str and raised TypeError for every LoRa member.
        return 'Modulation: LoRa, Spreading Factor {}'.format(self.value)
class DataLoader():
    """torchtext-based loader for rumor-detection tweet threads.

    Reads json train/test splits into TabularDatasets, builds
    GloVe-backed vocabularies, and yields batches augmented with word
    position indices and word/post attention masks.
    """

    def __init__(self, config, split, type_='train', lang='en'):
        # NOTE(review): ``split`` is accepted but never read in this body -
        # confirm whether callers rely on it.
        assert (config.extension in ['json'])
        self.config = config
        self.extension = self.config.extension
        self.max_length = self.config.max_length
        self.max_tweets = self.config.max_tweets
        self.lang = lang
        if (self.lang == 'zh'):
            print('Doing RD for chinese')
            # NOTE(review): rebinding the *local* name ``nlp`` has no effect
            # on the module-level tokenizer used by tokenize_text - confirm
            # the intended behavior.
            nlp = nlp_chinese
        if (type_ == 'train'):
            self.data_folder_path = self.config.data_folder
            self.train_file_path = self.config.train_file_path
            self.test_1_file_path = self.config.test_1_file_path
            self.test_2_file_path = self.config.test_2_file_path
            self.run_pipeline()

    def get_data(self, type_, return_id=False):
        """Yield batches for the requested split.

        Each yielded tuple is (X, y, word_pos, time_delay, structure,
        attention_mask_word, attention_mask_post), prefixed with the post
        ids when ``return_id`` is True.
        """
        assert (type_ in ['train', 'train_test', 'test_1', 'test_2', 'test'])
        # NOTE(review): max_batch_size is computed but never used below.
        max_batch_size = (self.config.batch_size if (type_ == 'train') else (self.config.batch_size_test if (type_ == 'train_test') else (self.config.batch_size_test if (type_ == 'test') else (self.config.batch_size_test if (type_ == 'test_1') else (self.config.batch_size_test if (type_ == 'test_2') else 'something is wrong')))))
        data = (self.train_batch if (type_ == 'train') else (self.train_test_batch if (type_ == 'train_test') else (self.test_batch if (type_ == 'test') else (self.test_1_batch if (type_ == 'test_1') else (self.test_2_batch if (type_ == 'test_2') else 'something is wrong')))))
        for batch in data:
            id_ = getattr(batch, self.config.keys_order['post_id'])
            X = getattr(batch, self.config.keys_order['content'])
            y = getattr(batch, self.config.keys_order['label'])
            structure = getattr(batch, self.config.keys_order['structure'])
            time_delay = getattr(batch, self.config.keys_order['time_delay'])
            (batch_size, num_articles, num_words) = X.shape
            # 0..num_words-1 position indices replicated per article and batch
            word_pos = np.repeat(np.expand_dims(np.repeat(np.expand_dims(np.arange(num_words), axis=0), num_articles, axis=0), axis=0), batch_size, axis=0)
            word_pos = torch.from_numpy(word_pos)
            # mask out positions equal to 1 (assumes index 1 is the pad
            # token - TODO confirm against the built vocabulary)
            attention_mask_word = torch.where((X == 1), torch.zeros(1), torch.ones(1)).type(torch.FloatTensor)
            check = torch.sum(torch.where((X == 1), torch.ones(1), torch.zeros(1)), dim=(- 1))
            # a post consisting entirely of padding gets masked out too
            attention_mask_post = torch.where((check == self.config.max_length), torch.zeros(1), torch.ones(1)).type(torch.FloatTensor)
            if (batch_size >= len(self.config.gpu_idx)):
                if return_id:
                    (yield (id_, X, y, word_pos, time_delay, structure, attention_mask_word, attention_mask_post))
                else:
                    (yield (X, y, word_pos, time_delay, structure, attention_mask_word, attention_mask_post))

    def clean_text(text):
        """Normalize raw tweet text (always called as DataLoader.clean_text).

        NOTE(review): the first statement below is garbled in this copy
        (likely a URL-replacing re.sub whose pattern was lost) - restore
        it from the original source before running.
        """
        text = re.sub(' 'URL', text)
        text = text.replace("'s", '')
        text = text.replace("'", '')
        text = text.replace("n't", " n't")
        text = text.replace('', '')
        text = text.replace('#', '')
        text = text.replace('_', ' ')
        text = text.replace('-', ' ')
        text = text.replace('&', '')
        text = text.replace('>', '')
        text = text.replace('"', '')
        text = text.replace('.', '')
        text = text.replace(',', '')
        text = text.replace('(', '')
        text = text.replace(')', '')
        # collapse runs of whitespace to single spaces
        text = ' '.join(text.split())
        return text.strip()

    def clean_tokenized_text(text_lst):
        """Collapse consecutive runs of identical tokens to one occurrence."""
        if (len(text_lst) <= 1):
            return text_lst
        idx = 0
        cleaned_token_lst = []
        while (idx < (len(text_lst) - 1)):
            current_token = text_lst[idx]
            next_token = text_lst[(idx + 1)]
            if (current_token != next_token):
                cleaned_token_lst.append(current_token)
                idx += 1
            else:
                # skip past the last later occurrence of the repeated token
                last_idx = (max([(i + idx) for (i, val) in enumerate(text_lst[idx:]) if (val == current_token)]) + 1)
                cleaned_token_lst.append(current_token)
                idx = last_idx
        if (cleaned_token_lst[(- 1)] != text_lst[(- 1)]):
            cleaned_token_lst.append(text_lst[(- 1)])
        return cleaned_token_lst

    def tokenize_structure(structure_lst):
        """Identity tokenizer: the structure list is already numeric."""
        return structure_lst

    def tokenize_text(text):
        """Clean, spaCy-tokenize, lowercase and dedupe a tweet string."""
        text = DataLoader.clean_text(text)
        token_lst = [token.text.lower() for token in nlp(text)]
        token_lst = DataLoader.clean_tokenized_text(token_lst)
        return token_lst

    def define_fields(self):
        """Declare the torchtext Fields and map json keys to them."""
        self.id_field = Field(sequential=False, tokenize=(lambda x: x), use_vocab=True)
        self.tweet_field = Field(sequential=True, tokenize=DataLoader.tokenize_text, include_lengths=False, lower=True, fix_length=self.max_length, use_vocab=True)
        self.timestamp_field = Field(sequential=False, include_lengths=False, use_vocab=False)
        self.structure_field = Field(sequential=True, tokenize=(lambda x: DataLoader.tokenize_structure(x)), include_lengths=False, fix_length=self.config.max_tweets, pad_token=self.config.num_structure_index, use_vocab=False)
        self.label_field = Field(sequential=False, use_vocab=False)
        # nested fields pad each thread to max_tweets posts
        self.tweet_lst_field = NestedField(self.tweet_field, fix_length=self.config.max_tweets)
        self.timestamp_lst_field = NestedField(self.timestamp_field, pad_token=str(self.config.size), fix_length=self.config.max_tweets)
        self.structure_lst_field = NestedField(self.structure_field, fix_length=self.config.max_tweets)
        data_fields = {}
        for (key, val) in self.config.keys_order.items():
            # NOTE(review): 'post_id' uses a separate ``if`` while the rest
            # chain with elif, so 'content' is also checked for post_id rows
            # - harmless here but confirm it matches the intent.
            if (key == 'post_id'):
                data_fields[val] = (val, self.id_field)
            if (key == 'content'):
                data_fields[val] = (val, self.tweet_lst_field)
            elif (key == 'label'):
                data_fields[val] = (val, self.label_field)
            elif (key == 'time_delay'):
                data_fields[val] = (val, self.timestamp_lst_field)
            elif (key == 'structure'):
                data_fields[val] = (val, self.structure_lst_field)
        self.data_fields = data_fields

    def read_data(self, path):
        """Load one json split as a torchtext TabularDataset."""
        data = TabularDataset(path=path, format=self.extension, fields=self.data_fields)
        return data

    def build_vectors(self):
        """Build id and tweet vocabularies (tweets backed by GloVe vectors)."""
        vec = vocab.Vectors(name=self.config.glove_file, cache=self.config.glove_directory)
        self.id_field.build_vocab(getattr(self.train, self.config.keys_order['post_id']), getattr(self.test_1, self.config.keys_order['post_id']), getattr(self.test_2, self.config.keys_order['post_id']))
        self.tweet_field.build_vocab(getattr(self.train, self.config.keys_order['content']), getattr(self.test_1, self.config.keys_order['content']), getattr(self.test_2, self.config.keys_order['content']), max_size=self.config.max_vocab, vectors=vec)

    def load_batches(self, dataset, batch_size):
        """Wrap a dataset in a BucketIterator sorted by thread length."""
        data = BucketIterator.splits(datasets=(dataset,), batch_sizes=(batch_size,), sort_key=(lambda x: len(getattr(x, self.config.keys_order['content']))), sort_within_batch=True, repeat=False)
        return data[0]

    def load_vocab_vectors(self, vocab):
        """Inject a pre-built vocabulary into the tweet field."""
        self.tweet_field.vocab = vocab

    def run_pipeline(self):
        """Full setup: fields -> datasets -> vocab -> batch iterators."""
        self.define_fields()
        self.train = self.read_data(os.path.join(self.data_folder_path, self.train_file_path))
        self.test_1 = self.read_data(os.path.join(self.data_folder_path, self.test_1_file_path))
        self.test_2 = self.read_data(os.path.join(self.data_folder_path, self.test_2_file_path))
        self.build_vectors()
        self.train_batch = self.load_batches(self.train, self.config.batch_size)
        self.train_test_batch = self.load_batches(self.train, self.config.batch_size_test)
        self.test_1_batch = self.load_batches(self.test_1, self.config.batch_size_test)
        self.test_2_batch = self.load_batches(self.test_2, self.config.batch_size_test)
def storage_gather(storage: SingleProcessTensorStorage, dst_rank: int=0) -> Optional[MultiProcessTensorStorage]:
    """Gather a single-process tensor storage onto ``dst_rank``.

    Dispatches on the concrete storage type (RAM- or file-backed) and
    raises for any other storage implementation.
    """
    handlers = (
        (SingleProcessRamTensorStorage, _ram_storage_gather),
        (SingleProcessFileTensorStorage, _file_storage_gather),
    )
    for storage_cls, gather_fn in handlers:
        if isinstance(storage, storage_cls):
            return gather_fn(storage, dst_rank)
    raise Exception(f'Unsupported storage for gather operation: {storage}')
class MT5EncoderModel(metaclass=DummyObject):
    """Auto-generated placeholder used when torch is not installed.

    Instantiation raises an informative ImportError via
    ``requires_backends`` instead of a bare NameError.
    """
    # backends that must be installed for the real class to be importable
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # raises ImportError naming the missing 'torch' backend
        requires_backends(self, ['torch'])
def _parametric_plot3d_curve(f, urange, plot_points, **kwds):
    """Render a parametric space curve (f_x(u), f_y(u), f_z(u)).

    ``f`` is a triple of coordinate functions and ``urange`` the
    parameter range; sampled points are joined with line3d, forwarding
    ``kwds`` to it.
    """
    from sage.plot.misc import setup_for_eval_on_grid
    (coord_fns, ranges) = setup_for_eval_on_grid(f, [urange], plot_points)
    (fx, fy, fz) = coord_fns
    points = []
    for u in xsrange(*ranges[0], include_endpoint=True):
        points.append((fx(u), fy(u), fz(u)))
    return line3d(points, **kwds)
def test_RecordArray():
    """drop_none on a nested RecordArray: axis=1 must match masking via
    is_none, and axis=2 must prune None elements inside inner lists."""
    layout = ak.Array([[{'x': [1], 'y': [[2]]}], None, [None], [{'x': None, 'y': None}], [{'x': [None], 'y': [None]}], [{'x': [11], 'y': [[None]]}]])
    dropped_axis1 = to_list(ak.drop_none(layout, axis=1))
    masked_axis1 = to_list(layout[(~ ak.is_none(layout, axis=1))])
    assert dropped_axis1 == masked_axis1
    dropped_axis2 = to_list(ak.drop_none(layout, axis=2))
    assert dropped_axis2 == [[{'x': [1], 'y': [[2]]}], None, [None], [{'x': None, 'y': None}], [{'x': [], 'y': []}], [{'x': [11], 'y': [[None]]}]]
class ReplayBuffer():
    """Fixed-capacity FIFO experience replay buffer for off-policy RL.

    Stores (obs, act, rew, next_obs, done) transitions in preallocated
    float32 numpy arrays and samples uniform random minibatches.
    """

    def __init__(self, obs_dim, act_dim, size):
        """Allocate storage for up to ``size`` transitions."""
        self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)
        self.rews_buf = np.zeros(size, dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        self.ptr = 0        # next write index
        self.size = 0       # number of valid entries
        self.max_size = size

    def store(self, obs, act, rew, next_obs, done):
        """Insert one transition, overwriting the oldest when full."""
        idx = self.ptr
        self.obs1_buf[idx] = obs
        self.obs2_buf[idx] = next_obs
        self.acts_buf[idx] = act
        self.rews_buf[idx] = rew
        self.done_buf[idx] = done
        self.ptr = (idx + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample_batch(self, batch_size=32):
        """Sample ``batch_size`` transitions uniformly, with replacement."""
        idxs = np.random.randint(0, self.size, size=batch_size)
        return {
            'obs1': self.obs1_buf[idxs],
            'obs2': self.obs2_buf[idxs],
            'acts': self.acts_buf[idxs],
            'rews': self.rews_buf[idxs],
            'done': self.done_buf[idxs],
        }
def is_array_like(x) -> bool:
    """Duck-typed check for ndarray-like objects (shape, dtype and T)."""
    required_attrs = ('shape', 'dtype', 'T')
    return all(hasattr(x, attr) for attr in required_attrs)
def save_to_log(logdir, logfile, message):
    """Append ``message`` plus a newline to ``logdir``/``logfile``.

    Uses a context manager so the file handle is always closed - the
    original left the handle open if the write raised.
    """
    with open((logdir + '/') + logfile, 'a') as f:
        f.write(message + '\n')
    return
def sanity_check(state_dict, pretrained_weights):
    """Verify that non-classifier weights were untouched by linear probing.

    Loads the pretrained checkpoint and asserts every parameter except
    the final fc layer is element-wise equal to its pretrained
    counterpart (keys are paired positionally).
    """
    print("=> loading '{}' for sanity check".format(pretrained_weights))
    pretrained_state = torch.load(pretrained_weights, map_location='cpu')['state_dict']
    for current_key, pretrained_key in zip(list(state_dict.keys()), list(pretrained_state.keys())):
        # the linear classifier head is expected to change; skip it
        if ('fc.weight' in current_key) or ('fc.bias' in current_key):
            continue
        matches = (state_dict[current_key].cpu() == pretrained_state[pretrained_key]).all()
        assert matches, '{} is changed in linear classifier training.'.format(current_key)
    print('=> sanity check passed.')
class TestBeamStep(tf.test.TestCase):
    """Unit tests for a single beam-search decoding step."""

    def setUp(self):
        super(TestBeamStep, self).setUp()
        self.state_size = 10
        config = beam_search.BeamSearchConfig(beam_width=3, vocab_size=5, eos_token=0, length_penalty_weight=0.6, choose_successors_fn=beam_search.choose_top_k)
        self.config = config

    def test_step(self):
        """No beam finished: the top-k successors are chosen and all
        lengths advance by one."""
        beam_state = beam_search.BeamSearchState(log_probs=tf.nn.log_softmax(tf.ones(self.config.beam_width)), lengths=tf.constant(2, shape=[self.config.beam_width], dtype=tf.int32), finished=tf.zeros([self.config.beam_width], dtype=tf.bool))
        # spike a few logits so the expected winners are unambiguous
        logits_ = np.full([self.config.beam_width, self.config.vocab_size], 0.0001)
        logits_[(0, 2)] = 1.9
        logits_[(0, 3)] = 2.1
        logits_[(1, 3)] = 3.1
        logits_[(1, 4)] = 0.9
        logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
        log_probs = tf.nn.log_softmax(logits)
        (outputs, next_beam_state) = beam_search.beam_search_step(time_=2, logits=logits, beam_state=beam_state, config=self.config)
        with self.test_session() as sess:
            (outputs_, next_state_, state_, log_probs_) = sess.run([outputs, next_beam_state, beam_state, log_probs])
        np.testing.assert_array_equal(outputs_.predicted_ids, [3, 3, 2])
        np.testing.assert_array_equal(outputs_.beam_parent_ids, [1, 0, 0])
        np.testing.assert_array_equal(next_state_.lengths, [3, 3, 3])
        np.testing.assert_array_equal(next_state_.finished, [False, False, False])
        # new beam scores = parent score + chosen-token log-prob
        expected_log_probs = state_.log_probs[[1, 0, 0]]
        expected_log_probs[0] += log_probs_[(1, 3)]
        expected_log_probs[1] += log_probs_[(0, 3)]
        expected_log_probs[2] += log_probs_[(0, 2)]
        np.testing.assert_array_equal(next_state_.log_probs, expected_log_probs)

    def test_step_with_eos(self):
        """An already-finished beam keeps its length and stays finished."""
        beam_state = beam_search.BeamSearchState(log_probs=tf.nn.log_softmax(tf.ones(self.config.beam_width)), lengths=tf.convert_to_tensor([2, 1, 2], dtype=tf.int32), finished=tf.constant([False, True, False], dtype=tf.bool))
        logits_ = np.full([self.config.beam_width, self.config.vocab_size], 0.0001)
        logits_[(0, 2)] = 1.1
        logits_[(1, 2)] = 1.0
        logits_[(2, 2)] = 1.0
        logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
        log_probs = tf.nn.log_softmax(logits)
        (outputs, next_beam_state) = beam_search.beam_search_step(time_=2, logits=logits, beam_state=beam_state, config=self.config)
        with self.test_session() as sess:
            (outputs_, next_state_, state_, log_probs_) = sess.run([outputs, next_beam_state, beam_state, log_probs])
        np.testing.assert_array_equal(outputs_.predicted_ids, [0, 2, 2])
        np.testing.assert_array_equal(outputs_.beam_parent_ids, [1, 0, 2])
        np.testing.assert_array_equal(next_state_.lengths, [1, 3, 3])
        np.testing.assert_array_equal(next_state_.finished, [True, False, False])
        # the finished beam (index 0 here) contributes no new log-prob
        expected_log_probs = state_.log_probs[outputs_.beam_parent_ids]
        expected_log_probs[1] += log_probs_[(0, 2)]
        expected_log_probs[2] += log_probs_[(2, 2)]
        np.testing.assert_array_equal(next_state_.log_probs, expected_log_probs)

    def test_step_with_new_eos(self):
        """A beam that emits the EOS token this step is marked finished
        and its length is not advanced."""
        beam_state = beam_search.BeamSearchState(log_probs=tf.nn.log_softmax(tf.ones(self.config.beam_width)), lengths=tf.constant(2, shape=[self.config.beam_width], dtype=tf.int32), finished=tf.zeros([self.config.beam_width], dtype=tf.bool))
        logits_ = np.full([self.config.beam_width, self.config.vocab_size], 0.0001)
        logits_[(0, 0)] = 1.9
        logits_[(0, 3)] = 2.1
        logits_[(1, 3)] = 3.1
        logits_[(1, 4)] = 0.9
        logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
        log_probs = tf.nn.log_softmax(logits)
        (outputs, next_beam_state) = beam_search.beam_search_step(time_=2, logits=logits, beam_state=beam_state, config=self.config)
        with self.test_session() as sess:
            (outputs_, next_state_, state_, log_probs_) = sess.run([outputs, next_beam_state, beam_state, log_probs])
        np.testing.assert_array_equal(outputs_.predicted_ids, [3, 3, 0])
        np.testing.assert_array_equal(outputs_.beam_parent_ids, [1, 0, 0])
        np.testing.assert_array_equal(next_state_.lengths, [3, 3, 2])
        np.testing.assert_array_equal(next_state_.finished, [False, False, True])
        expected_log_probs = state_.log_probs[[1, 0, 0]]
        expected_log_probs[0] += log_probs_[(1, 3)]
        expected_log_probs[1] += log_probs_[(0, 3)]
        expected_log_probs[2] += log_probs_[(0, 0)]
        np.testing.assert_array_equal(next_state_.log_probs, expected_log_probs)
def parse_arguments():
    """Build and run the CLI parser for a pain-recognition training job.

    Every option is optional (nargs='?'); unset options come back as None.
    Returns the parsed argparse.Namespace.
    """
    arg_parser = argparse.ArgumentParser()
    # experiment setup
    arg_parser.add_argument('--config-file', nargs='?', type=str, help='path to config file')
    arg_parser.add_argument('--train-subjects', nargs='?', type=str, help='List of subject-id:s to train on, choosing from range(0,6): ex [0,1,2,3]')
    arg_parser.add_argument('--val-subjects', nargs='?', type=str, help='List of subject-id:s to validate on, choosing from range(0,6): ex [4,5]')
    arg_parser.add_argument('--test-subjects', nargs='?', type=str, help='List of subject-id:s to test on, choosing from range(0,6): ex [4,5]')
    arg_parser.add_argument('--dataset-str', nargs='?', type=str, help="Which dataset to get subjects from 'lps224', 'lps', 'lps_pftrain_224'")
    arg_parser.add_argument('--subjects-overview', nargs='?', type=str, help='List with ID:s of subjects in dataset.')
    arg_parser.add_argument('--job-identifier', nargs='?', type=str, help='Choose some string to identify the image of the training process')
    arg_parser.add_argument('--test-run', nargs='?', type=int, help='Whether to run as a quick test or not.')
    # model / optimizer hyper-parameters
    arg_parser.add_argument('--batch-size', nargs='?', type=int, help='')
    arg_parser.add_argument('--dropout-1', nargs='?', type=float, help='')
    arg_parser.add_argument('--kernel-size', nargs='?', type=int, help='')
    arg_parser.add_argument('--lr', nargs='?', type=float, help='')
    arg_parser.add_argument('--nb-lstm-layers', nargs='?', type=int, help='')
    arg_parser.add_argument('--nb-lstm-units', nargs='?', type=int, help='')
    arg_parser.add_argument('--nb-layers-enc', nargs='?', type=int, help='')
    arg_parser.add_argument('--nb-heads-enc', nargs='?', type=int, help='')
    arg_parser.add_argument('--model-size', nargs='?', type=int, help='')
    arg_parser.add_argument('--optimizer', nargs='?', type=str, help='')
    # per-split sample counts
    arg_parser.add_argument('--nb-pain-train', nargs='?', type=int, help='')
    arg_parser.add_argument('--nb-nopain-train', nargs='?', type=int, help='')
    arg_parser.add_argument('--nb-pain-val', nargs='?', type=int, help='')
    arg_parser.add_argument('--nb-nopain-val', nargs='?', type=int, help='')
    return arg_parser.parse_args()
def multiprocess_training_loader(process_number: int, _config, _queue: mp.Queue, _wait_for_exit: mp.Event, _local_file, _fasttext_vocab_cached_mapping, _fasttext_vocab_cached_data):
    """Worker-process loop: read IR triples from `_local_file`, bucket-batch
    them and push each batch onto `_queue`, then block until told to exit.

    `process_number` is currently unused in the body (kept for the
    pool-spawn signature).  Fixes over the previous version: truthiness
    test instead of `== True`, and an unknown `token_embedder_type` now
    raises a clear ValueError instead of a NameError further down.
    """
    _tokenizer = None
    if _config['preprocessed_tokenized']:
        # input is already tokenized; just split on whitespace
        _tokenizer = WordTokenizer(word_splitter=JustSpacesWordSplitter())

    embedder_type = _config['token_embedder_type']
    if embedder_type == 'embedding':
        _token_indexers = {'tokens': SingleIdTokenIndexer(lowercase_tokens=True)}
        _vocab = Vocabulary.from_files(_config['vocab_directory'])
    elif embedder_type == 'fasttext':
        _token_indexers = {'tokens': FastTextNGramIndexer(_config['fasttext_max_subwords'])}
        _vocab = FastTextVocab(_fasttext_vocab_cached_mapping, _fasttext_vocab_cached_data, _config['fasttext_max_subwords'])
    elif embedder_type == 'elmo':
        _token_indexers = {'tokens': ELMoTokenCharactersIndexer()}
        # ELMo is character-based; presumably index_with(None) is tolerated
        # by the iterator here — TODO confirm
        _vocab = None
    else:
        # fail fast with a clear message instead of an obscure NameError
        raise ValueError('unknown token_embedder_type: {}'.format(embedder_type))

    _triple_loader = IrTripleDatasetReader(lazy=True, tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=_config['max_doc_length'], max_query_length=_config['max_query_length'])
    _iterator = BucketIterator(batch_size=int(_config['batch_size_train']), sorting_keys=[('doc_pos_tokens', 'num_tokens'), ('doc_neg_tokens', 'num_tokens')])
    _iterator.index_with(_vocab)
    for training_batch in _iterator(_triple_loader.read(_local_file), num_epochs=1):
        _queue.put(training_batch)
    _queue.close()
    # keep the process alive until the consumer signals shutdown
    _wait_for_exit.wait()
class HKONowcastingFactory(EncoderForecasterBaseFactory):
    """Encoder-forecaster symbol factory for the HKO nowcasting benchmark.

    Pulls frame geometry and the central evaluation crop from the global
    cfg, and builds a grouped MSE/MAE/GDL loss symbol.
    """

    def __init__(self, batch_size, in_seq_len, out_seq_len, name='hko_nowcasting'):
        super(HKONowcastingFactory, self).__init__(batch_size=batch_size,
                                                   in_seq_len=in_seq_len,
                                                   out_seq_len=out_seq_len,
                                                   height=cfg.HKO.ITERATOR.HEIGHT,
                                                   width=cfg.HKO.ITERATOR.WIDTH,
                                                   name=name)
        self._central_region = cfg.HKO.EVALUATION.CENTRAL_REGION

    def _slice_central(self, data):
        """Crop the configured central evaluation window from a 5-D symbol."""
        x_begin, y_begin, x_end, y_end = self._central_region
        return mx.sym.slice(data, begin=(0, 0, 0, y_begin, x_begin), end=(None, None, None, y_end, x_end))

    def _concat_month_code(self):
        # month codes are not used for HKO
        raise NotImplementedError

    @staticmethod
    def _wrap_loss(sym, grad_scale, enabled, name):
        """MakeLoss when the term contributes gradients, else BlockGrad
        (the value is still reported, but no gradient flows)."""
        if enabled:
            return mx.sym.MakeLoss(sym, grad_scale=grad_scale, name=name)
        return mx.sym.BlockGrad(sym, name=name)

    def loss_sym(self, pred=mx.sym.Variable('pred'), mask=mx.sym.Variable('mask'), target=mx.sym.Variable('target')):
        """Build the grouped (MSE, MAE, GDL) training-loss symbol."""
        self.reset_all()
        weights = get_loss_weight_symbol(data=target, mask=mask, seq_len=self._out_seq_len)
        avg_mse = mx.sym.mean(weighted_mse(pred=pred, gt=target, weight=weights))
        avg_mae = mx.sym.mean(weighted_mae(pred=pred, gt=target, weight=weights))
        avg_gdl = mx.sym.mean(masked_gdl_loss(pred=pred, gt=target, mask=mask))
        scale = cfg.MODEL.NORMAL_LOSS_GLOBAL_SCALE
        avg_mse = self._wrap_loss(avg_mse, scale * cfg.MODEL.L2_LAMBDA, cfg.MODEL.L2_LAMBDA > 0, 'mse')
        avg_mae = self._wrap_loss(avg_mae, scale * cfg.MODEL.L1_LAMBDA, cfg.MODEL.L1_LAMBDA > 0, 'mae')
        avg_gdl = self._wrap_loss(avg_gdl, scale * cfg.MODEL.GDL_LAMBDA, cfg.MODEL.GDL_LAMBDA > 0, 'gdl')
        return mx.sym.Group([avg_mse, avg_mae, avg_gdl])
def tau(a, b, eta):
    """Responsibility-weighted tau, summed over the component axis.

    Weights `normal.tau(a, b)` by the probabilities from `p(a, b, eta)`
    and reduces along axis 0.
    """
    weights = p(a, b, eta)
    weighted = weights * normal.tau(a, b)
    return weighted.sum(axis=0)
def facets_for_RP4():
    """Return the facets of a 16-vertex triangulation of RP^4.

    Two seed facets are propagated through the orbit of a permutation
    group; duplicates are skipped, preserving first-seen order.
    """
    from sage.groups.perm_gps.permgroup import PermutationGroup
    generators = ['(2, 7)(4, 10)(5, 6)(11, 12)',
                  '(1, 2, 3, 4, 5, 10)(6, 8, 9)(11, 12, 13, 14, 15, 16)']
    group = PermutationGroup(generators)
    seeds = [(1, 2, 4, 5, 11), (1, 2, 4, 11, 13)]
    facets = []
    seen = set()
    for perm in group:
        mapping = perm.dict()
        for seed in seeds:
            image = tuple(mapping[v] for v in seed)
            if image not in seen:
                seen.add(image)
                facets.append(image)
    return facets
def hide_rename_model(model_name):
    """Hide a model and its final-checkpoint evaluations in the repository.

    The model is renamed with a random '_hidden_<n>' suffix before being
    hidden, presumably to free the original name for reuse — confirm
    against m_repo's semantics.
    """
    model = m_repo.get_model(name=model_name, load_final_checkpoint=True, load_evaluations=True)
    eval_uuids = [ev.uuid for ev in model.final_checkpoint.evaluations]
    for evaluation in m_repo.get_evaluations(eval_uuids):
        m_repo.hide_evaluation(evaluation.uuid)
    hidden_name = model_name + f'_hidden_{random.randint(0, 10000)}'
    m_repo.rename_model(model.uuid, hidden_name)
    m_repo.hide_model(model.uuid)
def offsets_to_index(offsets):
    """Expand a monotone offsets array into per-element start indices.

    For each consecutive pair (start, stop): a zero-length span contributes
    the bare `start` once; otherwise every index in [start, stop) is
    emitted.  Returns the flat list of indices.
    """
    index = []
    for start, stop in zip(offsets, offsets[1:]):
        span = stop - start
        if span == 0:
            # empty span: still record its start position once
            index.append(start)
        index.extend(start + k for k in range(span))
    return index
def dataset_dest_prefix(args, output_prefix, lang):
    """Build the destination path prefix for a binarized dataset split.

    When `lang` is given, appends '.<src>-<tgt>.<lang>'; otherwise just
    '<destdir>/<output_prefix>'.
    """
    if lang is None:
        suffix = ''
    else:
        suffix = '.{}-{}.{}'.format(args.source_lang, args.target_lang, lang)
    return '{}/{}{}'.format(args.destdir, output_prefix, suffix)
def accuracy(y_pred, y_true, topk=(1,)):
    """Top-k classification accuracy.

    y_pred: (batch, classes) scores; y_true: (batch,) class labels.
    Returns a list with one scalar tensor per k in `topk`, each the
    fraction of samples whose label is within the top-k predictions.
    """
    k_max = max(topk)
    n = y_true.size(0)
    _, top_idx = y_pred.topk(k_max, 1, True, True)
    top_idx = top_idx.t()  # (k_max, batch): row r holds the rank-r prediction
    hits = top_idx.eq(y_true.view(1, -1).expand_as(top_idx))
    scores = []
    for k in topk:
        n_correct = hits[:k].contiguous().view(-1).float().sum(0)
        scores.append(n_correct.mul_(1.0 / n))
    return scores
def forzen_param(model):
    """Freeze all parameters up to the first whose name contains '10';
    that parameter and every later one stay trainable.

    (The misspelt name 'forzen' is kept for caller compatibility.)
    Always returns True.
    """
    unfreeze = False
    for name, param in model.named_parameters():
        if '10' in name:
            unfreeze = True
        param.requires_grad = unfreeze
    return True
def move_montgomery(root_folder, truth_csv, destination_root):
    """Sort frontal chest X-rays into healthy/disease folders.

    Reads the ground-truth CSV; rows with 'No Finding' == 1 are copied to
    <destination_root>/healthy and rows with 'Consolidation' == 1 to
    <destination_root>/disease.  Each source 'view.jpg' path is remapped to
    its 'view1_frontal.jpg' sibling and copied as '<patient_dir>.jpg'.

    (The two previously duplicated copy loops are factored into
    _copy_frontal_views.)
    """
    root_path = Path(root_folder)
    os.makedirs(destination_root, exist_ok=True)
    truth = pd.read_csv(truth_csv)
    splits = [
        ('healthy', truth[truth['No Finding'] == 1]),
        ('disease', truth[truth['Consolidation'] == 1]),
    ]
    for subdir, rows in splits:
        _copy_frontal_views(root_path, rows, Path(destination_root) / subdir)


def _copy_frontal_views(root_path, rows, dst_path):
    """Copy each row's frontal view into dst_path (helper for move_montgomery)."""
    os.makedirs(str(dst_path), exist_ok=True)
    for _, row in rows.iterrows():
        fname = row['Path']
        # keep the last three directory components; path[-3] is the
        # patient directory used for the target file name
        path = fname.split('/')[-4:-1]
        fname = fname.replace('view.jpg', 'view1_frontal.jpg')
        path.append('view1_frontal.jpg')
        dst_fname = dst_path / f'{path[-3]}.jpg'
        shutil.copy(str(root_path / fname), dst_fname)
def main():
    """Smoke-test writer: dump two dummy images with transcriptions to HDF5."""
    alphabet = ['a', 'b', 'c', 'd']
    images = [
        numpy.zeros((14, 14), dtype='float32'),
        numpy.zeros((12, 12), dtype='float32'),
    ]
    # transcriptions are indices into `alphabet`
    transcriptions = [[0, 1, 2], [2, 0, 1]]
    write_to_hdf(images, transcriptions, alphabet, 'test.h5')
def perspective_API(data, api_key):
    """Score generated outputs with the Perspective API.

    Returns (scores, all_scores): `all_scores` holds the raw attribute
    dicts; `scores` holds 1 - max(attribute score) per sample, so higher
    means less toxic.
    """
    from .perspective import PerspectiveApiScorer
    scorer = PerspectiveApiScorer(api_key=api_key)
    scores = []
    all_scores = []
    for sample in tqdm(data):
        sample_scores = scorer.get_scores(sample['output'])
        all_scores.append(sample_scores)
        scores.append(1 - np.max(list(sample_scores.values())))
    return (scores, all_scores)
class Foo():
    """Simple value object compared by attribute dictionary.

    Fix: __eq__ previously did `self.__dict__ == other.__dict__`
    unconditionally, so comparing against a type without __dict__
    (e.g. `Foo(1) == 5`) raised AttributeError.  It now returns
    NotImplemented for foreign types, letting Python fall back to the
    default (False) result.
    """

    def __init__(self, x):
        self.x = x

    def __eq__(self, other):
        if not isinstance(other, Foo):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __repr__(self):
        return f'{type(self).__name__}(x={self.x!r})'
def configCLEVR():
    """Point the module-level `config` object at the CLEVR v1 dataset layout.

    Mutates global `config` in place: data paths, file-name templates,
    image feature dimensions, and length-limit buckets.
    """
    config.dataPath = '{dataBasedir}/CLEVR_v1/data'.format(dataBasedir=config.dataBasedir)
    # '{tier}' placeholder is substituted later (train/val/test)
    config.datasetFilename = 'CLEVR_{tier}_questions.json'
    config.wordVectorsFile = './CLEVR_v1/data/glove/glove.6B.{dim}d.txt'.format(dim=config.wrdEmbDim)
    # presumably (H, W, C) of precomputed CNN feature maps — confirm downstream use
    config.imageDims = [14, 14, 1024]
    # assumes these are length buckets for programs/questions — TODO confirm
    config.programLims = [5, 10, 15, 20]
    config.questionLims = [10, 15, 20, 25]
class Cookies(object):
    """Redis-backed pool of logged-in account cookies shared by crawler hosts.

    Storage layout (on `cookies_con`):
      - hash 'account': name -> JSON {'cookies': ..., 'loginTime': epoch}
      - list 'account_queue': round-robin rotation of account names
      - hash 'host' / 'cookies_host': host-to-account pinning ('quick' mode)

    NOTE(review): every method takes `cls` but carries no @classmethod
    decorator — the decorators look stripped from this copy; confirm how
    these methods are invoked before changing anything.
    """

    def store_cookies(cls, name, cookies):
        """Persist `cookies` for account `name` and enqueue the account."""
        # loginTime lets check_cookies_timeout() expire the session later
        pickled_cookies = json.dumps({'cookies': cookies, 'loginTime': datetime.datetime.now().timestamp()})
        cookies_con.hset('account', name, pickled_cookies)
        cls.push_in_queue(name)

    def push_in_queue(cls, name):
        """Append `name` to the rotation queue unless it is already queued."""
        for i in range(cookies_con.llen('account_queue')):
            tn = cookies_con.lindex('account_queue', i).decode('utf-8')
            if tn:
                if (tn == name):
                    return
        cookies_con.rpush('account_queue', name)

    def fetch_cookies(cls):
        """Dispatch to the fetch strategy selected by the global MODE."""
        if (MODE == 'normal'):
            return cls.fetch_cookies_of_normal()
        else:
            return cls.fetch_cookies_of_quick()

    def fetch_cookies_of_normal(cls):
        """Round-robin fetch: pop a name, validate, re-queue it, return it.

        Returns (name, cookies) or None when no valid account is found.
        """
        for i in range(cookies_con.llen('account_queue')):
            name = cookies_con.lpop('account_queue').decode('utf-8')
            j_account = cookies_con.hget('account', name)
            if (not j_account):
                # NOTE(review): returns immediately (the popped name is not
                # re-queued) instead of trying the next account — confirm
                # this is intended.
                return None
            else:
                j_account = j_account.decode('utf-8')
            if cls.check_cookies_timeout(j_account):
                cls.delete_cookies(name)
                continue
            # still valid: rotate the name to the back of the queue
            cookies_con.rpush('account_queue', name)
            account = json.loads(j_account)
            return (name, account['cookies'])
        return None

    def fetch_cookies_of_quick(cls):
        """Host-affinity fetch: reuse this host's pinned account when still
        valid, otherwise claim the next valid account from the queue.

        Returns (name, cookies) or None when the queue is exhausted.
        """
        hostname = socket.gethostname()
        my_cookies_name = cookies_con.hget('host', hostname)
        if my_cookies_name:
            my_cookies = cookies_con.hget('account', my_cookies_name)
            if (not cls.check_cookies_timeout(my_cookies)):
                my_cookies = json.loads(my_cookies.decode('utf-8'))
                return (my_cookies_name, my_cookies['cookies'])
            else:
                cls.delete_cookies(my_cookies_name)
        while True:
            try:
                name = cookies_con.lpop('account_queue').decode('utf-8')
            except AttributeError:
                # lpop returned None -> queue is empty
                return None
            else:
                j_account = cookies_con.hget('account', name)
                if cls.check_cookies_timeout(j_account):
                    cls.delete_cookies(name)
                    continue
                j_account = j_account.decode('utf-8')
                # record that this host now shares the account
                hosts = cookies_con.hget('cookies_host', name)
                if (not hosts):
                    hosts = dict()
                else:
                    hosts = hosts.decode('utf-8')
                    hosts = json.loads(hosts)
                hosts[hostname] = 1
                cookies_con.hset('cookies_host', name, json.dumps(hosts))
                account = json.loads(j_account)
                cookies_con.hset('host', hostname, name)
                # keep the account claimable while under the sharing limit
                if (len(hosts) < SHARE_HOST_COUNT):
                    cookies_con.lpush('account_queue', name)
                return (name, account['cookies'])

    def delete_cookies(cls, name):
        """Drop the account's cookies (and its sharing record in quick mode)."""
        cookies_con.hdel('account', name)
        if (MODE == 'quick'):
            cookies_con.hdel('cookies_host', name)
        return True

    def check_login_task(cls):
        """Clear any pending login tasks from the broker queue."""
        if (broker_con.llen('login_queue') > 0):
            broker_con.delete('login_queue')

    def check_cookies_timeout(cls, cookies):
        """Return True when `cookies` is missing or older than
        `cookie_expire_time` hours (i.e. must be re-logged-in)."""
        if (cookies is None):
            return True
        if isinstance(cookies, bytes):
            cookies = cookies.decode('utf-8')
        cookies = json.loads(cookies)
        login_time = datetime.datetime.fromtimestamp(cookies['loginTime'])
        if ((datetime.datetime.now() - login_time) > datetime.timedelta(hours=cookie_expire_time)):
            crawler.warning('The account has been expired')
            return True
        return False
_params({'X': ['array-like', 'sparse matrix'], 'y': ['array-like'], 'center': ['boolean'], 'force_finite': ['boolean']}, prefer_skip_nested_validation=True)
def r_regression(X, y, *, center=True, force_finite=True):
    """Pearson correlation between each column of X and the target y.

    With `center`, y and (implicitly) each feature are mean-centered; the
    column norms are then corrected for the removed means so sparse X never
    has to be densified.  With `force_finite`, NaN coefficients (constant
    feature or constant y) are replaced by 0.0.
    Returns an array of shape (n_features,).
    """
    X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
    n_samples = X.shape[0]
    if center:
        y = y - np.mean(y)
        col_means = X.mean(axis=0)
        # sparse .mean() may return np.matrix; flatten it to 1-D
        if isinstance(col_means, np.matrix):
            col_means = col_means.getA1()
        # ||x - mean||^2 == ||x||^2 - n * mean^2 (avoids densifying X)
        X_norms = np.sqrt(row_norms(X.T, squared=True) - n_samples * col_means ** 2)
    else:
        X_norms = row_norms(X.T)
    corr = safe_sparse_dot(y, X)
    with np.errstate(divide='ignore', invalid='ignore'):
        corr /= X_norms
        corr /= np.linalg.norm(y)
    if force_finite and not np.isfinite(corr).all():
        corr[np.isnan(corr)] = 0.0
    return corr
class ConvRelationModel(paddle.nn.Layer):
    """Relation-network head: two conv blocks then a small MLP that maps a
    prototype/query pair to a similarity score in [0, 1]."""

    init_weight_attr = paddle.framework.ParamAttr(initializer=nn.initializer.TruncatedNormal(mean=0.0, std=0.01))
    init_bias_attr = paddle.framework.ParamAttr(initializer=nn.initializer.Constant(value=0.0))

    def __init__(self, input_size=(64, 21, 21), output_size=5, num_filters=64):
        super(ConvRelationModel, self).__init__()
        # two conv blocks, each followed by 2x2 pooling
        self.conv0 = ConvBlock(in_channels=num_filters * 2, out_channels=num_filters, pooling_size=(2, 2))
        self.conv1 = ConvBlock(in_channels=num_filters, out_channels=num_filters, pooling_size=(2, 2))
        # two 2x2 poolings shrink each spatial dim by a factor of 4 (>> 2)
        flat_features = input_size[0] * (input_size[1] >> 2) * (input_size[2] >> 2)
        self.fc0 = paddle.nn.Sequential(
            paddle.nn.Flatten(),
            paddle.nn.Linear(in_features=flat_features, out_features=8, weight_attr=self.init_weight_attr, bias_attr=self.init_bias_attr),
            paddle.nn.ReLU())
        self.fc1 = paddle.nn.Sequential(
            paddle.nn.Linear(in_features=8, out_features=1, weight_attr=self.init_weight_attr, bias_attr=self.init_bias_attr),
            paddle.nn.Sigmoid())
        self.output_size = output_size

    def forward(self, prototypes, query_embeddings):
        """Score every query embedding against every class prototype;
        returns a (batch_size, ways) tensor of relation scores."""
        ways = prototypes.shape[0]
        batch_size = query_embeddings.shape[0]
        assert ways == self.output_size
        scores = self.conv0(_reshape_input(prototypes, query_embeddings))
        scores = self.conv1(scores)
        scores = self.fc0(scores)
        scores = self.fc1(scores)
        return paddle.reshape(scores, shape=(batch_size, ways))
def flat_ner_performance(pred_start, pred_end, pred_span, gold_start, gold_end, gold_span, ner_cate, label_lst, threshold=0.5, dims=2):
    """Convert span predictions to BMES label sequences and score them.

    dims=1 handles a single example and returns
    (pred_idx, gold_idx, pred_labels, gold_labels); dims=2 maps over a
    batch (recursing with dims=1) and returns
    (average_acc, span_precision, span_recall, span_f1).
    """
    cate_idx2label = dict(enumerate(label_lst))
    up_label_lst = update_label_lst(label_lst)
    label2idx = {label: i for i, label in enumerate(up_label_lst)}
    if dims == 1:
        cate = cate_idx2label[ner_cate]
        pred_labels = flat_transform_bmes_label(pred_start, pred_end, pred_span, cate, threshold=threshold)
        gold_labels = flat_transform_bmes_label(gold_start, gold_end, gold_span, cate, threshold=threshold)
        pred_idx = [label2idx[label] for label in pred_labels]
        gold_idx = [label2idx[label] for label in gold_labels]
        return (pred_idx, gold_idx, pred_labels, gold_labels)
    if dims == 2:
        pred_idx_lst, gold_idx_lst = [], []
        pred_label_lst, gold_label_lst = [], []
        acc_lst = []
        batch = zip(pred_start, pred_end, pred_span, gold_start, gold_end, gold_span, ner_cate)
        for (p_start, p_end, p_span, g_start, g_end, g_span, cate) in batch:
            # per-example call uses the default threshold (0.5)
            p_idx, g_idx, p_label, g_label = flat_ner_performance(p_start, p_end, p_span, g_start, g_end, g_span, cate, label_lst, dims=1)
            pred_idx_lst.append(p_idx)
            gold_idx_lst.append(g_idx)
            pred_label_lst.append(p_label)
            gold_label_lst.append(g_label)
            # accuracy accumulated over the prefix processed so far
            acc_lst.append(compute_acc(pred_idx_lst, gold_idx_lst))
        result_score = flat_span_f1.mask_span_f1(pred_idx_lst, gold_idx_lst, label_list=up_label_lst)
        average_acc = sum(acc_lst) / (len(acc_lst) * 1.0)
        return (average_acc, result_score['span-precision'], result_score['span-recall'], result_score['span-f1'])
_cache(maxsize=256, typed=True)
def _compile_pattern(pat, case_sensitive):
    """Translate a shell-style pattern into a compiled regex matcher.

    Accepts str or bytes; bytes round-trip through Latin-1 because
    translate() operates on str.  Returns the bound `.match` method.
    """
    if isinstance(pat, bytes):
        res = translate(pat.decode('ISO-8859-1')).encode('ISO-8859-1')
    else:
        res = translate(pat)
    flags = 0 if case_sensitive else re.IGNORECASE
    return re.compile(res, flags).match
class CUHK02(ImageDataset):
    """CUHK02 person re-identification dataset.

    Five camera pairs P1..P5; P5 is held out: its cam1 images form the
    query set and cam2 the gallery, while P1..P4 make up the training set
    with pids relabelled to a contiguous range.
    """

    dataset_dir = 'cuhk02'
    cam_pairs = ['P1', 'P2', 'P3', 'P4', 'P5']
    test_cam_pair = 'P5'

    def __init__(self, root='', **kwargs):
        self.root = osp.abspath(osp.expanduser(root))
        self.dataset_dir = osp.join(self.root, self.dataset_dir, 'Dataset')
        self.check_before_run([self.dataset_dir])
        train, query, gallery = self.get_data_list()
        super(CUHK02, self).__init__(train, query, gallery, **kwargs)

    def get_data_list(self):
        """Scan all camera pairs; return (train, query, gallery) lists of
        (image_path, pid, camid) tuples."""
        num_train_pids = 0
        camid = 0
        train, query, gallery = [], [], []
        for cam_pair in self.cam_pairs:
            pair_dir = osp.join(self.dataset_dir, cam_pair)
            impaths1 = glob.glob(osp.join(pair_dir, 'cam1', '*.png'))
            impaths2 = glob.glob(osp.join(pair_dir, 'cam2', '*.png'))
            if cam_pair == self.test_cam_pair:
                # held-out pair: cam1 -> query, cam2 -> gallery, raw pids
                for impath in impaths1:
                    query.append((impath, int(osp.basename(impath).split('_')[0]), camid))
                camid += 1
                for impath in impaths2:
                    gallery.append((impath, int(osp.basename(impath).split('_')[0]), camid))
                camid += 1
            else:
                # training pair: relabel pids into a contiguous global range
                pids1 = [osp.basename(p).split('_')[0] for p in impaths1]
                pids2 = [osp.basename(p).split('_')[0] for p in impaths2]
                pids = set(pids1 + pids2)
                pid2label = {pid: label + num_train_pids for label, pid in enumerate(pids)}
                for impath in impaths1:
                    train.append((impath, pid2label[osp.basename(impath).split('_')[0]], camid))
                camid += 1
                for impath in impaths2:
                    train.append((impath, pid2label[osp.basename(impath).split('_')[0]], camid))
                camid += 1
                num_train_pids += len(pids)
        return (train, query, gallery)
def parse_args():
    """CLI options for training/running the MWT (multi-word token) expander.

    Returns the parsed argparse.Namespace.
    """
    argp = argparse.ArgumentParser()
    # data files and run mode
    argp.add_argument('--data_dir', type=str, default='data/mwt', help='Root dir for saving models.')
    argp.add_argument('--train_file', type=str, default=None, help='Input file for data loader.')
    argp.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.')
    argp.add_argument('--output_file', type=str, default=None, help='Output CoNLL-U file.')
    argp.add_argument('--gold_file', type=str, default=None, help='Output CoNLL-U file.')
    argp.add_argument('--mode', default='train', choices=['train', 'predict'])
    argp.add_argument('--lang', type=str, help='Language')
    argp.add_argument('--shorthand', type=str, help='Treebank shorthand')
    # dictionary / ensembling behaviour
    argp.add_argument('--no_dict', dest='ensemble_dict', action='store_false', help='Do not ensemble dictionary with seq2seq. By default ensemble a dict.')
    argp.add_argument('--ensemble_early_stop', action='store_true', help='Early stopping based on ensemble performance.')
    argp.add_argument('--dict_only', action='store_true', help='Only train a dictionary-based MWT expander.')
    # seq2seq model hyper-parameters
    argp.add_argument('--hidden_dim', type=int, default=100)
    argp.add_argument('--emb_dim', type=int, default=50)
    argp.add_argument('--num_layers', type=int, default=1)
    argp.add_argument('--emb_dropout', type=float, default=0.5)
    argp.add_argument('--dropout', type=float, default=0.5)
    argp.add_argument('--max_dec_len', type=int, default=50)
    argp.add_argument('--beam_size', type=int, default=1)
    argp.add_argument('--attn_type', default='soft', choices=['soft', 'mlp', 'linear', 'deep'], help='Attention type')
    # optimization
    argp.add_argument('--sample_train', type=float, default=1.0, help='Subsample training data.')
    argp.add_argument('--optim', type=str, default='adam', help='sgd, adagrad, adam or adamax.')
    argp.add_argument('--lr', type=float, default=0.001, help='Learning rate')
    argp.add_argument('--lr_decay', type=float, default=0.9)
    argp.add_argument('--decay_epoch', type=int, default=30, help='Decay the lr starting from this epoch.')
    argp.add_argument('--num_epoch', type=int, default=30)
    argp.add_argument('--batch_size', type=int, default=50)
    argp.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
    # logging, checkpoints, environment
    argp.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
    argp.add_argument('--save_dir', type=str, default='saved_models/mwt', help='Root dir for saving models.')
    argp.add_argument('--save_name', type=str, default=None, help='File name to save the model')
    argp.add_argument('--seed', type=int, default=1234)
    argp.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
    argp.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
    return argp.parse_args()
def load_processed_ct_images(npy_filepath, clip_range):
    """Load a preprocessed CT volume, keep the central clip, normalize to [0, 1].

    Args:
        npy_filepath: path to a .npy stack of uint8 frames (T, H, W).
        clip_range: (lo, hi) fractions of the frame count to keep,
            e.g. (0.1, 0.9) drops the first and last 10% of frames.

    Returns:
        (th_images, img_info): a float tensor of shape (T', 1, H, W) scaled
        to [0, 1], and a dict with the source name, kept frame count, the
        clip range and the post-clip array shape.

    (A dead `if False:` debug block that wrote a video via zqlib was removed.)
    """
    images = np.load(npy_filepath)
    num_frames = len(images)
    # keep only the fraction of frames selected by clip_range
    left = int(num_frames * clip_range[0])
    right = int(num_frames * clip_range[1])
    images = images[left:right]
    num_frames = len(images)
    shape = images.shape
    images = np.asarray(images, dtype=np.float32) / 255.0
    images = np.expand_dims(images, axis=1)  # add a channel axis: (T, 1, H, W)
    img_info = {'name': npy_filepath, 'num_frames': num_frames, 'clip_range': clip_range, 'shape': shape}
    th_images = torch.from_numpy(images.copy()).float()
    return (th_images, img_info)
def deconv(x, channels, kernel=4, stride=2, padding='SAME', use_bias=True, sn=False, scope='deconv'):
    """Transposed 2-D convolution (TF1), optionally spectrally normalized.

    With `sn`, the kernel variable is created explicitly, wrapped in
    spectral_norm, and applied via tf.nn.conv2d_transpose; otherwise
    tf.layers.conv2d_transpose handles everything.  Returns the upsampled
    NHWC tensor.
    """
    with tf.variable_scope(scope):
        in_shape = x.get_shape().as_list()
        # output spatial size: stride upsampling, plus the kernel overhang
        # when padding is not 'SAME'
        out_h = in_shape[1] * stride
        out_w = in_shape[2] * stride
        if padding != 'SAME':
            overhang = max(kernel - stride, 0)
            out_h += overhang
            out_w += overhang
        output_shape = [in_shape[0], out_h, out_w, channels]
        if sn:
            w = tf.get_variable('kernel', shape=[kernel, kernel, channels, x.get_shape()[-1]], initializer=weight_init, regularizer=weight_regularizer)
            x = tf.nn.conv2d_transpose(x, filter=spectral_norm(w), output_shape=output_shape, strides=[1, stride, stride, 1], padding=padding)
            if use_bias:
                bias = tf.get_variable('bias', [channels], initializer=tf.constant_initializer(0.0))
                x = tf.nn.bias_add(x, bias)
        else:
            x = tf.layers.conv2d_transpose(inputs=x, filters=channels, kernel_size=kernel, kernel_initializer=weight_init, kernel_regularizer=weight_regularizer, strides=stride, padding=padding, use_bias=use_bias)
        return x
# NOTE(review): the '.spark' line below looks like a mangled decorator
# (probably '@pytest.mark.spark') — confirm against the original module.
.spark
def test_get_csr_matrix(spark, log2):
    """get_csr_matrix should build a dense-equivalent user x item matrix
    from per-user item/rating vectors collected out of the log fixture."""
    # collapse the interaction log to one row per user with parallel
    # item-index and rating lists
    grouped_log = log2.groupBy('user_idx').agg(sf.collect_list('item_idx').alias('vector_items'), sf.collect_list('relevance').alias('vector_ratings'))
    grouped_log = grouped_log.toPandas()
    csr_matrix = get_csr_matrix(grouped_log['user_idx'], grouped_log['vector_items'], grouped_log['vector_ratings'])
    actual_array = csr_matrix.toarray()
    # expected ratings per (user, item) cell for the log2 fixture
    expected_array = np.array([[3.0, 1.0, 2.0, 0.0], [3.0, 0.0, 0.0, 4.0], [0.0, 3.0, 0.0, 0.0]])
    assert np.array_equal(actual_array, expected_array)
class Unetconv_norm_lrelu(nn.Module):
    """3-D conv -> InstanceNorm3d -> LeakyReLU block with Kaiming init.

    Fix: the `bias` constructor argument was previously ignored (Conv3d was
    hard-coded to bias=False); it is now forwarded.  The default (False)
    preserves the old behaviour for existing callers.
    """

    def __init__(self, feat_in, feat_out, kernel_size=(3, 3, 3), padding_size=(1, 1, 1), init_stride=(1, 1, 1), bias=False):
        super(Unetconv_norm_lrelu, self).__init__()
        self.conv_norm_lrelu = nn.Sequential(
            nn.Conv3d(feat_in, feat_out, kernel_size, init_stride, padding_size, bias=bias),
            nn.InstanceNorm3d(feat_out),
            nn.LeakyReLU(inplace=True))
        for m in self.children():
            init_weights(m, init_type='kaiming')

    def forward(self, inputs):
        """Apply conv -> instance norm -> leaky ReLU."""
        return self.conv_norm_lrelu(inputs)
class BallQuery(Function):
    """Autograd op: for each query center, gather up to `sample_num` point
    indices whose distance to the center lies within the given radius band
    (computed by a compiled extension kernel).

    NOTE(review): torch.autograd.Function conventionally marks forward/
    backward with @staticmethod — the decorators may have been stripped
    from this copy; confirm before use.
    """

    def forward(ctx, min_radius: float, max_radius: float, sample_num: int, xyz: torch.Tensor, center_xyz: torch.Tensor) -> torch.Tensor:
        """Return an int tensor of shape (B, npoint, sample_num) holding the
        neighbour indices for each center; filled in-place by ext_module."""
        assert center_xyz.is_contiguous()
        assert xyz.is_contiguous()
        assert (min_radius < max_radius)
        (B, N, _) = xyz.size()
        npoint = center_xyz.size(1)
        # output buffer the extension kernel writes into
        idx = xyz.new_zeros(B, npoint, sample_num, dtype=torch.int)
        ext_module.ball_query_forward(center_xyz, xyz, idx, b=B, n=N, m=npoint, min_radius=min_radius, max_radius=max_radius, nsample=sample_num)
        if (torch.__version__ != 'parrots'):
            # index outputs carry no gradient
            ctx.mark_non_differentiable(idx)
        return idx

    def backward(ctx, a=None):
        """No gradient flows to any input (the output is an index tensor)."""
        return (None, None, None, None)
class Mish(Activation):
    """Activation wrapper that exposes a readable __name__ ('Mish'),
    e.g. for serialization or logging of the layer name."""

    def __init__(self, activation, **kwargs):
        super().__init__(activation, **kwargs)
        self.__name__ = 'Mish'
def remove_unreachable_nodes(graph):
    """Delete every node whose incident edge weights sum to zero.

    Fix: iterate over a snapshot of the node list, because `del_node`
    mutates the graph — deleting while iterating `graph.nodes()` directly
    can skip nodes or crash, depending on the graph implementation.
    """
    for node in list(graph.nodes()):
        total_weight = sum(graph.edge_weight((node, other)) for other in graph.neighbors(node))
        if total_weight == 0:
            graph.del_node(node)
def train_batchrl_agent(dataset, agent_tag, num_steps=120000, results_folder='/tmp/pong_results', seed=0, num_eval_eps=10):
    """Train a TD3 agent purely off-policy (batch RL) on a fixed dataset.

    The replay buffer is pre-filled with `dataset` and never refreshed with
    new environment experience; the agent is optimized for `num_steps`
    gradient steps and evaluated every 1000 steps over `num_eval_eps`
    episodes.  Prints the mean score over the final 10 evaluation epochs.
    """
    print('Training off-policy agent in batch mode on the dataset...')
    blockPrint()  # silence construction chatter from the agent framework
    # NOTE(review): passing make_td3_agent() as the first positional
    # argument of make_td3_agent looks suspicious — confirm the intended
    # base-config argument against make_td3_agent's signature.
    config = make_td3_agent(make_td3_agent(), args=AttrDict(parent_folder=results_folder, env=make_env, max_steps=int(1000000.0), replay_size=int(1600000.0), alg='td3', layers=(128, 128), tb=agent_tag, actor_lr=0.0003, critic_lr=0.0003, epoch_len=2500, batch_size=1000, clip_target_range=((- 50), 50), num_envs=10, num_eval_envs=10, optimize_every=1, gamma=0.98, seed=seed), agent_name_attrs=['alg', 'seed', 'tb'])
    # swap in the plain replay buffer and disable episode termination,
    # since no fresh experience is collected in batch mode
    del config.module_state_normalizer
    del config.module_replay
    config.module_replay = OldReplayBuffer()
    config.never_done = True
    config.min_experience_to_train_coda_attn = 0
    agent = mrl.config_to_agent(config)
    # load the entire offline dataset into the replay buffer
    agent.replay_buffer.buffer.add_batch(*dataset)
    enablePrint()
    # baseline evaluation before any optimization
    res = [np.mean(agent.eval(num_eval_eps).rewards)]
    for epoch in tqdm(range((num_steps // 1000))):
        for _ in range(1000):
            agent.train_mode()
            # env_steps is advanced manually since no env interaction happens
            agent.config.env_steps += 1
            agent.algorithm._optimize()
        agent.eval_mode()
        res += [np.mean(agent.eval(num_eval_eps).rewards)]
    agent.save()
    print('Done training agent!')
    print('Average score over final 10 epochs: {}'.format(np.mean(res[(- 10):])))
def _empty_figure(title: str, plot_height: int, plot_width: int) -> Figure:
    """Build a titled placeholder bokeh figure with empty ranges.

    An invisible zero-size rect glyph is added so the figure renders with
    the hover tool attached despite having no data.
    """
    placeholder = Figure(
        x_range=[],
        y_range=[],
        plot_height=plot_height,
        plot_width=plot_width,
        title=title,
        x_axis_location='below',
        tools='hover',
        toolbar_location=None,
        background_fill_color='#fafafa')
    placeholder.rect(x=0, y=0, width=0, height=0)
    return placeholder
def psnr(img1, img2, mask=None):
    """Per-sample peak signal-to-noise ratio for batched images in [0, 1].

    With `mask`, the squared error is restricted to masked pixels and
    averaged over 3 * (masked pixel count); without it, a plain per-sample
    mean is used.  Returns a tensor of shape (batch,).
    """
    batch = img1.size(0)
    if mask is not None:
        sq_err = (img1 - img2).pow(2) * mask
        # clamp the denominator so an all-zero mask cannot divide by zero
        denom = 3 * mask.view(batch, -1).sum(dim=1).clamp(min=1)
        mse = sq_err.view(batch, -1).sum(dim=1) / denom
    else:
        mse = (img1 - img2).pow(2).view(batch, -1).mean(dim=1)
    return 10 * (1 / mse).log10()
class CategoricalCNNPolicy(StochasticPolicy):
    """CNN policy with a categorical action distribution (TF 1.x compat).

    Wraps a CategoricalCNNModel over (possibly image) observations and
    compiles a `_f_prob` callable returning the sampled action ids and the
    per-action probabilities for a batch of observations.
    """

    def __init__(self, env_spec, filters, strides, padding, name='CategoricalCNNPolicy', hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.relu, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer(), output_nonlinearity=None, output_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), output_b_init=tf.zeros_initializer(), layer_normalization=False):
        """Validate the spaces, stash constructor args (for clone()), build
        the underlying model and compile the sampling callable."""
        assert isinstance(env_spec.action_space, akro.Discrete), 'CategoricalCNNPolicy only works with akro.Discrete action space.'
        super().__init__(name, env_spec)
        if isinstance(env_spec.observation_space, akro.Dict):
            # NOTE(review): message is missing a space ('supportwith');
            # it is a runtime string, so it is left untouched here.
            raise ValueError('CNN policies do not supportwith akro.Dict observation spaces.')
        # cache all constructor arguments so clone() can rebuild an
        # identical policy under a different name
        self._env_spec = env_spec
        self._obs_dim = env_spec.observation_space.shape
        self._action_dim = env_spec.action_space.n
        self._filters = filters
        self._strides = strides
        self._padding = padding
        self._hidden_sizes = hidden_sizes
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._output_nonlinearity = output_nonlinearity
        self._output_w_init = output_w_init
        self._output_b_init = output_b_init
        self._layer_normalization = layer_normalization
        self._f_prob = None
        self._dist = None
        self.model = CategoricalCNNModel(output_dim=self._action_dim, filters=filters, strides=strides, padding=padding, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init, output_nonlinearity=output_nonlinearity, output_w_init=output_w_init, output_b_init=output_b_init, layer_normalization=layer_normalization)
        self._initialize()

    def _initialize(self):
        """Build the model graph and compile the action-sampling callable."""
        with tf.compat.v1.variable_scope(self.name) as vs:
            self._variable_scope = vs
            # (batch, time) leading dims followed by the observation shape
            state_input = tf.compat.v1.placeholder(tf.float32, shape=((None, None) + self._obs_dim))
            if isinstance(self.env_spec.observation_space, akro.Image):
                # rescale raw image pixels from [0, 255] to [0, 1]
                augmented_state_input = tf.cast(state_input, tf.float32)
                augmented_state_input /= 255.0
            else:
                augmented_state_input = state_input
            self._dist = self.model.build(augmented_state_input).dist
            # callable returning (argmax of a sample, per-action probs)
            self._f_prob = tf.compat.v1.get_default_session().make_callable([tf.argmax(self._dist.sample(seed=deterministic.get_tf_seed_stream()), (- 1)), self._dist.probs], feed_list=[state_input])

    def build(self, state_input, name=None):
        """Build the model on an externally supplied input tensor (same
        image rescaling as _initialize)."""
        with tf.compat.v1.variable_scope(self._variable_scope):
            if isinstance(self.env_spec.observation_space, akro.Image):
                augmented_state_input = tf.cast(state_input, tf.float32)
                augmented_state_input /= 255.0
            else:
                augmented_state_input = state_input
            return self.model.build(augmented_state_input, name=name)

    def input_dim(self):
        # NOTE(review): conventionally a @property; the decorator may have
        # been stripped from this copy — confirm (same for distribution and
        # vectorized below).
        return self._obs_dim

    def distribution(self):
        # the categorical distribution built in _initialize()
        return self._dist

    def vectorized(self):
        # this policy supports batched (vectorized) sampling
        return True

    def get_action(self, observation):
        """Sample a single action; returns (action, info) with scalar info
        values taken from the first batch element."""
        (sample, prob) = self.get_actions([observation])
        return (sample, {k: v[0] for (k, v) in prob.items()})

    def get_actions(self, observations):
        """Sample actions for a batch of observations.

        Flattened image observations are unflattened first; a time axis of
        length 1 is added for the model and squeezed back out afterwards.
        """
        if (isinstance(self.env_spec.observation_space, akro.Image) and (len(observations[0].shape) < len(self.env_spec.observation_space.shape))):
            observations = self.env_spec.observation_space.unflatten_n(observations)
        (samples, probs) = self._f_prob(np.expand_dims(observations, 1))
        return (np.squeeze(samples), dict(prob=np.squeeze(probs, axis=1)))

    def clone(self, name):
        """Return a new policy with identical hyper-parameters (and copied
        model parameters) under a different variable-scope name."""
        new_policy = self.__class__(name=name, env_spec=self._env_spec, filters=self._filters, strides=self._strides, padding=self._padding, hidden_sizes=self._hidden_sizes, hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self._hidden_w_init, hidden_b_init=self._hidden_b_init, output_nonlinearity=self._output_nonlinearity, output_w_init=self._output_w_init, output_b_init=self._output_b_init, layer_normalization=self._layer_normalization)
        new_policy.model.parameters = self.model.parameters
        return new_policy

    def __getstate__(self):
        """Drop the unpicklable compiled callable and distribution."""
        new_dict = super().__getstate__()
        del new_dict['_f_prob']
        del new_dict['_dist']
        return new_dict

    def __setstate__(self, state):
        """Restore state and rebuild the graph-dependent members."""
        super().__setstate__(state)
        self._initialize()
_cache
def find_first_content_subclass(cls):
    """Walk cls's MRO from most basic to most derived and return the first
    proper subclass of ak.contents.Content; raise TypeError if none exists."""
    for candidate in reversed(cls.mro()):
        if candidate is ak.contents.Content:
            # skip the base class itself
            continue
        if issubclass(candidate, ak.contents.Content):
            return candidate
    raise TypeError
class SpeechTransformerEncoder(nn.Module):
    """Transformer encoder stack for speech feature sequences.

    Projects `input_dim` features to `d_model`, adds sinusoidal positional
    encodings, and runs `num_layers` self-attention encoder layers.
    """

    def __init__(self, d_model: int=512, input_dim: int=80, d_ff: int=2048, num_layers: int=6, num_heads: int=8, ffnet_style: str='ff', dropout_p: float=0.3, pad_id: int=0) -> None:
        super(SpeechTransformerEncoder, self).__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.pad_id = pad_id
        # feature projection into the model dimension, then norm + dropout
        self.input_proj = Linear(input_dim, d_model)
        self.input_norm = LayerNorm(d_model)
        self.input_dropout = nn.Dropout(p=dropout_p)
        self.positional_encoding = PositionalEncoding(d_model)
        self.layers = nn.ModuleList([SpeechTransformerEncoderLayer(d_model, num_heads, d_ff, dropout_p, ffnet_style) for _ in range(num_layers)])

    def forward(self, inputs: Tensor, input_lengths: Tensor=None) -> Tensor:
        """Encode a batch of feature sequences.

        Args:
            inputs: (batch, time, input_dim) feature tensor.
            input_lengths: per-sequence valid lengths used to build the
                padding mask for self-attention.

        Returns:
            The encoded (batch, time, d_model) tensor.  (Return annotation
            corrected: per-layer attention weights are discarded, so only
            the output tensor is returned, not a tuple.)
        """
        self_attn_mask = get_attn_pad_mask(inputs, input_lengths, inputs.size(1))
        output = self.input_dropout((self.input_norm(self.input_proj(inputs)) + self.positional_encoding(inputs.size(1))))
        for layer in self.layers:
            # attention weights from each layer are not kept
            (output, attn) = layer(output, self_attn_mask)
        return output
class CountEncoder(util.BaseEncoder, util.UnsupervisedTransformerMixin):
    """Count encoder: replaces each category with its occurrence count
    (or relative frequency when `normalize=True`) observed during fit.

    Rare categories can be merged into one group via `min_group_size`.
    The options `normalize`, `min_group_size`, `min_group_name`,
    `combine_min_nan_groups`, `handle_unknown` and `handle_missing` each
    accept either a scalar (applied to every column) or a dict keyed by
    column name.
    """
    prefit_ordinal = True
    encoding_relation = util.EncodingRelation.ONE_TO_ONE

    def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True,
                 handle_unknown='value', handle_missing='value', min_group_size=None,
                 combine_min_nan_groups=None, min_group_name=None, normalize=False):
        super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant,
                         return_df=return_df, handle_unknown=handle_unknown,
                         handle_missing=handle_missing)
        self.mapping = None
        self.normalize = normalize
        self.min_group_size = min_group_size
        self.min_group_name = min_group_name
        self.combine_min_nan_groups = combine_min_nan_groups
        self.ordinal_encoder = None
        self._check_set_create_attrs()
        # Per-column resolved copies of the options above, filled at fit
        # time by _check_set_create_dict_attrs().
        self._min_group_categories = {}
        self._normalize = {}
        self._min_group_name = {}
        self._combine_min_nan_groups = {}
        self._min_group_size = {}
        self._handle_unknown = {}
        self._handle_missing = {}

    def _fit(self, X, y=None, **kwargs):
        """Learn the per-column count/frequency mapping from X."""
        self.ordinal_encoder = OrdinalEncoder(verbose=self.verbose, cols=self.cols,
                                              handle_unknown='value', handle_missing='value')
        self.ordinal_encoder = self.ordinal_encoder.fit(X)
        X_ordinal = self.ordinal_encoder.transform(X)
        self._check_set_create_dict_attrs()
        self._fit_count_encode(X_ordinal, y)
        return self

    def _transform(self, X):
        """Map each configured column to its learned counts/frequencies."""
        for col in self.cols:
            # Normalize None markers to NaN so mapping lookups behave.
            X[col] = pd.Series([(el if (el is not None) else np.nan) for el in X[col]],
                               index=X[col].index)
            if (self.handle_missing == 'value'):
                if (not util.is_category(X[col].dtype)):
                    X[col] = X[col].fillna(np.nan)
            if (self._min_group_size is not None):
                if (col in self._min_group_categories.keys()):
                    # Re-label rare categories with their combined group name.
                    X[col] = X[col].map(self._min_group_categories[col]).fillna(X[col])
            X[col] = X[col].astype(object).map(self.mapping[col])
            if isinstance(self._handle_unknown[col], (int, np.integer)):
                X[col] = X[col].fillna(self._handle_unknown[col])
            elif ((self._handle_unknown[col] == 'value') and X[col].isna().any()
                    and (self._handle_missing[col] != 'return_nan')):
                X[col] = X[col].replace(np.nan, 0)
            elif ((self._handle_unknown[col] == 'error') and X[col].isna().any()):
                raise ValueError(f'Missing data found in column {col} at transform time.')
        return X

    def _fit_count_encode(self, X_in, y):
        """Build the per-column value-count mapping (indexed by category)."""
        X = X_in.copy(deep=True)
        if (self.cols is None):
            self.cols = X.columns
        self.mapping = {}
        for col in self.cols:
            mapping_values = X[col].value_counts(normalize=self._normalize[col])
            # value_counts runs on ordinal codes; translate the index back
            # to the original category values.
            ordinal_encoding = [m['mapping'] for m in self.ordinal_encoder.mapping if (m['col'] == col)][0]
            reversed_ordinal_enc = {v: k for (k, v) in ordinal_encoding.to_dict().items()}
            mapping_values.index = mapping_values.index.map(reversed_ordinal_enc)
            self.mapping[col] = mapping_values
            if (self._handle_missing[col] == 'return_nan'):
                self.mapping[col][np.nan] = np.nan
        if any([(val is not None) for val in self._min_group_size.values()]):
            self.combine_min_categories(X)

    def combine_min_categories(self, X):
        """Merge categories rarer than min_group_size into a single group."""
        for (col, mapper) in self.mapping.items():
            # Make the threshold comparable with the mapping values
            # (counts vs. frequencies, depending on normalize).
            if (self._normalize[col] and isinstance(self._min_group_size[col], int)):
                self._min_group_size[col] = (self._min_group_size[col] / X.shape[0])
            elif ((not self._normalize[col]) and isinstance(self._min_group_size[col], float)):
                # BUGFIX: this branch previously tested `not self._normalize`
                # (truthiness of the whole per-column dict, which is always
                # False once populated), so a float threshold was never
                # converted back to an absolute count.
                self._min_group_size[col] = (self._min_group_size[col] * X.shape[0])
            if (self._combine_min_nan_groups[col] is True):
                min_groups_idx = (mapper < self._min_group_size[col])
            elif (self._combine_min_nan_groups[col] == 'force'):
                # Always fold the NaN group in, regardless of its size.
                min_groups_idx = ((mapper < self._min_group_size[col]) | mapper.index.isna())
            else:
                min_groups_idx = ((mapper < self._min_group_size[col]) & (~ mapper.index.isna()))
            min_groups_sum = mapper.loc[min_groups_idx].sum()
            if ((min_groups_sum > 0) and (min_groups_idx.sum() > 1) and (not min_groups_idx.loc[(~ min_groups_idx.index.isna())].all())):
                if isinstance(self._min_group_name[col], str):
                    min_group_mapper_name = self._min_group_name[col]
                else:
                    # Default group name: merged category names, joined.
                    min_group_mapper_name = '_'.join([str(idx) for idx in mapper.loc[min_groups_idx].index.astype(str).sort_values()])
                self._min_group_categories[col] = {cat: min_group_mapper_name for cat in mapper.loc[min_groups_idx].index.tolist()}
                if (not min_groups_idx.all()):
                    mapper = mapper.loc[(~ min_groups_idx)]
                    mapper[min_group_mapper_name] = min_groups_sum
                self.mapping[col] = mapper

    def _check_set_create_attrs(self):
        """Validate scalar/dict constructor arguments at construction time."""
        if (not ((self.combine_min_nan_groups in ['force', True, False, None]) or isinstance(self.combine_min_nan_groups, dict))):
            raise ValueError("'combine_min_nan_groups' should be one of: ['force', True, False, None] or type dict.")
        if ((self.handle_missing == 'return_nan') and (self.combine_min_nan_groups == 'force')):
            raise ValueError("Cannot have `handle_missing` == 'return_nan' and 'combine_min_nan_groups' == 'force' for all columns.")
        if ((self.combine_min_nan_groups is not None) and (self.min_group_size is None)):
            pass
        if ((self.min_group_name is not None) and (self.min_group_size is None)):
            raise ValueError('`min_group_name` only works when `min_group_size` is set for all columns.')
        if (self.combine_min_nan_groups is None):
            self.combine_min_nan_groups = True

    def _check_set_create_dict_attrs(self):
        """Expand scalar options to per-column dicts and cross-validate them."""
        dict_attrs = {'normalize': False, 'min_group_name': None, 'combine_min_nan_groups': True,
                      'min_group_size': None, 'handle_unknown': 'value', 'handle_missing': 'value'}
        for (attr_name, attr_default) in dict_attrs.items():
            attr = copy(getattr(self, attr_name))
            if isinstance(attr, dict):
                # Fill missing columns with the documented default.
                for col in self.cols:
                    if (col not in attr):
                        attr[col] = attr_default
                setattr(self, ('_' + attr_name), attr)
            else:
                attr_dict = {}
                for col in self.cols:
                    attr_dict[col] = attr
                setattr(self, ('_' + attr_name), attr_dict)
        for col in self.cols:
            if ((self._handle_missing[col] == 'return_nan') and (self._combine_min_nan_groups[col] == 'force')):
                raise ValueError(f"Cannot have `handle_missing` == 'return_nan' and 'combine_min_nan_groups' == 'force' for columns `{col}`.")
            if ((self._combine_min_nan_groups[col] is not True) and (self._min_group_size[col] is None)):
                raise ValueError(f'`combine_min_nan_groups` only works when `min_group_size` is set for column {col}.')
            if ((self._min_group_name[col] is not None) and (self._min_group_size[col] is None)):
                raise ValueError(f'`min_group_name` only works when `min_group_size` is set for column {col}.')
def find_ops(optype):
    """Return all operations of the given type in the default TF graph."""
    graph = tf.get_default_graph()
    return [op for op in graph.get_operations() if op.type == optype]
def train_model_but_load_prev_model_weights(params: Params, serialization_dir: str, prev_best_model: Model, file_friendly_logging: bool=False, recover: bool=False, force: bool=False) -> Model:
    """AllenNLP-style training, except the fresh model is seeded with weights
    transferred from `prev_best_model` before training starts.

    Args:
        params: full experiment configuration (consumed via pop()).
        serialization_dir: directory for config, vocab, weights and metrics.
        prev_best_model: previously trained model whose weights initialize
            the new model.
        file_friendly_logging: passed through to prepare_global_logging.
        recover: resume from an existing serialization directory.
        force: accepted for signature compatibility but not used in this body.

    Returns:
        The model with the best-epoch weights loaded.
    """
    prepare_environment(params)
    create_serialization_dir(params, serialization_dir, recover)
    prepare_global_logging(serialization_dir, file_friendly_logging)
    cuda_device = params.params.get('trainer').get('cuda_device', (- 1))
    if isinstance(cuda_device, list):
        for device in cuda_device:
            check_for_gpu(device)
    else:
        check_for_gpu(cuda_device)
    params.to_file(os.path.join(serialization_dir, CONFIG_NAME))
    all_datasets = datasets_from_params(params)
    datasets_for_vocab_creation = set(params.pop('datasets_for_vocab_creation', all_datasets))
    for dataset in datasets_for_vocab_creation:
        if (dataset not in all_datasets):
            raise ConfigurationError(f"invalid 'dataset_for_vocab_creation' {dataset}")
    logger.info('From dataset instances, %s will be considered for vocabulary creation.', ', '.join(datasets_for_vocab_creation))
    vocab = Vocabulary.from_params(params.pop('vocabulary', {}), (instance for (key, dataset) in all_datasets.items() for instance in dataset if (key in datasets_for_vocab_creation)))
    model = Model.from_params(vocab=vocab, params=params.pop('model'))
    # Key difference from a vanilla train loop: copy the previous best
    # model's weights into the newly constructed model.
    model = transfer_prev_model_weights_to_new_model(prev_best_model, model)
    vocab.save_to_files(os.path.join(serialization_dir, 'vocabulary'))
    iterator = DataIterator.from_params(params.pop('iterator'))
    iterator.index_with(vocab)
    validation_iterator_params = params.pop('validation_iterator', None)
    if validation_iterator_params:
        validation_iterator = DataIterator.from_params(validation_iterator_params)
        validation_iterator.index_with(vocab)
    else:
        validation_iterator = None
    train_data = all_datasets['train']
    validation_data = all_datasets.get('validation')
    test_data = all_datasets.get('test')
    trainer_params = params.pop('trainer')
    no_grad_regexes = trainer_params.pop('no_grad', ())
    # Freeze parameters whose names match any configured no_grad regex.
    for (name, parameter) in model.named_parameters():
        if any((re.search(regex, name) for regex in no_grad_regexes)):
            parameter.requires_grad_(False)
    trainer = Trainer.from_params(model=model, serialization_dir=serialization_dir, iterator=iterator, train_data=train_data, validation_data=validation_data, params=trainer_params, validation_iterator=validation_iterator)
    evaluate_on_test = params.pop_bool('evaluate_on_test', False)
    params.assert_empty('base train command')
    try:
        metrics = trainer.train()
    except KeyboardInterrupt:
        # Best-effort archiving on Ctrl-C so partial training is not lost.
        if os.path.exists(os.path.join(serialization_dir, _DEFAULT_WEIGHTS)):
            logging.info('Training interrupted by the user. Attempting to create a model archive using the current best epoch weights.')
            archive_model(serialization_dir, files_to_archive=params.files_to_archive)
        raise
    archive_model(serialization_dir, files_to_archive=params.files_to_archive)
    logger.info('Loading the best epoch weights.')
    best_model_state_path = os.path.join(serialization_dir, 'best.th')
    best_model_state = torch.load(best_model_state_path)
    best_model = model
    best_model.load_state_dict(best_model_state)
    if (test_data and evaluate_on_test):
        logger.info('The model will be evaluated using the best epoch weights.')
        test_metrics = evaluate(best_model, test_data, (validation_iterator or iterator), cuda_device=trainer._cuda_devices[0])
        for (key, value) in test_metrics.items():
            metrics[('test_' + key)] = value
    elif test_data:
        logger.info("To evaluate on the test set after training, pass the 'evaluate_on_test' flag, or use the 'allennlp evaluate' command.")
    dump_metrics(os.path.join(serialization_dir, 'metrics.json'), metrics, log=True)
    return best_model
# NOTE(review): the two lines below look like truncated decorators
# (presumably @pytest.mark.skipif(...) and @test_utils.test(...)) — confirm
# against the original source.
.skipif((not _ti_core.GGUI_AVAILABLE), reason='GGUI Not Available')
_utils.test(arch=supported_archs)
def test_draw_lines():
    """Render particles plus 3D lines off-screen and compare the frame
    against the stored 'test_draw_lines' reference image."""
    N = 10
    particles_pos = ti.Vector.field(3, dtype=ti.f32, shape=N)
    points_pos = ti.Vector.field(3, dtype=ti.f32, shape=N)
    # NOTE(review): this helper presumably carried a @ti.kernel decorator in
    # the original source — confirm.
    def init_points_pos(points: ti.template()):
        for i in range(points.shape[0]):
            points[i] = [i for j in ti.static(range(3))]
    init_points_pos(particles_pos)
    init_points_pos(points_pos)
    window = ti.ui.Window('Test for Drawing 3d-lines', (768, 768), show_window=False)
    canvas = window.get_canvas()
    scene = window.get_scene()
    camera = ti.ui.Camera()
    camera.position(0, 5, (- 10))
    camera.lookat(3, 3, 1)
    def render():
        scene.set_camera(camera)
        scene.ambient_light((0.8, 0.8, 0.8))
        scene.point_light(pos=(0.5, 1.5, 1.5), color=(1, 1, 1))
        scene.particles(particles_pos, color=(0.68, 0.26, 0.19), radius=0.5)
        scene.lines(points_pos, color=(0.28, 0.68, 0.99), width=5.0)
        canvas.scene(scene)
    # Warm-up renders before grabbing the frame that is verified.
    for _ in range(RENDER_REPEAT):
        render()
        window.get_image_buffer_as_numpy()
    render()
    verify_image(window.get_image_buffer_as_numpy(), 'test_draw_lines')
    window.destroy()
class VisdomLinePlotter(object):
    """Plots line traces in Visdom, one window per variable name."""

    def __init__(self, env_name='main'):
        self.viz = visdom.Visdom()
        self.viz.check_connection()
        self.env = env_name
        # var_name -> visdom window handle
        self.plots = {}

    def plot(self, var_name, split_name, x, y):
        """Append the point (x, y) to the `split_name` trace of `var_name`."""
        window = self.plots.get(var_name)
        if window is None:
            # First point for this variable: create the window. The point is
            # duplicated because a line needs at least two points to draw.
            self.plots[var_name] = self.viz.line(
                X=np.array([x, x]), Y=np.array([y, y]), env=self.env,
                opts=dict(legend=[split_name], title=var_name,
                          xlabel='Epochs', ylabel=var_name))
        else:
            self.viz.line(X=np.array([x]), Y=np.array([y]), env=self.env,
                          win=window, name=split_name, update='append')
class TuneAnalysis():
    """Result bundle for one hyperparameter-tuning run.

    NOTE(review): this class declares only annotated attributes; it was
    presumably decorated (e.g. with @dataclass) in the original source —
    confirm, since bare annotations alone create no instance attributes.
    """
    # Keyword arguments used to construct the model.
    model_kwargs: dict
    # Keyword arguments used for the training procedure.
    train_kwargs: dict
    # Primary objective value of this run.
    metric: float
    # Further metrics recorded alongside the objective.
    additional_metrics: dict
    # The hyperparameter search space that was explored.
    search_space: dict
    # Raw results object (type not determinable from this file).
    results: Any
def test1d_mask():
    """A masked exact-match point must be skipped: the query should return
    the nearest *unmasked* neighbour instead."""
    samples = np.arange(1000)
    np.random.shuffle(samples)
    masked_idx = np.nonzero(samples == 400)
    expected_first = np.nonzero(samples == 399)
    expected_second = np.nonzero(samples == 390)
    tree = KDTree(samples, leafsize=15)
    queries = np.arange(399.9, 299.9, -10)
    mask = np.zeros(samples.shape[0]).astype(bool)
    mask[masked_idx] = True
    dist, idx = tree.query(queries, mask=mask)
    # 400 is masked, so the best match for 399.9 is 399 at distance 0.9;
    # for 389.9 the nearest point is 390 at distance 0.1.
    assert idx[0] == expected_first
    assert np.isclose(dist[0], 0.9)
    assert idx[1] == expected_second
    assert np.isclose(dist[1], 0.1)
def string_to_dict(to_convert):
    """Parse a space-separated 'key=value' string into a dict.

    Only the first '=' of each token splits key from value, so values may
    themselves contain '='. Empty tokens (from repeated spaces) are skipped.
    """
    result = {}
    for token in to_convert.split(' '):
        if not token:
            continue
        parts = token.split('=', 1)
        result[parts[0]] = parts[1]
    return result
def prepare_results(p, r, f):
    """Format precision/recall/F1 fractions as a tab-separated report line.

    NOTE: `metric` is a free variable expected to be defined at module
    level elsewhere in this file.
    """
    return (f'\t{metric}:\tP: {100.0 * p:5.2f}'
            f'\tR: {100.0 * r:5.2f}\tF1: {100.0 * f:5.2f}')
class netcdf_variable():
    """In-memory representation of a netCDF variable.

    Wraps a numpy array with netCDF metadata (typecode, item size, declared
    shape, dimension names, attributes) and — when `maskandscale` is set —
    applies missing-value masking plus scale_factor/add_offset unpacking on
    element access, and the inverse packing on assignment.
    """

    def __init__(self, data, typecode, size, shape, dimensions, attributes=None, maskandscale=False):
        self.data = data
        self._typecode = typecode
        self._size = size
        self._shape = shape
        self.dimensions = dimensions
        self.maskandscale = maskandscale
        self._attributes = (attributes or {})
        # Expose netCDF attributes as plain Python attributes as well.
        for (k, v) in self._attributes.items():
            self.__dict__[k] = v

    def __setattr__(self, attr, value):
        # Mirror attribute writes into _attributes. During early __init__
        # _attributes does not exist yet, hence the AttributeError guard.
        try:
            self._attributes[attr] = value
        except AttributeError:
            pass
        self.__dict__[attr] = value

    def isrec(self):
        """True for record variables: non-scalar data whose declared first
        dimension is falsy (unlimited)."""
        return (bool(self.data.shape) and (not self._shape[0]))
    isrec = property(isrec)

    def shape(self):
        """Current shape of the underlying data array."""
        return self.data.shape
    shape = property(shape)

    def getValue(self):
        """Return the value of a scalar variable."""
        return self.data.item()

    def assignValue(self, value):
        """Assign to a scalar variable.

        Raises:
            RuntimeError: if the underlying array is read-only.
        """
        if (not self.data.flags.writeable):
            raise RuntimeError('variable is not writeable')
        self.data[:] = value

    def typecode(self):
        """Single-character type code of this variable."""
        return self._typecode

    def itemsize(self):
        """Size of one element in bytes."""
        return self._size

    def __getitem__(self, index):
        if (not self.maskandscale):
            return self.data[index]
        # Copy so that unpacking does not mutate the stored raw values.
        data = self.data[index].copy()
        missing_value = self._get_missing_value()
        data = self._apply_missing_value(data, missing_value)
        scale_factor = self._attributes.get('scale_factor')
        add_offset = self._attributes.get('add_offset')
        if ((add_offset is not None) or (scale_factor is not None)):
            data = data.astype(np.float64)
        if (scale_factor is not None):
            data = (data * scale_factor)
        if (add_offset is not None):
            data += add_offset
        return data

    def __setitem__(self, index, data):
        if self.maskandscale:
            missing_value = (self._get_missing_value() or getattr(data, 'fill_value', 999999))
            self._attributes.setdefault('missing_value', missing_value)
            self._attributes.setdefault('_FillValue', missing_value)
            # Pack: invert the offset/scale transform applied on read.
            data = ((data - self._attributes.get('add_offset', 0.0)) / self._attributes.get('scale_factor', 1.0))
            data = np.ma.asarray(data).filled(missing_value)
            if ((self._typecode not in 'fd') and (data.dtype.kind == 'f')):
                data = np.round(data)
        if self.isrec:
            # Record variables grow along the first axis on demand.
            if isinstance(index, tuple):
                rec_index = index[0]
            else:
                rec_index = index
            if isinstance(rec_index, slice):
                recs = ((rec_index.start or 0) + len(data))
            else:
                recs = (rec_index + 1)
            if (recs > len(self.data)):
                shape = ((recs,) + self._shape[1:])
                try:
                    self.data.resize(shape)
                except ValueError:
                    # resize() fails when the buffer is shared/not owned;
                    # fall back to reallocating a resized copy.
                    dtype = self.data.dtype
                    self.__dict__['data'] = np.resize(self.data, shape).astype(dtype)
        self.data[index] = data

    def _default_encoded_fill_value(self):
        """Default on-disk fill bytes for this variable's netCDF type."""
        nc_type = REVERSE[(self.typecode(), self.itemsize())]
        return FILLMAP[nc_type]

    def _get_encoded_fill_value(self):
        """Encoded _FillValue bytes, or the type default when absent or of
        the wrong width."""
        if ('_FillValue' in self._attributes):
            fill_value = np.array(self._attributes['_FillValue'], dtype=self.data.dtype).tobytes()
            if (len(fill_value) == self.itemsize()):
                return fill_value
            else:
                return self._default_encoded_fill_value()
        else:
            return self._default_encoded_fill_value()

    def _get_missing_value(self):
        """Missing value from _FillValue, then missing_value, else None."""
        if ('_FillValue' in self._attributes):
            missing_value = self._attributes['_FillValue']
        elif ('missing_value' in self._attributes):
            missing_value = self._attributes['missing_value']
        else:
            missing_value = None
        return missing_value

    def _apply_missing_value(self, data, missing_value):
        """Return `data` with entries equal to `missing_value` masked out.

        BUGFIX: the original definition omitted `self`, so the bound call
        `self._apply_missing_value(data, missing_value)` in __getitem__
        raised a TypeError (three arguments for two parameters).
        """
        if (missing_value is None):
            newdata = data
        else:
            try:
                missing_value_isnan = np.isnan(missing_value)
            except (TypeError, NotImplementedError):
                # Non-numeric missing values cannot be NaN-tested.
                missing_value_isnan = False
            if missing_value_isnan:
                mymask = np.isnan(data)
            else:
                mymask = (data == missing_value)
            newdata = np.ma.masked_where(mymask, data)
        return newdata
def evaluate(datasource, select, feature_metas, feature_column_names, label_meta, result_table, validation_metrics=['accuracy_score'], is_pai=False, pai_table='', model_params=None, transform_fn=None, feature_column_code=''):
    """Evaluate the saved XGBoost model ('my_model') on data selected from a
    database (or a PAI table) and store metric rows into result_table.

    NOTE(review): validation_metrics uses a mutable default list; it is not
    mutated in this body, but a None default would be safer.
    """
    if (not is_pai):
        conn = db.connect_with_data_source(datasource)
    else:
        conn = PaiIOConnection.from_table(pai_table)
    # Stream the evaluation data as a sequence of DMatrix batches.
    dpred = xgb_dataset(datasource, 'predict.txt', select, feature_metas, feature_column_names, label_meta, is_pai, pai_table, True, True, batch_size=DEFAULT_PREDICT_BATCH_SIZE, transform_fn=transform_fn, feature_column_code=feature_column_code)
    bst = xgb.Booster({'nthread': 4})
    bst.load_model('my_model')
    if (not model_params):
        # Fall back to the parameters recorded at training time.
        model_params = load_metadata('model_meta.json')['attributes']
    print('Start evaluating XGBoost model...')
    feature_file_id = 0
    for pred_dmatrix in dpred:
        evaluate_and_store_result(bst, pred_dmatrix, feature_file_id, validation_metrics, model_params, feature_column_names, label_meta, is_pai, conn, result_table)
        feature_file_id += 1
    print(('Done evaluating. Result table : %s' % result_table))
def test_with_gauss_fluctuations():
    """Confidence-interval computation from pre-generated toys: fit a
    Gaussian with zfit, build a FrequentistCalculator from stored toys and
    run interval() with qtilde False (expects a UserWarning) and True."""
    x_true = (- 2.0)
    minimizer = Minuit()
    bounds = ((- 10), 10)
    obs = zfit.Space('x', limits=bounds)
    mean = zfit.Parameter('mean', 0)
    sigma = zfit.Parameter('sigma', 1.0)
    model = zfit.pdf.Gauss(obs=obs, mu=mean, sigma=sigma)
    npzfile = f'{notebooks_dir}/toys/FC_toys_{x_true}.npz'
    data = zfit.data.Data.from_numpy(obs=obs, array=np.load(npzfile)['x'])
    nll = UnbinnedNLL(model=model, data=data)
    minimum = minimizer.minimize(loss=nll)
    minimum.hesse()
    toys_fname = f'{notebooks_dir}/toys/FC_toys_{x_true}.yml'
    calculator = FrequentistCalculator.from_yaml(toys_fname, minimum, minimizer)
    # Scan points are taken from the toys stored in the yaml file.
    keys = np.unique([k[0].value for k in calculator.keys()])
    keys.sort()
    poinull = POIarray(mean, keys)
    ci = ConfidenceInterval(calculator, poinull, qtilde=False)
    with pytest.warns(UserWarning):
        ci.interval(alpha=0.05, printlevel=0)
    ci = ConfidenceInterval(calculator, poinull, qtilde=True)
    ci.interval(alpha=0.05, printlevel=0)
class TestAPSimple(unittest.TestCase):
    """Sanity checks for average-precision computation on tiny hand-built
    ground-truth/prediction sets."""

    def setUp(self):
        # Minimal detection records: translation, class name, confidence.
        self.car1 = {'trans': (1, 1, 1), 'name': 'car', 'score': 1.0}
        self.car2 = {'trans': (3, 3, 1), 'name': 'car', 'score': 0.7}
        self.bicycle1 = {'trans': (5, 5, 1), 'name': 'bicycle', 'score': 1.0}
        self.bicycle2 = {'trans': (7, 7, 1), 'name': 'bicycle', 'score': 0.7}

    def check_ap(self, gts: Dict[(str, List[Dict])], preds: Dict[(str, List[Dict])], target_ap: float, detection_name: str='car', dist_th: float=2.0, min_precision: float=0.1, min_recall: float=0.1) -> None:
        """Compute AP for (gts, preds) and assert it is within 0.01 of
        target_ap."""
        metric_data = get_metric_data(gts, preds, detection_name, dist_th)
        ap = calc_ap(metric_data, min_precision=min_precision, min_recall=min_recall)
        self.assertGreaterEqual(0.01, abs((ap - target_ap)), msg='Incorrect AP')

    def test_no_data(self):
        """AP is zero when either side (or both) has no boxes."""
        gts = {'sample1': [self.car1]}
        preds = {'sample1': [self.car1]}
        empty = {'sample1': []}
        self.check_ap(empty, preds, target_ap=0.0)
        self.check_ap(gts, empty, target_ap=0.0)
        self.check_ap(empty, empty, target_ap=0.0)

    def test_one_sample(self):
        """Single-sample scenarios with varying matches, misses and an
        extra detection of another class."""
        self.check_ap({'sample1': [self.car1]}, {'sample1': [self.car1]}, target_ap=1.0, detection_name='car')
        self.check_ap({'sample1': [self.car1, self.car2]}, {'sample1': [self.car1]}, target_ap=(0.4 / 0.9), detection_name='car')
        self.check_ap({'sample1': [self.car1]}, {'sample1': [self.car1, self.car2]}, target_ap=1.0, detection_name='car')
        self.check_ap({'sample1': [self.car2]}, {'sample1': [self.car1, self.car2]}, target_ap=(((0.8 * 0.4) / 2) / (0.9 * 0.9)), detection_name='car')
        self.check_ap({'sample1': [self.car1]}, {'sample1': [self.car1, self.bicycle1]}, target_ap=1.0, detection_name='car')

    def test_two_samples(self):
        """AP aggregated over two samples, including an empty sample."""
        self.check_ap({'sample1': [self.car1], 'sample2': [self.car2]}, {'sample1': [self.car1], 'sample2': [self.car2]}, target_ap=1.0, detection_name='car')
        self.check_ap({'sample1': [self.car1], 'sample2': []}, {'sample1': [self.car1], 'sample2': []}, target_ap=1.0, detection_name='car')
        self.check_ap({'sample1': [self.car1], 'sample2': [self.car2]}, {'sample1': [self.car1], 'sample2': []}, target_ap=(0.4 / 0.9), detection_name='car')
def copy_docstring_templates(pydoc_files, output_dir):
    """Copy each `*_template.h` pydoc file into `output_dir` as `*.h` and
    record progress in a `docstring_status` file.

    Args:
        pydoc_files: iterable of template file paths.
        output_dir: destination directory (must already exist).
    """
    with open(os.path.join(output_dir, 'docstring_status'), 'w') as status_file:
        for pydoc_file in pydoc_files:
            # BUGFIX: the template was previously read via open(...).read()
            # with no close, leaking the file handle; use a context manager.
            with open(pydoc_file, 'r') as template:
                contents = template.read()
            output_pathname = os.path.join(
                output_dir, os.path.basename(pydoc_file).replace('_template.h', '.h'))
            with open(output_pathname, 'w') as file_out:
                file_out.write(contents)
            # One progress marker per processed file (original behavior).
            status_file.write('DONE')
def check_multiwoz_folders(data_folder):
    """Raise FileNotFoundError unless `<data_folder>/data.json` exists
    (sanity check for a MultiWOZ dataset layout)."""
    target = data_folder + '/data.json'
    if not os.path.exists(target):
        raise FileNotFoundError(
            'the folder %s does not exist (it is expected in the MultiWOZ dataset)' % target)
class SeparableConv2d_aspp(nn.Module):
    """Depthwise-separable convolution with BatchNorm + ReLU after both the
    depthwise and the pointwise stage (ASPP variant)."""

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, padding=0):
        super(SeparableConv2d_aspp, self).__init__()
        # Depthwise: one filter per input channel (groups == inplanes).
        self.depthwise = nn.Conv2d(inplanes, inplanes, kernel_size, stride,
                                   padding, dilation, groups=inplanes, bias=bias)
        self.depthwise_bn = nn.BatchNorm2d(inplanes)
        # Pointwise: 1x1 convolution mixing channels.
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
        self.pointwise_bn = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Apply depthwise conv-bn-relu, then pointwise conv-bn-relu."""
        out = self.relu(self.depthwise_bn(self.depthwise(x)))
        out = self.relu(self.pointwise_bn(self.pointwise(out)))
        return out
def _seg_24():
    """Return segment 24 of a Unicode mapping table.

    Each entry is (codepoint, status[, mapping]); presumably this is a slice
    of the idna package's UTS #46 data ('M' = mapped, 'V' = valid,
    'X' = disallowed, '3' = mapped under STD3 rules) — confirm against the
    generating script.
    """
    return [(9400, 'M', u'c'), (9401, 'M', u'd'), (9402, 'M', u'e'), (9403, 'M', u'f'), (9404, 'M', u'g'), (9405, 'M', u'h'), (9406, 'M', u'i'), (9407, 'M', u'j'), (9408, 'M', u'k'), (9409, 'M', u'l'), (9410, 'M', u'm'), (9411, 'M', u'n'), (9412, 'M', u'o'), (9413, 'M', u'p'), (9414, 'M', u'q'), (9415, 'M', u'r'), (9416, 'M', u's'), (9417, 'M', u't'), (9418, 'M', u'u'), (9419, 'M', u'v'), (9420, 'M', u'w'), (9421, 'M', u'x'), (9422, 'M', u'y'), (9423, 'M', u'z'), (9424, 'M', u'a'), (9425, 'M', u'b'), (9426, 'M', u'c'), (9427, 'M', u'd'), (9428, 'M', u'e'), (9429, 'M', u'f'), (9430, 'M', u'g'), (9431, 'M', u'h'), (9432, 'M', u'i'), (9433, 'M', u'j'), (9434, 'M', u'k'), (9435, 'M', u'l'), (9436, 'M', u'm'), (9437, 'M', u'n'), (9438, 'M', u'o'), (9439, 'M', u'p'), (9440, 'M', u'q'), (9441, 'M', u'r'), (9442, 'M', u's'), (9443, 'M', u't'), (9444, 'M', u'u'), (9445, 'M', u'v'), (9446, 'M', u'w'), (9447, 'M', u'x'), (9448, 'M', u'y'), (9449, 'M', u'z'), (9450, 'M', u'0'), (9451, 'V'), (10764, 'M', u''), (10765, 'V'), (10868, '3', u'::='), (10869, '3', u'=='), (10870, '3', u'==='), (10871, 'V'), (10972, 'M', u''), (10973, 'V'), (11124, 'X'), (11126, 'V'), (11158, 'X'), (11160, 'V'), (11264, 'M', u''), (11265, 'M', u''), (11266, 'M', u''), (11267, 'M', u''), (11268, 'M', u''), (11269, 'M', u''), (11270, 'M', u''), (11271, 'M', u''), (11272, 'M', u''), (11273, 'M', u''), (11274, 'M', u''), (11275, 'M', u''), (11276, 'M', u''), (11277, 'M', u''), (11278, 'M', u''), (11279, 'M', u''), (11280, 'M', u''), (11281, 'M', u''), (11282, 'M', u''), (11283, 'M', u''), (11284, 'M', u''), (11285, 'M', u''), (11286, 'M', u''), (11287, 'M', u''), (11288, 'M', u''), (11289, 'M', u''), (11290, 'M', u''), (11291, 'M', u''), (11292, 'M', u''), (11293, 'M', u''), (11294, 'M', u''), (11295, 'M', u''), (11296, 'M', u''), (11297, 'M', u''), (11298, 'M', u''), (11299, 'M', u'')]
def SymmetricPresentation(n):
    """Return the symmetric group S_n as a finitely presented group.

    The presentation is pulled from GAP's IsomorphismFpGroupByGenerators
    applied to the permutation representation; for n <= 1 the trivial
    presentation (free group on no generators, no relations) is returned.
    """
    from sage.groups.perm_gps.permgroup_named import SymmetricGroup
    from sage.groups.free_group import _lexi_gen
    n = Integer(n)
    if (n <= 1):
        # S_0 and S_1 are trivial.
        return FinitelyPresentedGroup(FreeGroup(()), ())
    perm_rep = SymmetricGroup(n)
    GAP_fp_rep = libgap.Image(libgap.IsomorphismFpGroupByGenerators(perm_rep, perm_rep.gens()))
    image_gens = GAP_fp_rep.FreeGeneratorsOfFpGroup()
    name_itr = _lexi_gen()
    F = FreeGroup([next(name_itr) for x in perm_rep.gens()])
    # Pull the GAP relators back into Sage's free group on fresh names.
    ret_rls = tuple([F(rel_word.TietzeWordAbstractWord(image_gens).sage()) for rel_word in GAP_fp_rep.RelatorsOfFpGroup()])
    return FinitelyPresentedGroup(F, ret_rls)
class ImageDecoder(object):
    """Decode JPEG bytes to an RGB array via a persistent TF session."""

    def __init__(self):
        # Build the decode graph once and reuse it for every image.
        self._sess = tf.Session()
        self._encoded_jpeg = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3)

    def decode_jpeg(self, encoded_jpeg):
        """Return the decoded image as an H x W x 3 array."""
        feed = {self._encoded_jpeg: encoded_jpeg}
        image = self._sess.run(self._decode_jpeg, feed_dict=feed)
        # The graph is built with channels=3, so enforce an RGB result.
        assert (len(image.shape) == 3)
        assert (image.shape[2] == 3)
        return image
def test_remove(default_test_case):
    """Removing by index drops exactly that statement and keeps order."""
    stmts = [MagicMock(st.Statement) for _ in range(3)]
    default_test_case._statements.extend(stmts)
    default_test_case.remove(1)
    assert default_test_case._statements == [stmts[0], stmts[2]]
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Create and initialize a discriminator network.

    Args:
        input_nc: number of input channels.
        ndf: number of filters in the first conv layer.
        netD: architecture name: 'basic' (fixed 3-layer PatchGAN),
            'n_layers' (PatchGAN with n_layers_D layers) or 'pixel'.
        n_layers_D: layer count used only when netD == 'n_layers'.
        norm: normalization layer type passed to get_norm_layer.
        init_type, init_gain: weight-initialization scheme and scale.
        gpu_ids: devices for init_net (mutable default is never mutated here).

    Returns:
        The initialized discriminator module.

    Raises:
        NotImplementedError: for an unrecognized netD name.
    """
    net = None
    norm_layer = get_norm_layer(norm_type=norm)
    if (netD == 'basic'):
        net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
    elif (netD == 'n_layers'):
        net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
    elif (netD == 'pixel'):
        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
    else:
        # BUGFIX: the message previously interpolated `net` (still None at
        # this point) instead of the offending architecture name `netD`.
        raise NotImplementedError(('Discriminator model name [%s] is not recognized' % netD))
    return init_net(net, init_type, init_gain, gpu_ids)
# NOTE(review): the '.skipif' line below looks like a truncated decorator
# (presumably @pytest.mark.skipif) — confirm against the original source.
.skipif((ctypes is None), reason='ctypes not available on this python installation')
class TestNdpointerCFunc(object):
    """Round-trip tests for passing numpy arrays through a C function using
    np.ctypeslib.ndpointer as argtypes/restype."""

    def test_arguments(self):
        """An ndim=2 ndpointer accepts 2-D input and rejects 3-D input."""
        c_forward_pointer.restype = ctypes.c_void_p
        c_forward_pointer.argtypes = (ndpointer(ndim=2),)
        c_forward_pointer(np.zeros((2, 3)))
        # A 3-D array violates ndim=2 and must fail argument conversion.
        assert_raises(ctypes.ArgumentError, c_forward_pointer, np.zeros((2, 3, 4)))

    # NOTE(review): truncated decorator (presumably @pytest.mark.parametrize).
    .parametrize('dt', [float, np.dtype(dict(formats=['<i4', '<i4'], names=['a', 'b'], offsets=[0, 2], itemsize=6))], ids=['float', 'overlapping-fields'])
    def test_return(self, dt):
        """A fully-specified ndpointer restype reconstructs the same array."""
        arr = np.zeros((2, 3), dt)
        ptr_type = ndpointer(shape=arr.shape, dtype=arr.dtype)
        c_forward_pointer.restype = ptr_type
        c_forward_pointer.argtypes = (ptr_type,)
        arr2 = c_forward_pointer(arr)
        assert_equal(arr2.dtype, arr.dtype)
        assert_equal(arr2.shape, arr.shape)
        # Same underlying buffer: the round trip is zero-copy.
        assert_equal(arr2.__array_interface__['data'], arr.__array_interface__['data'])

    def test_vague_return_value(self):
        """Without shape info the result stays an instance of the pointer type."""
        arr = np.zeros((2, 3))
        ptr_type = ndpointer(dtype=arr.dtype)
        c_forward_pointer.restype = ptr_type
        c_forward_pointer.argtypes = (ptr_type,)
        ret = c_forward_pointer(arr)
        assert_(isinstance(ret, ptr_type))
class ImportantConfigNode(TreeConfigNode):
    """Config-tree leaf whose label is tagged as important."""

    def modify_label(self, label):
        """Prefix the label so important nodes stand out."""
        return 'IMPORTANT=' + str(label)

    def init2(self, node_name):
        # Record which node was marked as important.
        self.props['is_important'] = node_name

    def get_children(self):
        """Leaf node: there are no children."""
        return []
def eval_f1(ref, pred):
    """Corpus-level F1 between reference token sequences and predictions.

    Args:
        ref: one entry per example; each entry is a list of reference
            sequences (each a list of tokens).
        pred: one predicted token sequence per example.

    Returns:
        Harmonic mean of the mean per-example precision and recall, or 0.0
        when both means are zero.

    Raises:
        AssertionError: if ref/pred lengths differ or are empty.
    """
    assert len(ref) == len(pred) > 0
    precisions = []
    recalls = []
    for refs_i, pred_i in zip(ref, pred):
        ref_set = {w for rs in refs_i for w in rs}
        pred_set = set(pred_i)
        # Precision: fraction of predicted tokens (with multiplicity) that
        # appear anywhere in the references.
        matched = sum(1 for w in pred_i if w in ref_set)
        precisions.append(matched / len(pred_i) if pred_i else 0)
        # Recall: fraction of reference tokens (with multiplicity) that are
        # covered by the prediction.
        covered = sum(1 for rs in refs_i for w in rs if w in pred_set)
        total = sum(len(rs) for rs in refs_i)
        recalls.append(covered / total if total > 0 else 0)
    precision = sum(precisions) / len(precisions)
    recall = sum(recalls) / len(recalls)
    if precision == recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)
_experiment
# NOTE(review): the line above looks like a truncated decorator
# (presumably @wrap_experiment) — confirm against the original source.
def vpg_cartpole(ctxt=None, seed=1):
    """Train VPG with a categorical MLP policy on CartPole-v1.

    Args:
        ctxt: snapshot/experiment context forwarded to LocalTFRunner.
        seed: RNG seed for reproducibility.
    """
    set_seed(seed)
    with LocalTFRunner(snapshot_config=ctxt) as runner:
        env = GarageEnv(env_name='CartPole-v1')
        policy = CategoricalMLPPolicy(name='policy', env_spec=env.spec, hidden_sizes=(32, 32))
        baseline = LinearFeatureBaseline(env_spec=env.spec)
        algo = VPG(env_spec=env.spec, policy=policy, baseline=baseline, max_path_length=100, discount=0.99, optimizer_args=dict(learning_rate=0.01))
        runner.setup(algo, env)
        runner.train(n_epochs=100, batch_size=10000)
class ExampleOperation(nn.Module):
    """3x3 Conv -> BatchNorm -> ReLU block that preserves channel count."""

    def __init__(self, channels):
        super(ExampleOperation, self).__init__()
        conv = nn.Conv2d(in_channels=channels, out_channels=channels,
                         kernel_size=(3, 3), padding=1)
        norm = nn.BatchNorm2d(num_features=channels)
        act = nn.ReLU(inplace=True)
        self.seq = nn.Sequential(conv, norm, act)

    def forward(self, x):
        """Apply the conv-norm-activation pipeline."""
        return self.seq(x)
def process_ptb3_revised(paths, dataset_name, *args):
    """Convert the LDC2015T13 revised PTB release into train/dev/test tree
    files (sections 02-21 / 22 / 23) under CONSTITUENCY_DATA_DIR.

    Remaps the ADJ-PRD label to ADJP-PRD and moves sentence-final
    punctuation to be a direct child of the root in the training trees.

    Raises:
        FileNotFoundError: when neither known input directory layout exists.
    """
    input_dir = os.path.join(paths['CONSTITUENCY_BASE'], 'english', 'LDC2015T13_eng_news_txt_tbnk-ptb_revised')
    if (not os.path.exists(input_dir)):
        # Fall back to the bare LDC2015T13 directory name.
        backup_input_dir = os.path.join(paths['CONSTITUENCY_BASE'], 'english', 'LDC2015T13')
        if (not os.path.exists(backup_input_dir)):
            raise FileNotFoundError(('Could not find ptb3-revised in either %s or %s' % (input_dir, backup_input_dir)))
        input_dir = backup_input_dir
    bracket_dir = os.path.join(input_dir, 'data', 'penntree')
    output_dir = paths['CONSTITUENCY_DATA_DIR']
    label_map = {'ADJ-PRD': 'ADJP-PRD'}
    train_trees = []
    # Sections 02..21 form the training split.
    for i in tqdm(range(2, 22)):
        new_trees = tree_reader.read_directory(os.path.join(bracket_dir, ('%02d' % i)))
        new_trees = [t.remap_constituent_labels(label_map) for t in new_trees]
        train_trees.extend(new_trees)
    # Tsurgeon pattern: move a root-level final '.' under the first child.
    move_tregex = '_ROOT_ <1 __=home <2 /^[.]$/=move'
    move_tsurgeon = 'move move >-1 home'
    print('Moving sentence final punctuation if necessary')
    with tsurgeon.Tsurgeon() as tsurgeon_processor:
        train_trees = [tsurgeon_processor.process(tree, move_tregex, move_tsurgeon)[0] for tree in tqdm(train_trees)]
    dev_trees = tree_reader.read_directory(os.path.join(bracket_dir, '22'))
    dev_trees = [t.remap_constituent_labels(label_map) for t in dev_trees]
    test_trees = tree_reader.read_directory(os.path.join(bracket_dir, '23'))
    test_trees = [t.remap_constituent_labels(label_map) for t in test_trees]
    print(('Read %d train trees, %d dev trees, and %d test trees' % (len(train_trees), len(dev_trees), len(test_trees))))
    datasets = [train_trees, dev_trees, test_trees]
    write_dataset(datasets, output_dir, dataset_name)
def test_sdca_hinge(bin_train_data):
    """Hinge-loss SDCA exposes no predict_proba and fits the binary
    training set perfectly."""
    X_bin, y_bin = bin_train_data
    classifier = SDCAClassifier(loss='hinge', random_state=0)
    classifier.fit(X_bin, y_bin)
    # Hinge loss is not probabilistic, so predict_proba must be absent.
    assert not hasattr(classifier, 'predict_proba')
    assert classifier.score(X_bin, y_bin) == 1.0
def run_ger(target: str, n: int, m: int, tile_size_x: int, tile_size_y: int, alpha: float=1, veclen: int=1, eps: float=1e-06):
    """Build, run and validate a GER (rank-1 update) SDFG against BLAS sger.

    Args:
        target: 'pure' for the expanded pure graph, 'fpga' for the FPGA graph.
        n, m: problem dimensions (A is m x n, x has length m, y length n).
        tile_size_x, tile_size_y: tiling parameters for the FPGA graph.
        alpha: scalar multiplier of the outer product.
        veclen: vectorization width; buffers are aligned to 4*veclen.
        eps: tolerance factor; failure when ||res - ref|| >= eps * n * m.

    Returns:
        The executed SDFG.

    Raises:
        ValueError: on an unknown target.
        RuntimeError: when the result diverges from the reference.
    """
    if (target == 'pure'):
        (ger_node, state, sdfg) = pure_graph('pure', dace.float32, veclen)
        ger_node.expand(sdfg, state)
        sdfg.apply_transformations_repeated([InlineSDFG])
    elif (target == 'fpga'):
        sdfg = fpga_graph(dace.float32, veclen, tile_size_x, tile_size_y)
    else:
        raise ValueError('Unsupported target')
    # Aligned buffers so vectorized accesses of width veclen are legal.
    x = aligned_ndarray(np.random.rand(m).astype(np.float32), alignment=(4 * veclen))
    y = aligned_ndarray(np.random.rand(n).astype(np.float32), alignment=(4 * veclen))
    A = aligned_ndarray(np.random.rand(m, n).astype(np.float32), alignment=(4 * veclen))
    res = aligned_ndarray(np.empty(A.shape, dtype=A.dtype), alignment=(4 * veclen))
    ref = aligned_ndarray(np.empty(A.shape, dtype=A.dtype), alignment=(4 * veclen))
    res[:] = A[:]
    ref[:] = A[:]
    with dace.config.set_temporary('compiler', 'allow_view_arguments', value=True):
        sdfg(x=x, y=y, A=A, res=res, m=dace.int32(m), n=dace.int32(n), alpha=alpha)
    # Reference result from SciPy's BLAS sger (A + alpha * x y^T).
    ref = scipy.linalg.blas.sger(alpha=alpha, x=x, y=y, a=ref)
    diff = np.linalg.norm((res - ref))
    if (diff >= ((eps * n) * m)):
        raise RuntimeError(f'Validation failed: {diff}')
    else:
        print('Validation successful.')
    return sdfg
class ShelveDataset(Dataset):
    """Dataset backed by a `shelve` store of per-index feature sequences.

    Shelf keys are stringified integer indices.
    """

    def __init__(self, fname, key=None, norm_and_scale=False):
        """
        Args:
            fname: Path to the shelf (without extension); a sibling
                `<fname>.dat` file must exist.
            key: unused, kept for interface compatibility.
            norm_and_scale: if True, normalize features on access.
        """
        # BUGFIX: the format call previously received a stray second
        # argument ('dat') that str.format silently ignored.
        self.path = Path('{}.dat'.format(fname))
        if not self.path.exists():
            raise RuntimeError('{} does not exist.'.format(self.path))
        self.data = shelve.open(str(fname.resolve()))
        self.norm_and_scale = norm_and_scale
        self.size = len(self.data)
        self.lengths = self.read_sequence_lengths()

    def read_sequence_lengths(self):
        """Return the length of every stored sequence."""
        lengths = []
        for x in self.data:
            lengths.append(len(self.data[str(x)]))
        return lengths

    @staticmethod
    def to_torch(batch):
        """Pad a list of sequences and return the batch time-major.

        BUGFIX: declared as a @staticmethod — the original definition had
        neither the decorator nor a `self` parameter, so calling it on an
        instance raised a TypeError.
        """
        batch = pad_video_sequence(batch)
        batch = batch.transpose(0, 1)
        return batch

    def __getitem__(self, idx):
        if self.norm_and_scale:
            feats = self.data[str(idx)]
            # NOTE(review): `preprocessing` is presumably
            # sklearn.preprocessing imported at module level — confirm.
            feats = preprocessing.normalize(feats)
            return feats
        return np.array(self.data[str(idx)])

    def __len__(self):
        return self.size

    def __repr__(self):
        return "{} '{}' ({} samples)\n".format(
            self.__class__.__name__, self.path.name, self.__len__())
def conv3otherRelu(in_planes, out_planes, kernel_size=None, stride=None, padding=None):
    """Return a Conv2d followed by an in-place ReLU.

    Any of kernel_size / stride / padding left as None falls back to the
    conventional 3x3, stride-1, pad-1 configuration; explicit values must be
    an int or a tuple.
    """
    kernel_size = 3 if kernel_size is None else kernel_size
    assert isinstance(kernel_size, (int, tuple)), 'kernel_size is not in (int, tuple)!'

    stride = 1 if stride is None else stride
    assert isinstance(stride, (int, tuple)), 'stride is not in (int, tuple)!'

    padding = 1 if padding is None else padding
    assert isinstance(padding, (int, tuple)), 'padding is not in (int, tuple)!'

    conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                     stride=stride, padding=padding, bias=True)
    return nn.Sequential(conv, nn.ReLU(inplace=True))
def removespaces(expr):
expr = expr.strip()
if (len(expr) <= 1):
return expr
expr2 = expr[0]
for i in range(1, (len(expr) - 1)):
if ((expr[i] == ' ') and ((expr[(i + 1)] in '()[]{}=+-/* ') or (expr[(i - 1)] in '()[]{}=+-/* '))):
continue
expr2 = (expr2 + expr[i])
expr2 = (expr2 + expr[(- 1)])
return expr2 |
def random_planetoid_splits(num_classes, y, train_num, seed):
    """Random Planetoid-style split: per-class train set, then val/test.

    Args:
        num_classes: number of label classes in `y`.
        y: 1-D (or squeezable) LongTensor of node labels.
        train_num: training nodes taken per class.
        seed: RNG seed for reproducible splits.

    Returns:
        (train_index, val_index, test_index) index tensors; val is the first
        500 and test the next 1000 of the shuffled remainder.
    """
    # Bug fix: all randomness below comes from torch.randperm, but the
    # original seeded only NumPy, so `seed` had no effect.  Seed torch too
    # (np.random.seed kept in case callers rely on the side effect).
    np.random.seed(seed)
    torch.manual_seed(seed)
    indices = []
    for c in range(num_classes):
        class_idx = (y == c).nonzero().view(-1)
        # Shuffle within the class before taking the train slice.
        class_idx = class_idx[torch.randperm(class_idx.size(0))]
        indices.append(class_idx)
    train_index = torch.cat([idx[:train_num] for idx in indices], dim=0)
    rest_index = torch.cat([idx[train_num:] for idx in indices], dim=0)
    rest_index = rest_index[torch.randperm(rest_index.size(0))]
    val_index = rest_index[:500]
    test_index = rest_index[500:1500]
    return (train_index, val_index, test_index)
def test_tabular_multi_output_independent_masker():
    """Additivity check: ExactExplainer + Independent masker on a multi-output
    (predict_proba) XGBoost model over tabular data."""
    model, background = common.basic_xgboost_scenario(100)
    masker = shap.maskers.Independent(background)
    common.test_additivity(shap.explainers.ExactExplainer, model.predict_proba, masker, background)
class BaseQuantizationConfig(object):
    """Holds quantization settings: candidate bit-widths and threshold shifts.

    Note: the lists are stored by reference, not copied.
    """

    def __init__(self, bit_list: List[int], thresholds_shift: List[int]):
        # Candidate bit-widths and the matching per-threshold shift amounts.
        self.bit_list, self.thresholds_shift = bit_list, thresholds_shift

    def update_bit_list(self, bit_list):
        """Replace the candidate bit-width list in place on this config."""
        self.bit_list = bit_list
# Fixed: the decorator was garbled — it started with a bare `.parametrize(`,
# which is a syntax error; restore the `@pytest.mark` prefix.
@pytest.mark.parametrize('implementation, dtype', [pytest.param('pure', dace.float32), pytest.param('pure', dace.float64), pytest.param('MKL', dace.float32, marks=pytest.mark.mkl), pytest.param('MKL', dace.float64, marks=pytest.mark.mkl), pytest.param('cuBLAS', dace.float32, marks=pytest.mark.gpu), pytest.param('cuBLAS', dace.float64, marks=pytest.mark.gpu)])
def test_dot(implementation, dtype):
    """Compile the DOT SDFG for the given backend/dtype and compare a length-32
    dot product against numpy, requiring relative error below 1e-6."""
    # cuBLAS needs its operands resident on the GPU.
    storage = (dace.StorageType.GPU_Global if (implementation == 'cuBLAS') else dace.StorageType.Default)
    sdfg = make_sdfg(implementation, dtype, storage=storage)
    np_dtype = getattr(np, dtype.to_string())
    dot = sdfg.compile()
    size = 32
    x = np.ndarray(size, dtype=np_dtype)
    y = np.ndarray(size, dtype=np_dtype)
    result = np.ndarray(1, dtype=np_dtype)
    x[:] = 2.5
    y[:] = 2
    result[0] = 0
    dot(x=x, y=y, result=result, n=size)
    ref = np.dot(x, y)
    diff = abs((result[0] - ref))
    # Relative tolerance: result must match numpy to ~1e-6.
    assert (diff < (1e-06 * ref))
def amsterdam_literal_train(listener=False):
    """Literal training triples for the Amsterdam colour-reference data.

    The data is four two-colour contexts; each context is paired with the same
    four utterances ('<modifier> purple', 'purple', '<modifier>', '').  The
    list built here is element-for-element identical to the original literal.
    """
    light_purple = (260.0, 45.0, 100.0)
    purple = (260.0, 100.0, 100.0)
    pinkish_purple = (300.0, 100.0, 100.0)
    contexts = [
        (0, [light_purple, purple], ['light purple', 'purple', 'light', '']),
        (1, [purple, light_purple], ['light purple', 'purple', 'light', '']),
        (1, [purple, pinkish_purple], ['pinkish purple', 'purple', 'pinkish', '']),
        (0, [pinkish_purple, purple], ['pinkish purple', 'purple', 'pinkish', '']),
    ]
    # Fresh list per triple, mirroring the independent literals of the original.
    data = [(utterance, target, list(colors))
            for (target, colors, utterances) in contexts
            for utterance in utterances]
    return triples_to_insts(data, listener=listener)
class SimulationConverter(object):
    """Converts SXS simulation data into a single HDF5 output file.

    NOTE(review): judging by the file names used below
    (rhOverM_Asymptotic_GeometricUnits_CoM.h5, Horizons.h5) this targets the
    LVC-NR waveform format — confirm against the `.waveforms`/`.metadata`
    helpers, which are not visible here.
    """
    class Log(object):
        """Print-and-accumulate logger; the transcript is written into the
        output file as ConversionLog.txt at the end of convert()."""
        def __init__(self, quiet):
            self.history = ''
            self.quiet = quiet
        def __call__(self, string):
            # Echo to stdout unless quiet; always append to the transcript.
            if (not self.quiet):
                print(string)
            self.history += (string + '\n')
        def __str__(self):
            return str(self.history)
        def __repr__(self):
            return repr(self.history)
    def __init__(self, modes=8, tolerance=1e-06, quiet=False):
        """Set up mode list, tolerance, version/command templates and catalog.

        `modes` may be 'all' (l = 2..8), '22only' ([2, +-2]), or an int l_max
        (all m for l = 2..l_max).  Loads the SXS catalog, so this hits the
        network/cache via sxs.load.
        """
        import os
        import time
        import json
        import platform
        import numpy
        import scipy
        import h5py
        import sxs
        # NOTE: self.modes is overwritten by the branches further down; this
        # first assignment only holds the raw argument in the meantime.
        self.modes = modes
        self.tolerance = tolerance
        self.quiet = quiet
        # Exact library versions, stored verbatim as CodeVersions.txt in the
        # output file for provenance.
        self.code_versions = f'''python=={platform.python_version()}
numpy=={numpy.version.version}
scipy=={scipy.version.full_version}
h5py=={h5py.version.version}
# h5py_api=={h5py.version.api_version}
# h5py_hdf5=={h5py.version.hdf5_version}
sxs=={sxs.__version__}
'''
        # Template of the invoking command; the remaining {{...}} fields are
        # filled in per-call at the top of convert().
        self.command = f'''sxs.utilities.lvcnr.convert_simulation(
    sxs_data_path={{sxs_data_path!r}},
    out_path={{out_path!r}},
    truncation_time={{truncation_time!r}},
    resolution={{resolution!r}},
    modes={modes!r},
    tolerance={tolerance!r},
    quiet={quiet!r}
)'''
        # Expand `modes` into an explicit [l, m] list.
        if (modes == 'all'):
            self.modes = [[l, m] for l in range(2, 9) for m in range((- l), (l + 1))]
        elif (modes == '22only'):
            self.modes = [[2, 2], [2, (- 2)]]
        else:
            l_max = int(modes)
            self.modes = [[l, m] for l in range(2, (l_max + 1)) for m in range((- l), (l + 1))]
        self.ell_max = max((lm[0] for lm in self.modes))
        catalog = sxs.load('catalog')
        self.sxs_catalog = {'simulations': catalog.simulations, 'records': catalog.records}
        self.sxs_catalog_resolutions = sxs.zenodo.catalog.resolutions_for_simulations(self.sxs_catalog)
    def convert(self, sxs_data_path, out_path, truncation_time=None, resolution=None, truncation_tol=None):
        """Convert one simulation directory into a single .h5 output file.

        Reads metadata.json, the rhOverM waveform file and Horizons.h5 from
        `sxs_data_path`; writes `<out_path>/<SXS_ID>_Res<resolution>.h5`
        containing modes, horizon splines, metadata, version info and the log.

        Raises ValueError when `resolution` is neither given nor inferable
        from the data path.
        """
        import os
        import time
        import json
        import h5py
        import sxs
        from .metadata import sxs_id_from_alt_names, write_metadata_from_sxs
        from .horizons import horizon_splines_from_sxs, write_horizon_splines_from_sxs
        from .waveforms import convert_modes
        log = self.Log(self.quiet)
        # Record the exact (reconstructed) invocation and a start timestamp.
        log(self.command.format(sxs_data_path=sxs_data_path, out_path=out_path, truncation_time=truncation_time, resolution=resolution))
        log(('Starting at ' + time.strftime('%H:%M%p %Z on %b %d, %Y')))
        with open(os.path.join(sxs_data_path, 'metadata.json'), 'r') as f:
            metadata = json.load(f)
        # Fall back to the Lev number embedded in the data path.
        if (resolution is None):
            resolution = sxs.lev_number(sxs_data_path)
        if (resolution is None):
            raise ValueError('No `resolution` value found in input arguments or data path.')
        sxs_id = sxs_id_from_alt_names(metadata['alternative_names'])
        log(('Converting ' + sxs_id))
        # Extrapolation order is fixed to N2 here.
        extrapolation_order = 'Extrapolated_N2'
        log(('Extrapolation order: ' + extrapolation_order))
        out_name = (((((out_path + '/') + sxs_id.replace(':', '_')) + '_Res') + str(resolution)) + '.h5')
        log("Output filename is '{0}'".format(out_name))
        # Waveform modes first: this creates the output file and yields the
        # time alignment (start/peak) used by the horizon splines below.
        (start_time, peak_time, version_hist) = convert_modes((sxs_data_path + '/rhOverM_Asymptotic_GeometricUnits_CoM.h5'), metadata, out_name, self.modes, extrapolation_order, log, truncation_time, tolerance=(self.tolerance / 2.0), truncation_tol=truncation_tol)
        with h5py.File((sxs_data_path + '/Horizons.h5'), 'r') as horizons:
            (horizon_splines_to_write, t_A, t_B, t_C) = horizon_splines_from_sxs(horizons, start_time, peak_time, log, truncation_tol=truncation_tol)
        write_horizon_splines_from_sxs(out_name, horizon_splines_to_write, t_A, t_B, t_C, log)
        write_metadata_from_sxs(out_name, resolution, metadata, self.sxs_catalog, self.sxs_catalog_resolutions, start_time, peak_time, self.ell_max, log)
        # Append provenance (versions, VersionHist if present, the log itself).
        with h5py.File(out_name, 'a') as out_file:
            out_file['auxiliary-info'].create_dataset('CodeVersions.txt', data=self.code_versions)
            if (version_hist is not None):
                log('Writing VersionHist.ver')
                out_file['auxiliary-info'].create_dataset('VersionHist.ver', data=version_hist)
            else:
                log('No VersionHist.ver found. Data being converted is version 0.')
            log(('Finishing at ' + time.strftime('%H:%M%p %Z on %b %d, %Y')))
            log('Writing log')
            out_file['auxiliary-info'].create_dataset('ConversionLog.txt', data=log.history)
class Keane(Benchmark):
    """Keane test function (2-D), bounded on [0, 10]^N.

    f(x) = sin^2(x0 - x1) * sin^2(x0 + x1) / sqrt(x0^2 + x1^2),
    with the registered optimum f = 0 at x = (7, 7).
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        lower, upper = [0.0] * self.N, [10.0] * self.N
        self._bounds = list(zip(lower, upper))
        self.global_optimum = [[7.0, 7.0]]
        # Tighter window used for plotting.
        self.custom_bounds = [(-1, 0.34), (-1, 0.34)]
        self.fglob = 0.0

    def fun(self, x, *args):
        # Count every objective evaluation.
        self.nfev += 1
        numerator = (sin(x[0] - x[1]) ** 2) * (sin(x[0] + x[1]) ** 2)
        return numerator / sqrt(x[0] ** 2 + x[1] ** 2)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.