code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import argparse
import logging
import os
import copy
from collections import defaultdict
from typing import List
import numpy as np
import torch
from sklearn import metrics
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data.dataloader import DataLoader
from training.models import RNNClassifier
from operator import add
import json
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that understands numpy containers and scalars.

    ``json.dump`` cannot serialize numpy objects natively; this encoder
    converts ``np.ndarray`` values to (nested) Python lists and any numpy
    scalar to the equivalent Python scalar.
    """
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # np.generic covers every numpy scalar type (np.float32, np.float64,
        # np.int64, ...), generalizing the original float32-only special case.
        elif isinstance(obj, np.generic):
            return obj.item()
        # Fall back to the base class, which raises TypeError for unknown types.
        return json.JSONEncoder.default(self, obj)
class ArgsStruct:
    """Lightweight namespace: exposes arbitrary keyword arguments as attributes."""
    def __init__(self, **entries):
        for key, value in entries.items():
            setattr(self, key, value)
def load_architecture(device: torch.device, args: argparse.Namespace):
    """Build an RNNClassifier from parsed CLI arguments and move it to `device`.

    Parameters
    ----------
    device : torch.device
        Device the model parameters are placed on.
    args : argparse.Namespace
        Parsed command-line arguments carrying every model hyperparameter.

    Returns
    -------
    RNNClassifier
        The instantiated model, already moved to `device`.
    """
    model_kwargs = dict(
        arch=args.arch,
        static_input_size=args.static_input_size,
        dynamic_input_size=args.dynamic_input_size,
        static_embedding_size=args.static_embedding_size,
        hidden_size=args.hidden_size,
        dropout=args.dropout,
        rnn_layers=args.rnn_layers,
        bidirectional=args.bidirectional,
        use_attention=args.use_attention,
        attention_type=args.attention_type,
        attention_fields=args.attention_fields,
        device=device,
        fc_layers=args.fc_layers,
        use_prior_prob_label=args.use_prior_prob_label,
    )
    model = RNNClassifier(**model_kwargs)
    model.to(device)
    return model
def deterministic(seed):
    """Seed the numpy and torch RNGs and force deterministic cuDNN kernels."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Disable cuDNN autotuning and request reproducible kernel selection.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def evaluate(data_loader: DataLoader, models: List[torch.nn.Module], device: torch.device,
             subset_name: str, criterion, logistic_threshold: float,
             exp_dir: str, metric='accuracy', max_seq: int = -1, aggregate: str = 'add', aggregate_or_th: float = 0.5):
    """Run step-wise evaluation of an ensemble of sequence classifiers.

    Each model consumes patients' dynamic sequences one time-step at a time;
    per-step sigmoid predictions are collected, ensemble votes are aggregated
    ('add': summed probabilities against a scaled threshold, 'or': fraction of
    positive votes against `aggregate_or_th`), and metrics are computed both
    left-aligned ("from_start") and right-aligned ("from_end"). Raw
    predictions (json) and a tab-separated report (csv) are written to
    `exp_dir`.

    Parameters
    ----------
    data_loader : DataLoader
        Yields batches of (patient_ids, static_data, dynamic_data, lengths, labels).
    models : List[torch.nn.Module]
        Ensemble members; each is run over the full loader.
    device : torch.device
        Device tensors are moved to.
    subset_name : str
        Tag used in the output file names (e.g. 'val', 'test').
    criterion
        Loss for reporting; BCEWithLogitsLoss receives raw logits, any other
        criterion receives sigmoid outputs.
    logistic_threshold : float
        Decision threshold on sigmoid outputs.
    exp_dir : str
        Directory where predictions and the report are written.
    metric : str
        Primary metric, 'accuracy' or 'f1'; the other one is also reported.
    max_seq : int
        If != -1, only the last `max_seq` events of each sequence are kept.
    aggregate : str
        Ensemble aggregation mode, 'add' or 'or'.
    aggregate_or_th : float
        Fraction of positive votes required for a positive 'or' decision.

    Returns
    -------
    (float, dict)
        Loss averaged over the ensemble and a summary table of global metrics.
    """
    assert aggregate in ['add', 'or']
    assert aggregate_or_th > 0 and aggregate_or_th <= 1
    assert metric in ['accuracy', 'f1']
    metric_name = metric
    # Primary metric and "other" metric are symmetric: whichever is not chosen
    # as primary is still computed and reported alongside it.
    if metric == 'accuracy':
        metric_score = metrics.accuracy_score
        metric_args = {}
        metric_other_name, metric_other_args, metric_other_score = 'f1', {'average': 'macro'}, metrics.f1_score
    else:
        metric_score = metrics.f1_score
        metric_args = {'average': 'macro'}
        metric_other_name, metric_other_args, metric_other_score = 'accuracy', {}, metrics.accuracy_score
    # deterministic(seed)
    # seed should NOT be used here (TODO review)
    total = 0
    loss_total = 0
    # Put every ensemble member in eval mode (dropout off, etc.).
    [model.eval() for model in models]
    with torch.no_grad():
        # predictions[model_idx][seq_step] -> list of per-patient sigmoid outputs
        predictions = [defaultdict(list) for _ in range(len(models))]
        patient_ids_from_start = defaultdict(list)
        corrects = defaultdict(list)
        # Labels are identical for every model, so they are only collected on
        # the first model's pass (guarded by `first`).
        first = True
        for model_idx, model in enumerate(models):
            for data in data_loader:
                patient_ids, static_data, dynamic_data, lengths, labels = data[0], data[1].to(device), \
                                                                          data[2], data[3].to(device), \
                                                                          data[4].to(device)
                if max_seq != -1:
                    # Truncate each sequence to its last `max_seq` events.
                    # NOTE(review): this inner loop rebinds `data`, so the
                    # `len(data[0])` below no longer refers to the batch when
                    # max_seq != -1 — confirm this is intended.
                    new_dynamic_data = []
                    for data in dynamic_data:
                        new_dynamic_data.append(data[len(data) - max_seq:] if len(data) > max_seq else data)
                    dynamic_data = new_dynamic_data
                # TO FIX: the padding remove one sequence from the list!
                dynamic_data_padded = pad_sequence(dynamic_data, batch_first=True, padding_value=0).to(device)
                # effective_lengths marks which patients are still "alive" at
                # the current step (1) vs already finished (0).
                effective_lengths = torch.ones(dynamic_data_padded.shape[0]).long().to(device)
                c_lengths = torch.tensor(list(range(dynamic_data_padded.shape[1]))).long().to(device)
                outputs = torch.zeros(dynamic_data_padded.shape[0]).to(device)
                hidden = model.init_hidden(dynamic_data_padded.shape[0])
                max_seq_step = dynamic_data_padded.shape[1]
                # History of per-step dynamic embeddings, consumed by attention.
                dynamic_data_history = torch.zeros(len(data[0]), dynamic_data_padded.shape[1],
                                                   model.hidden_size).to(device)
                for seq_step in range(max_seq_step):
                    events = dynamic_data_padded[:, seq_step, :]
                    # Indices of patients whose sequence has not ended yet.
                    non_zero = (effective_lengths != 0).nonzero().squeeze()
                    static = static_data[non_zero]
                    lens = effective_lengths[non_zero]
                    evs = events[non_zero]
                    if len(lens.shape) != 1:
                        # A single surviving patient collapses the batch dim;
                        # restore it so the model sees a batch of size 1.
                        static = static.unsqueeze(dim=0)
                        lens = lens.unsqueeze(dim=0)
                        evs = evs.unsqueeze(dim=0)
                    # Add the (length-1) sequence dimension for this step.
                    evs = evs.unsqueeze(dim=1)
                    if model.arch != 'lstm':
                        # GRU-style models carry a single hidden tensor.
                        if len(non_zero.shape) == 0:
                            outputs[non_zero], hidden[:, non_zero:non_zero + 1, :], dynamic_data_event, _, _ = model(
                                (static, evs,
                                 lens, hidden, dynamic_data_history), seq_step)
                        else:
                            outputs[non_zero], hidden[:, non_zero, :], dynamic_data_event, _, _ = model(
                                (static, evs, lens,
                                 hidden, dynamic_data_history), seq_step)
                    else:
                        # LSTM hidden state is a (h, c) pair; update both.
                        outputs[non_zero], h, dynamic_data_event, _, _ = model(
                            (static, evs, lens, hidden, dynamic_data_history), seq_step)
                        if len(non_zero.shape) == 0:
                            hidden[0][:, non_zero:non_zero + 1, :] = h[0]
                            hidden[1][:, non_zero:non_zero + 1, :] = h[1]
                        else:
                            hidden[0][:, non_zero, :] = h[0]
                            hidden[1][:, non_zero, :] = h[1]
                    # append predictions
                    non_zero_indexes = non_zero.tolist() if isinstance(non_zero.tolist(), list) else [non_zero.tolist()]
                    # append predictions and patient ids from start (left-aligned sequences)
                    for pred_idx in non_zero_indexes:
                        pred = torch.sigmoid(outputs[pred_idx]).clone().data
                        pred_seq_len = lengths.tolist()[pred_idx] - 1
                        predictions[model_idx][seq_step].append(pred)
                        # furthermore, store the patient_ids for each step
                        pid = patient_ids[pred_idx]
                        patient_ids_from_start[seq_step].append(int(pid))
                    dynamic_data_history[:, seq_step, :] = dynamic_data_event
                    if first:
                        # Collect ground-truth labels once (first model only).
                        outs = labels[non_zero].clone().data.tolist()
                        outs = outs if isinstance(outs, list) else [outs]
                        for label in outs:
                            corrects[seq_step].append(label)
                    total += 1 if len(non_zero.shape) == 0 else len(non_zero)
                    if outputs[non_zero].size():
                        # BCEWithLogitsLoss expects raw logits; any other
                        # criterion gets probabilities.
                        if criterion.__class__.__name__ == 'BCEWithLogitsLoss':
                            loss_total += criterion(outputs[non_zero].clone(), labels[non_zero].float())
                        else:
                            loss_total += criterion(torch.sigmoid(outputs[non_zero]).clone(), labels[non_zero].float())
                    # A patient stays active while seq_step < length - 1.
                    effective_lengths = (c_lengths[seq_step] < lengths - 1).long()
            first = False
    # Average the accumulated loss over the ensemble members.
    loss_total /= len(models)
    # compute predictions and from end (right-aligned sequences) using the sequence length for each prediction
    max_steps = len(predictions[0].keys())
    # Compute voted predictions
    def aggregate_or(votes):
        # Returns (decision, mean vote): positive when the fraction of
        # positive votes reaches aggregate_or_th.
        return (1 if len(list(filter(lambda x: x == 1, votes)))/len(votes) >= aggregate_or_th else 0,
                sum(votes)/len(votes))
    predicted = defaultdict(list)
    predicted_probs = defaultdict(list)
    for step in range(max_steps):
        # for each step, sum the prediction of each model in the ensemble
        preds_votes = []
        if aggregate == 'add':
            # 'add': sum raw probabilities across models and compare against a
            # threshold scaled by the ensemble size.
            for model_idx in range(len(predictions)):
                if len(preds_votes) == 0:
                    preds_votes = [pred.tolist() for pred in predictions[model_idx][step]]
                else:
                    preds_votes = list(map(add, preds_votes, [pred.tolist() for pred in predictions[model_idx][step]]))
            predicted[step] = [1 if pred >= logistic_threshold * len(models) else 0 for pred in preds_votes]
            predicted_probs[step] = preds_votes
        else:
            # 'or': binarize each model's outputs first, then vote.
            preds_votes_to_aggregate = []
            for model_idx in range(len(predictions)):
                if len(preds_votes_to_aggregate) == 0:
                    preds_votes_to_aggregate = [pred.tolist() for pred in predictions[model_idx][step]]
                    preds_votes_to_aggregate = [[1 if pred >= logistic_threshold else 0 for pred in
                                                 preds_votes_to_aggregate]]
                else:
                    new_votes = [pred.tolist() for pred in predictions[model_idx][step]]
                    new_votes = [1 if pred >= logistic_threshold else 0 for pred in new_votes]
                    preds_votes_to_aggregate.append(new_votes)
            pred_probs_or = []
            for idx_pred_ in range(len(preds_votes_to_aggregate[0])):
                preds_votes.append(aggregate_or([preds[idx_pred_] for preds in preds_votes_to_aggregate]))
            for idx_pred_vote, pred_vote in enumerate(preds_votes):
                decision, probs = pred_vote
                preds_votes[idx_pred_vote] = decision
                pred_probs_or.append(probs)
            predicted[step] = preds_votes
            predicted_probs[step] = pred_probs_or
    # Re-index everything by patient id (the names below shadow the earlier
    # per-model structures on purpose).
    predictions = dict()
    prediction_probs = dict()
    labels = dict()
    for step in predicted.keys():
        lista_ids = patient_ids_from_start[step]
        lista_labels = corrects[step]
        lista_predicciones = predicted[step]
        lista_probs = predicted_probs[step]
        for id, label, prediction, prob in zip(lista_ids, lista_labels, lista_predicciones, lista_probs):
            if step == 0:
                # Every patient is present at step 0, so per-id containers are
                # initialized here.
                predictions[id] = []
                prediction_probs[id] = []
                labels[id] = label
            predictions[id].append(prediction)
            prediction_probs[id].append(prob)
    # Build right-aligned ("from end") views by repeatedly popping the last
    # prediction of each patient.
    predicted_from_end = defaultdict(list)
    predicted_probs_from_end = defaultdict(list)
    patient_ids_from_end = defaultdict(list)
    corrects_from_end = defaultdict(list)
    predictions_copy = copy.deepcopy(predictions)
    predictions_probs_copy = copy.deepcopy(prediction_probs)
    for step in range(max_steps):
        y_pred = []
        y_pred_probs = []
        y_true = []
        patient_ids_step = []
        for id in predictions_copy:
            if len(predictions_copy[id]) > 0:
                last_prediction = predictions_copy[id].pop()
                y_pred.append(last_prediction)
                y_pred_probs.append(predictions_probs_copy[id].pop())
                y_true.append(labels[id])
                patient_ids_step.append(id)
        patient_ids_from_end[step] = patient_ids_step
        predicted_from_end[step] = y_pred
        predicted_probs_from_end[step] = y_pred_probs
        corrects_from_end[step] = y_true
    # write to disk predictions and corrects labels
    eval_preds = {"predictions_from_start": predicted,
                  "predictions_from_end": predicted_from_end,
                  "patient_ids_from_start": patient_ids_from_start,
                  "patient_ids_from_end": patient_ids_from_end,
                  "predicted_probs_from_start": predicted_probs,
                  "predicted_probs_from_end": predicted_probs_from_end,
                  "labels": corrects,
                  "labels_from_end": corrects_from_end}
    with open(os.path.join(exp_dir, 'eval_preds_' + subset_name + '.json'), 'w') as pn:
        json.dump(eval_preds, pn, cls=NumpyEncoder)
    # Compute evaluations metrics and write report
    eval_metrics = {"from_start": defaultdict(), "from_end": defaultdict(),
                    f"{metric_name}_avg_weighted": defaultdict()}
    for step in range(max_steps):
        # mean over all the correct predictions at given step
        assert (len(predicted[step]) == len(corrects[step]) and len(predicted_from_end[step]) == len(
            corrects_from_end[step])), \
            'number of labels different from number of predictions'
        eval_metrics["from_start"][step] = {metric_name: metric_score(corrects[step], predicted[step], **metric_args),
                                            metric_other_name: metric_other_score(corrects[step], predicted[step],
                                                                                  **metric_other_args),
                                            "sensitivity": metrics.recall_score(corrects[step], predicted[step]),
                                            "corrects":
                                                f'{metrics.accuracy_score(corrects[step], predicted[step], normalize=False)}',
                                            "examples":
                                                f'{len(predicted[step])}'}
        eval_metrics["from_end"][step] = {
            metric_name: metric_score(corrects_from_end[step], predicted_from_end[step], **metric_args),
            metric_other_name: metric_other_score(corrects_from_end[step], predicted_from_end[step],
                                                  **metric_other_args),
            "sensitivity": metrics.recall_score(corrects_from_end[step], predicted_from_end[step]),
            "corrects":
                f'{metrics.accuracy_score(corrects_from_end[step], predicted_from_end[step], normalize=False)}',
            "examples": f'{len(predicted_from_end[step])}'}
    # Flatten per-step lists to compute step-weighted global metrics.
    predicted_all_scores = []
    for step in range(max_steps):
        predicted_all_scores.extend(predicted_probs[step])
    predicted_all = []
    for step in range(max_steps):
        predicted_all.extend(predicted[step])
    predicted_all_from_end = []
    for step in range(max_steps):
        predicted_all_from_end.extend(predicted_from_end[step])
    corrects_all = []
    for step in range(max_steps):
        corrects_all.extend(corrects[step])
    corrects_all_from_end = []
    for step in range(max_steps):
        corrects_all_from_end.extend(corrects_from_end[step])
    eval_metrics[f"{metric_name}_avg_weighted"] = metric_score(corrects_all, predicted_all, **metric_args)
    eval_metrics[f"{metric_name}_avg_weighted_from_end"] = metric_score(corrects_all_from_end, predicted_all_from_end,
                                                                       **metric_args)
    eval_metrics[f"{metric_other_name}_avg_weighted"] = metric_other_score(corrects_all, predicted_all,
                                                                          **metric_other_args)
    eval_metrics[f"{metric_other_name}_avg_weighted_from_end"] = metric_other_score(corrects_all_from_end,
                                                                                    predicted_all_from_end,
                                                                                    **metric_other_args)
    eval_metrics["auc"] = metrics.roc_auc_score(corrects_all, predicted_all_scores)
    tn, fp, fn, tp = metrics.confusion_matrix(corrects_all, predicted_all).ravel()
    specificity = tn / (tn + fp)
    eval_metrics["sensitivity"] = metrics.recall_score(corrects_all, predicted_all)
    eval_metrics["sensitivity_from_end"] = metrics.recall_score(corrects_all_from_end, predicted_all_from_end)
    eval_metrics["specificity"] = specificity
    # TODO: encapsulate the evaluation report in a function
    eval_report = '\t'.join(['days from hospitalization',
                             'corrects',
                             'examples',
                             f'{metric_name} per day',
                             f'{metric_name} average weighted',
                             f'{metric_other_name} per day',
                             f'{metric_other_name} average weighted',
                             'sensitivity per day',
                             'sensitivity average'
                             ])
    for step in range(max_steps):
        eval_report += '\t'.join([f'\n{step}',
                                  f'{eval_metrics["from_start"][step]["corrects"]}',
                                  f'{eval_metrics["from_start"][step]["examples"]}',
                                  f'{eval_metrics["from_start"][step][f"{metric_name}"] * 100:.2f}%',
                                  f'{eval_metrics[f"{metric_name}_avg_weighted"] * 100:.2f}%',
                                  f'{eval_metrics["from_start"][step][f"{metric_other_name}"] * 100:.2f}%',
                                  f'{eval_metrics[f"{metric_other_name}_avg_weighted"] * 100:.2f}%',
                                  f'{eval_metrics["from_start"][step]["sensitivity"] * 100:.2f}%',
                                  f'{eval_metrics["sensitivity"] * 100:.2f}%'
                                  ])
    eval_report += '\n'
    eval_report += '\t'.join(['days before discharge',
                              'corrects',
                              'examples',
                              f'{metric_name} per day',
                              f'{metric_name} average weighted',
                              f'{metric_other_name} per day',
                              f'{metric_other_name} average weighted',
                              'sensitivity per day',
                              'sensitivity average'])
    for step in range(max_steps):
        eval_report += '\t'.join([f'\n{step}',
                                  f'{eval_metrics["from_end"][step]["corrects"]}',
                                  f'{eval_metrics["from_end"][step]["examples"]}',
                                  f'{eval_metrics["from_end"][step][f"{metric_name}"] * 100:.2f}%',
                                  f'{eval_metrics[f"{metric_name}_avg_weighted_from_end"] * 100:.2f}%',
                                  f'{eval_metrics["from_end"][step][f"{metric_other_name}"] * 100:.2f}%',
                                  f'{eval_metrics[f"{metric_other_name}_avg_weighted_from_end"] * 100:.2f}%',
                                  f'{eval_metrics["from_end"][step]["sensitivity"] * 100:.2f}%',
                                  f'{eval_metrics["sensitivity_from_end"] * 100:.2f}%'])
    logging.info(eval_report)
    with open(os.path.join(exp_dir, 'eval_report_' + subset_name + '.csv'), 'w') as fn:
        fn.writelines(eval_report)
    logging.info(
        f"{metric_name.upper()} GLOBAL {subset_name}: " + f'{eval_metrics[f"{metric_name}_avg_weighted"] * 100:.4f}%')
    output_table = {
        f'{metric_name}_avg_weighted': eval_metrics[f'{metric_name}_avg_weighted'],
        f'{metric_other_name}_avg_weighted': eval_metrics[f'{metric_other_name}_avg_weighted'],
        'sensitivity': eval_metrics['sensitivity'],
        'specificity': eval_metrics['specificity'],
        'auc': eval_metrics['auc']
    }
    return float(loss_total), output_table
| [
"torch.manual_seed",
"torch.ones",
"json.JSONEncoder.default",
"torch.sigmoid",
"os.path.join",
"torch.nn.utils.rnn.pad_sequence",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.recall_score",
"torch.no_grad",
"torch.zeros",
"collections.defaultdict",
"numpy.random.seed",
"copy.deepcopy",... | [((788, 1293), 'training.models.RNNClassifier', 'RNNClassifier', ([], {'arch': 'args.arch', 'static_input_size': 'args.static_input_size', 'dynamic_input_size': 'args.dynamic_input_size', 'static_embedding_size': 'args.static_embedding_size', 'hidden_size': 'args.hidden_size', 'dropout': 'args.dropout', 'rnn_layers': 'args.rnn_layers', 'bidirectional': 'args.bidirectional', 'use_attention': 'args.use_attention', 'attention_type': 'args.attention_type', 'attention_fields': 'args.attention_fields', 'device': 'device', 'fc_layers': 'args.fc_layers', 'use_prior_prob_label': 'args.use_prior_prob_label'}), '(arch=args.arch, static_input_size=args.static_input_size,\n dynamic_input_size=args.dynamic_input_size, static_embedding_size=args.\n static_embedding_size, hidden_size=args.hidden_size, dropout=args.\n dropout, rnn_layers=args.rnn_layers, bidirectional=args.bidirectional,\n use_attention=args.use_attention, attention_type=args.attention_type,\n attention_fields=args.attention_fields, device=device, fc_layers=args.\n fc_layers, use_prior_prob_label=args.use_prior_prob_label)\n', (801, 1293), False, 'from training.models import RNNClassifier\n'), ((1492, 1515), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1509, 1515), False, 'import torch\n'), ((1609, 1629), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1623, 1629), True, 'import numpy as np\n'), ((8097, 8114), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (8108, 8114), False, 'from collections import defaultdict\n'), ((8137, 8154), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (8148, 8154), False, 'from collections import defaultdict\n'), ((10681, 10698), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10692, 10698), False, 'from collections import defaultdict\n'), ((10730, 10747), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10741, 10747), False, 'from 
collections import defaultdict\n'), ((10775, 10792), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10786, 10792), False, 'from collections import defaultdict\n'), ((10817, 10834), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10828, 10834), False, 'from collections import defaultdict\n'), ((10858, 10884), 'copy.deepcopy', 'copy.deepcopy', (['predictions'], {}), '(predictions)\n', (10871, 10884), False, 'import copy\n'), ((10914, 10945), 'copy.deepcopy', 'copy.deepcopy', (['prediction_probs'], {}), '(prediction_probs)\n', (10927, 10945), False, 'import copy\n'), ((15622, 15679), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['corrects_all', 'predicted_all_scores'], {}), '(corrects_all, predicted_all_scores)\n', (15643, 15679), False, 'from sklearn import metrics\n'), ((15830, 15879), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['corrects_all', 'predicted_all'], {}), '(corrects_all, predicted_all)\n', (15850, 15879), False, 'from sklearn import metrics\n'), ((15923, 15990), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['corrects_all_from_end', 'predicted_all_from_end'], {}), '(corrects_all_from_end, predicted_all_from_end)\n', (15943, 15990), False, 'from sklearn import metrics\n'), ((18876, 18901), 'logging.info', 'logging.info', (['eval_report'], {}), '(eval_report)\n', (18888, 18901), False, 'import logging\n'), ((574, 609), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (598, 609), False, 'import json\n'), ((2640, 2655), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2653, 2655), False, 'import torch\n'), ((2760, 2777), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2771, 2777), False, 'from collections import defaultdict\n'), ((2798, 2815), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2809, 2815), False, 'from collections import defaultdict\n'), ((12243, 12286), 
'json.dump', 'json.dump', (['eval_preds', 'pn'], {'cls': 'NumpyEncoder'}), '(eval_preds, pn, cls=NumpyEncoder)\n', (12252, 12286), False, 'import json\n'), ((12373, 12386), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (12384, 12386), False, 'from collections import defaultdict\n'), ((12400, 12413), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (12411, 12413), False, 'from collections import defaultdict\n'), ((12466, 12479), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (12477, 12479), False, 'from collections import defaultdict\n'), ((2680, 2697), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2691, 2697), False, 'from collections import defaultdict\n'), ((12161, 12221), 'os.path.join', 'os.path.join', (['exp_dir', "('eval_preds_' + subset_name + '.json')"], {}), "(exp_dir, 'eval_preds_' + subset_name + '.json')\n", (12173, 12221), False, 'import os\n'), ((13186, 13239), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['corrects[step]', 'predicted[step]'], {}), '(corrects[step], predicted[step])\n', (13206, 13239), False, 'from sklearn import metrics\n'), ((13904, 13975), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['corrects_from_end[step]', 'predicted_from_end[step]'], {}), '(corrects_from_end[step], predicted_from_end[step])\n', (13924, 13975), False, 'from sklearn import metrics\n'), ((15701, 15754), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['corrects_all', 'predicted_all'], {}), '(corrects_all, predicted_all)\n', (15725, 15754), False, 'from sklearn import metrics\n'), ((18916, 18976), 'os.path.join', 'os.path.join', (['exp_dir', "('eval_report_' + subset_name + '.csv')"], {}), "(exp_dir, 'eval_report_' + subset_name + '.csv')\n", (18928, 18976), False, 'import os\n'), ((13348, 13420), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['corrects[step]', 'predicted[step]'], {'normalize': '(False)'}), '(corrects[step], predicted[step], 
normalize=False)\n', (13370, 13420), False, 'from sklearn import metrics\n'), ((14020, 14114), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['corrects_from_end[step]', 'predicted_from_end[step]'], {'normalize': '(False)'}), '(corrects_from_end[step], predicted_from_end[step],\n normalize=False)\n', (14042, 14114), False, 'from sklearn import metrics\n'), ((3624, 3685), 'torch.nn.utils.rnn.pad_sequence', 'pad_sequence', (['dynamic_data'], {'batch_first': '(True)', 'padding_value': '(0)'}), '(dynamic_data, batch_first=True, padding_value=0)\n', (3636, 3685), False, 'from torch.nn.utils.rnn import pad_sequence\n'), ((3921, 3962), 'torch.zeros', 'torch.zeros', (['dynamic_data_padded.shape[0]'], {}), '(dynamic_data_padded.shape[0])\n', (3932, 3962), False, 'import torch\n'), ((3734, 3774), 'torch.ones', 'torch.ones', (['dynamic_data_padded.shape[0]'], {}), '(dynamic_data_padded.shape[0])\n', (3744, 3774), False, 'import torch\n'), ((6372, 6404), 'torch.sigmoid', 'torch.sigmoid', (['outputs[pred_idx]'], {}), '(outputs[pred_idx])\n', (6385, 6404), False, 'import torch\n'), ((7515, 7547), 'torch.sigmoid', 'torch.sigmoid', (['outputs[non_zero]'], {}), '(outputs[non_zero])\n', (7528, 7547), False, 'import torch\n')] |
import os
import torch
import numpy as np
import json
import dgl
import constants
def read_partitions_file(part_file):
    """Read a METIS partition assignment file (output of pm_dglpart2).

    Each line of `part_file` is `<global_nid> <part_id>`; the file is
    expected to be sorted by <global_nid>, but the rows are re-sorted here
    as a precaution before the part-id column is returned.

    Parameters:
    -----------
    part_file : string
        file name which is the output of the METIS partitioning
        algorithm (pm_dglpart2).

    Returns:
    --------
    numpy array
        array of part_ids indexed by <global_nid>
    """
    mapping = np.loadtxt(part_file, delimiter=' ', dtype=np.int64)
    order = mapping[:, 0].argsort()
    return mapping[order][:, 1]
def read_json(json_file):
    """Deserialize a json schema file.

    Parameters:
    -----------
    json_file : string
        file name for the json schema

    Returns:
    --------
    dictionary, as serialized in the json_file
    """
    with open(json_file) as fh:
        return json.load(fh)
def get_node_types(schema):
    """Build node_typename -> node_type mappings from the schema's 'nid' ranges.

    Node types are numbered by the ascending start of their global-nid
    range, so type 0 is the ntype whose range begins first.

    Parameters:
    -----------
    schema : dictionary
        Input schema; its 'nid' entry maps each ntype name to its
        [start, end) global-nid range.

    Returns:
    --------
    dictionary, list
        dictionary with ntype <-> type_nid mappings
        list of ntype strings ordered by type_nid
    """
    # Normalize each range to a (1, 2) array, as downstream code expects.
    nid_ranges = {name: np.array(rng).reshape(1, 2)
                  for name, rng in schema['nid'].items()}
    # Order the type names by the start of their global-nid range.
    ordered = sorted(nid_ranges, key=lambda name: nid_ranges[name][0, 0])
    ntypes_map = {name: idx for idx, name in enumerate(ordered)}
    return ntypes_map, ordered
def write_metadata_json(metadata_list, output_dir, graph_name):
    """Merge the per-rank json schemas into one aggregated json file on rank-0.

    Parameters:
    -----------
    metadata_list : list of json (dictionaries)
        a list of json dictionaries to merge on rank-0
    output_dir : string
        output directory path in which results are stored (as a json file)
    graph_name : string
        a string specifying the graph name
    """
    merged = {}
    # Per-etype edge-id ranges: collect the first entry from every rank.
    merged["edge_map"] = {
        etype: [meta["edge_map"][etype][0] for meta in metadata_list]
        for etype in metadata_list[0]["edge_map"]
    }
    # Per-ntype node-id ranges, same layout as edge_map.
    merged["node_map"] = {
        ntype: [meta["node_map"][ntype][0] for meta in metadata_list]
        for ntype in metadata_list[0]["node_map"]
    }
    # Values shared by all ranks are taken from rank-0's metadata.
    for key in ("etypes", "graph_name", "halo_hops", "ntypes", "num_parts", "part_method"):
        merged[key] = metadata_list[0][key]
    # Totals are summed across ranks.
    merged["num_edges"] = sum(meta["num_edges"] for meta in metadata_list)
    merged["num_nodes"] = sum(meta["num_nodes"] for meta in metadata_list)
    # Each rank contributes its own "part-<i>" entry.
    for i, meta in enumerate(metadata_list):
        merged["part-{}".format(i)] = meta["part-{}".format(i)]
    with open('{}/{}.json'.format(output_dir, graph_name), 'w') as outfile:
        json.dump(merged, outfile, sort_keys=True, indent=4)
def augment_edge_data(edge_data, part_ids):
    """Annotate each edge with the rank that owns it.

    An edge is owned by the process that owns its destination node, so the
    OWNER_PROCESS column is looked up in `part_ids` via GLOBAL_DST_ID.

    Parameters:
    -----------
    edge_data : numpy ndarray
        Edge information as read from the xxx_edges.txt file
    part_ids : numpy array
        array of part_ids indexed by global_nid
    """
    dst_ids = edge_data[constants.GLOBAL_DST_ID]
    edge_data[constants.OWNER_PROCESS] = part_ids[dst_ids]
def augment_node_data(node_data, part_ids):
    """Add GLOBAL_NID and OWNER_PROCESS columns to the node_data dictionary.

    Parameters:
    -----------
    node_data : numpy ndarray
        Node information as read from xxx_nodes.txt file
    part_ids : numpy array
        array of part_ids indexed by global_nid
    """
    num_nodes = len(node_data[constants.GLOBAL_TYPE_NID])
    # Global node-ids are simply the (0-based) line numbers in xxx_nodes.txt.
    node_data[constants.GLOBAL_NID] = np.arange(num_nodes, dtype=np.int64)
    # The owning rank for each node comes from the METIS partition assignment.
    node_data[constants.OWNER_PROCESS] = part_ids[node_data[constants.GLOBAL_NID]]
def read_nodes_file(nodes_file):
    """Read a xxx_nodes.txt graph file.

    Each line is expected to be:
    <node_type> <weight1> <weight2> <weight3> <weight4> <global_type_nid> <attributes>
    (the ogb-mag dataset's nodes.txt follows this format).

    Parameters:
    -----------
    nodes_file : string
        Graph file for nodes in the input graph

    Returns:
    --------
    dictionary
        Nodes data keyed by column name (NTYPE_ID, GLOBAL_TYPE_NID), or
        None when no file name was given.
    """
    # Empty string and None both mean "no nodes file supplied".
    if not nodes_file:
        return None
    raw = np.loadtxt(nodes_file, delimiter=' ', dtype='int64')
    return {
        constants.NTYPE_ID: raw[:, 0],
        constants.GLOBAL_TYPE_NID: raw[:, 5],
    }
def read_edges_file(edge_file, edge_data_dict):
    """Read a xxx_edges.txt graph file and accumulate it into `edge_data_dict`.

    Each line is expected to be:
    <global_src_id> <global_dst_id> <type_eid> <etype> <attributes>
    where the global ids are line numbers in the graph_nodes.txt file.

    Parameters:
    -----------
    edge_file : string
        Graph file for edges in the input graph
    edge_data_dict : dictionary or None
        Existing column dictionary to append to; a new one is created
        when None is passed.

    Returns:
    --------
    dictionary
        edge data columns keyed by column name, or None when no file
        name was given.
    """
    if edge_file == "" or edge_file is None:
        return None
    raw = np.loadtxt(edge_file, delimiter=' ', dtype='int64')
    columns = (
        (constants.GLOBAL_SRC_ID, raw[:, 0]),
        (constants.GLOBAL_DST_ID, raw[:, 1]),
        (constants.GLOBAL_TYPE_EID, raw[:, 2]),
        (constants.ETYPE_ID, raw[:, 3]),
    )
    if edge_data_dict is None:
        # First file: start a fresh column dictionary.
        edge_data_dict = {key: col for key, col in columns}
    else:
        # Subsequent files: append the new rows to the existing columns.
        for key, col in columns:
            edge_data_dict[key] = np.concatenate((edge_data_dict[key], col))
    return edge_data_dict
def read_node_features_file(nodes_features_file):
    """Load the node-feature tensors from a file.

    Parameters:
    -----------
    nodes_features_file : string
        Features file for nodes in the graph

    Returns:
    --------
    dictionary
        mappings between ntype and list of features
    """
    return dgl.data.utils.load_tensors(nodes_features_file, False)
def read_edge_features_file(edge_features_file):
    """Load the edge-feature tensors from a file.

    Parameters:
    -----------
    edge_features_file : string
        Features file for edges in the graph

    Returns:
    --------
    dictionary
        mappings between etype and list of features
    """
    return dgl.data.utils.load_tensors(edge_features_file, True)
def write_node_features(node_features, node_file):
    """Serialize the ntype -> feature-list dictionary into `node_file`.

    Parameters:
    -----------
    node_features : dictionary
        dictionary storing ntype <-> list of features
    node_file : string
        File in which the node information is serialized
    """
    dgl.data.utils.save_tensors(node_file, node_features)
def write_edge_features(edge_features, edge_file):
    """Serialize the etype -> feature-list dictionary into `edge_file`.

    Parameters:
    -----------
    edge_features : dictionary
        dictionary storing etype <-> list of features
    edge_file : string
        File in which the edge information is serialized
    """
    dgl.data.utils.save_tensors(edge_file, edge_features)
def write_graph_dgl(graph_file, graph_obj):
    """Serialize a single DGL graph object into `graph_file`.

    Parameters:
    -----------
    graph_file : string
        File name in which graph object is serialized
    graph_obj : dgl graph object
        graph dgl object, as created in convert_partition.py
    """
    # dgl.save_graphs expects a list of graphs; wrap the single object.
    dgl.save_graphs(graph_file, [graph_obj])
def write_dgl_objects(graph_obj, node_features, edge_features, output_dir, part_id):
    """Serialize one partition's graph object and optional features to disk.

    Creates `<output_dir>/part<part_id>/` (if missing) and writes:
      * `part<part_id>`   - the serialized DGL graph object
      * `node_feat.dgl`   - node-feature tensors (skipped when None)
      * `edge_feat.dgl`   - edge-feature tensors (skipped when None)

    Parameters:
    -----------
    graph_obj : dgl object
        graph dgl object as created in convert_partition.py file
    node_features : dgl object
        Tensor data for node features, or None
    edge_features : dgl object
        Tensor data for edge features, or None
    output_dir : string
        directory under which the per-partition folder is created
    part_id : int
        partition number, used to name the output folder and graph file
    """
    # Use os.path.join instead of string concatenation so paths stay portable.
    part_dir = os.path.join(output_dir, 'part' + str(part_id))
    os.makedirs(part_dir, exist_ok=True)
    write_graph_dgl(os.path.join(part_dir, 'part' + str(part_id)), graph_obj)
    # Features are optional; compare against None explicitly with `is not`.
    if node_features is not None:
        write_node_features(node_features, os.path.join(part_dir, "node_feat.dgl"))
    if edge_features is not None:
        write_edge_features(edge_features, os.path.join(part_dir, "edge_feat.dgl"))
| [
"os.makedirs",
"dgl.data.utils.load_tensors",
"os.path.join",
"dgl.save_graphs",
"numpy.array",
"dgl.data.utils.save_tensors",
"numpy.concatenate",
"json.load",
"numpy.loadtxt",
"json.dump"
] | [((692, 744), 'numpy.loadtxt', 'np.loadtxt', (['part_file'], {'delimiter': '""" """', 'dtype': 'np.int64'}), "(part_file, delimiter=' ', dtype=np.int64)\n", (702, 744), True, 'import numpy as np\n'), ((6200, 6252), 'numpy.loadtxt', 'np.loadtxt', (['nodes_file'], {'delimiter': '""" """', 'dtype': '"""int64"""'}), "(nodes_file, delimiter=' ', dtype='int64')\n", (6210, 6252), True, 'import numpy as np\n'), ((7202, 7253), 'numpy.loadtxt', 'np.loadtxt', (['edge_file'], {'delimiter': '""" """', 'dtype': '"""int64"""'}), "(edge_file, delimiter=' ', dtype='int64')\n", (7212, 7253), True, 'import numpy as np\n'), ((8502, 8557), 'dgl.data.utils.load_tensors', 'dgl.data.utils.load_tensors', (['nodes_features_file', '(False)'], {}), '(nodes_features_file, False)\n', (8529, 8557), False, 'import dgl\n'), ((8923, 8976), 'dgl.data.utils.load_tensors', 'dgl.data.utils.load_tensors', (['edge_features_file', '(True)'], {}), '(edge_features_file, True)\n', (8950, 8976), False, 'import dgl\n'), ((9343, 9396), 'dgl.data.utils.save_tensors', 'dgl.data.utils.save_tensors', (['node_file', 'node_features'], {}), '(node_file, node_features)\n', (9370, 9396), False, 'import dgl\n'), ((9739, 9792), 'dgl.data.utils.save_tensors', 'dgl.data.utils.save_tensors', (['edge_file', 'edge_features'], {}), '(edge_file, edge_features)\n', (9766, 9792), False, 'import dgl\n'), ((10143, 10183), 'dgl.save_graphs', 'dgl.save_graphs', (['graph_file', '[graph_obj]'], {}), '(graph_file, [graph_obj])\n', (10158, 10183), False, 'import dgl\n'), ((10661, 10697), 'os.makedirs', 'os.makedirs', (['part_dir'], {'exist_ok': '(True)'}), '(part_dir, exist_ok=True)\n', (10672, 10697), False, 'import os\n'), ((1216, 1233), 'json.load', 'json.load', (['schema'], {}), '(schema)\n', (1225, 1233), False, 'import json\n'), ((4272, 4332), 'json.dump', 'json.dump', (['graph_metadata', 'outfile'], {'sort_keys': '(True)', 'indent': '(4)'}), '(graph_metadata, outfile, sort_keys=True, indent=4)\n', (4281, 4332), False, 'import 
json\n'), ((7652, 7726), 'numpy.concatenate', 'np.concatenate', (['(edge_data_dict[constants.GLOBAL_SRC_ID], edge_data[:, 0])'], {}), '((edge_data_dict[constants.GLOBAL_SRC_ID], edge_data[:, 0]))\n', (7666, 7726), True, 'import numpy as np\n'), ((7790, 7864), 'numpy.concatenate', 'np.concatenate', (['(edge_data_dict[constants.GLOBAL_DST_ID], edge_data[:, 1])'], {}), '((edge_data_dict[constants.GLOBAL_DST_ID], edge_data[:, 1]))\n', (7804, 7864), True, 'import numpy as np\n'), ((7930, 8006), 'numpy.concatenate', 'np.concatenate', (['(edge_data_dict[constants.GLOBAL_TYPE_EID], edge_data[:, 2])'], {}), '((edge_data_dict[constants.GLOBAL_TYPE_EID], edge_data[:, 2]))\n', (7944, 8006), True, 'import numpy as np\n'), ((8065, 8134), 'numpy.concatenate', 'np.concatenate', (['(edge_data_dict[constants.ETYPE_ID], edge_data[:, 3])'], {}), '((edge_data_dict[constants.ETYPE_ID], edge_data[:, 3]))\n', (8079, 8134), True, 'import numpy as np\n'), ((10848, 10887), 'os.path.join', 'os.path.join', (['part_dir', '"""node_feat.dgl"""'], {}), "(part_dir, 'node_feat.dgl')\n", (10860, 10887), False, 'import os\n'), ((10965, 11004), 'os.path.join', 'os.path.join', (['part_dir', '"""edge_feat.dgl"""'], {}), "(part_dir, 'edge_feat.dgl')\n", (10977, 11004), False, 'import os\n'), ((1791, 1823), 'numpy.array', 'np.array', (['global_nid_ranges[key]'], {}), '(global_nid_ranges[key])\n', (1799, 1823), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 <NAME> <<EMAIL>>
# NOTICE: authors should document their contributions in concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
import numpy as np
import scipy.optimize
from wavestate import declarative
from .beam_param import ComplexBeamParam
from wavestate.utilities.mpl import (
mplfigB,
)
from . import utils
class QFit(declarative.OverridableObject):
    """Fit a Gaussian-beam complex beam parameter from beam-size measurements.

    Measurements are 2-sigma intensity diameters ``D_um`` [micrometers] taken
    at axial positions ``Z_in`` [inches].  The fit finds the waist location
    Z0 and Rayleigh range ZR (optionally together with an M^2 beam-quality
    factor) by least-squares on the hyperbolic beam-radius profile, and can
    plot data, fit, and initial guess.
    """

    @declarative.dproperty
    def wavelength_m(self, val):
        # Optical wavelength in meters (e.g. 1064e-9) -- see rep()/plot(),
        # which multiply by 1e9 to display nanometers.
        return val

    # Assumed beam-quality factor for the plain (fixed-M^2) fit.
    m2 = 1.00

    @declarative.mproperty
    def D_um(self, arg):
        # Measured 2-sigma intensity diameters [um].
        arg = np.array(arg)
        return arg

    @declarative.mproperty
    def R_m(self, arg=declarative.NOARG):
        # Beam radii [m]; defaults to half the diameter converted from um.
        if arg is declarative.NOARG:
            arg = self.D_um * 1e-6 / 2
        else:
            arg = np.array(arg)
        return arg

    @declarative.mproperty
    def Z_in(self, arg):
        # Axial measurement positions [inches].
        arg = np.array(arg)
        return arg

    @declarative.mproperty
    def Z_m(self, arg=declarative.NOARG):
        # Axial measurement positions [m]; defaults to Z_in * 0.0254.
        if arg is declarative.NOARG:
            arg = self.Z_in * 0.0254
        else:
            arg = np.array(arg)
        return arg

    @declarative.mproperty
    def Z0_ZR_init(self, arg=declarative.NOARG):
        """Initial guess ``(Z0, ZR)`` for the fit.

        Takes the smallest measured radius as the waist radius and the mean
        of the four positions closest to it (negated) as the waist location.
        """
        if arg is declarative.NOARG:
            idx_W0 = np.argsort(self.R_m)
            W0 = self.R_m[idx_W0[0]] * 1
            ZR = np.pi * W0 ** 2 / (self.wavelength_m) / (self.m2)
            Z0 = -np.mean(self.Z_m[idx_W0[:4]])
            arg = (Z0, ZR)
        return arg

    def waist_func(self, z, z_0, z_R):
        """Gaussian beam radius at ``z`` given waist offset ``z_0`` and Rayleigh range ``z_R``."""
        return (
            self.m2 * self.wavelength_m / (np.pi * z_R) * ((z + z_0) ** 2 + z_R ** 2)
        ) ** 0.5

    # When True, skip the one-sided prefit used to locate tiny waists.
    no_prefit = False

    @declarative.mproperty
    def Z0_ZR_fit(self):
        """Least-squares ``(Z0, ZR)`` with M^2 held fixed at ``self.m2``."""
        idx_W0 = np.argmin(self.R_m)
        init = self.Z0_ZR_init
        # Do a prefit to try and find tiny waists using a subset of the data.
        if (
            idx_W0 > 1
            and idx_W0 < len(self.R_m) - 1
            and not self.no_prefit
            and len(self.R_m) > 3
        ):
            # Don't include the minimum point itself; it may lie across a gap.
            if idx_W0 < len(self.R_m) / 2:
                idx_W0 += 1
            init, hess = scipy.optimize.curve_fit(
                self.waist_func,
                self.Z_m[idx_W0:],
                self.R_m[idx_W0:],
                p0=self.Z0_ZR_init,
            )
        else:
            # NOTE(review): if idx_W0 == 0 this slice is empty and curve_fit
            # will raise -- preserved from the original; confirm intended.
            init, hess = scipy.optimize.curve_fit(
                self.waist_func,
                self.Z_m[:idx_W0],
                self.R_m[:idx_W0],
                p0=self.Z0_ZR_init,
            )
        # Final fit over the full data set, seeded from the prefit.
        (z0, zR), hess = scipy.optimize.curve_fit(
            self.waist_func, self.Z_m, self.R_m, p0=init
        )
        return (z0, zR)

    def waist_func_m2(self, z, z_0, z_R, m2):
        """Beam radius model with M^2 as a free (absolute-valued) parameter."""
        return (
            abs(m2) * self.wavelength_m / (np.pi * z_R) * ((z + z_0) ** 2 + z_R ** 2)
        ) ** 0.5

    @declarative.mproperty
    def Z0_ZR_m2_fit(self):
        """Least-squares ``(Z0, ZR, m2)`` with M^2 as a free fit parameter."""
        idx_W0 = np.argmin(self.R_m)
        init = self.Z0_ZR_init + (self.m2,)
        # Do a prefit to try and find tiny waists using a subset of the data.
        if (
            idx_W0 > 1
            and idx_W0 < len(self.R_m) - 1
            and not self.no_prefit
            and len(self.R_m) > 3
        ):
            # Don't include the minimum point itself; it may lie across a gap.
            if idx_W0 < len(self.R_m) / 2:
                idx_W0 += 1
            init, hess = scipy.optimize.curve_fit(
                self.waist_func_m2,
                self.Z_m[idx_W0:],
                self.R_m[idx_W0:],
                p0=self.Z0_ZR_fit + (self.m2,),
            )
        else:
            init, hess = scipy.optimize.curve_fit(
                self.waist_func_m2,
                self.Z_m[:idx_W0],
                self.R_m[:idx_W0],
                p0=self.Z0_ZR_fit + (self.m2,),
            )
        (z0, zR, m2), hess = scipy.optimize.curve_fit(
            self.waist_func_m2, self.Z_m, self.R_m, p0=init
        )
        return (z0, zR, m2)

    @declarative.mproperty
    def q_fit(self):
        # Complex beam parameter from the fixed-M^2 fit.
        return ComplexBeamParam.from_Z_ZR(
            self.Z0_ZR_fit[0],
            self.Z0_ZR_fit[1],
            wavelength_m=self.wavelength_m,
        )

    @declarative.mproperty
    def q_fit_m2(self):
        # Complex beam parameter from the free-M^2 fit.
        return ComplexBeamParam.from_Z_ZR(
            self.Z0_ZR_m2_fit[0],
            self.Z0_ZR_m2_fit[1],
            wavelength_m=self.wavelength_m,
        )

    @declarative.mproperty
    def q_init(self, initval=None):
        # Complex beam parameter for the initial guess (or a user override).
        if initval is None:
            return ComplexBeamParam.from_Z_ZR(
                self.Z0_ZR_init[0],
                self.Z0_ZR_init[1],
                wavelength_m=self.wavelength_m,
            )
        else:
            return initval

    def rep(self, place_in=0):
        """Print a code snippet reconstructing the fitted beam parameter.

        place_in shifts the reported waist location by that many inches.
        """
        print(self.Z0_ZR_fit)
        try:
            print(
                "ComplexBeamParam.from_Z_ZR({0}, {1}, wavelen = {2})".format(
                    self.Z0_ZR_fit[0] + place_in * 0.0254,
                    self.Z0_ZR_fit[1],
                    self.wavelength_m * 1e9,
                )
            )
        except Exception as e:
            print(e)

    def plot(
        self,
        with_init=False,
        with_m2fit=True,
        with_fit=True,
    ):
        """Plot measured diameters together with the requested fit curves.

        Returns the mplfigB figure wrapper.
        """
        F = mplfigB()
        diff = max(self.Z_m) - min(self.Z_m)
        Z_pts = np.linspace(min(self.Z_m) - diff / 8, max(self.Z_m) + diff / 8, 100)
        # BUGFIX: wavelength_m is stored in meters, so int(self.wavelength_m)
        # was always 0 and these branches never matched; compare in nm.
        if int(round(self.wavelength_m * 1e9)) == 1064:
            color_pts = "red"
            color_fit = "orange"
            color_init = "purple"
        elif int(round(self.wavelength_m * 1e9)) == 532:
            color_pts = "blue"
            color_fit = "green"
            color_init = "purple"
        else:
            color_pts = "blue"
            color_fit = "black"
            color_init = "purple"
        F.ax0.scatter(
            self.Z_in,
            self.D_um,
            color=color_pts,
            label="data",
        )
        fit_label = u"Fit: $Z_0$ = {Zm} = {Zin:.1f}in\nW0={W0} D0={D0}\nZR={ZR}\n $M^2$={m2:.2f} (L<{MML:.1f}%)".format(
            Zm=utils.str_m(-self.q_fit.Z, d=3),
            Zin=-self.q_fit.Z / 0.0254,
            ZR=utils.str_m(self.q_fit.ZR, d=4),
            W0=utils.str_m(self.q_fit.W0, d=4),
            D0=utils.str_m(2 * self.q_fit.W0, d=4),
            m2=self.m2,
            MML=(self.m2 - 1) / 7 * 100,
        )
        if with_fit:
            F.ax0.plot(
                Z_pts / 0.0254,
                self.m2 ** 0.5 * 2 * 1e6 * self.q_fit.propagate_distance(Z_pts).W,
                color=color_fit,
                label=fit_label,
            )
        if with_m2fit:
            fit_label_m2 = u"Fit: $Z_0$ = {Zm} = {Zin:.1f}in\nW0={W0} D0={D0}\nZR={ZR}\n $M^2$={m2:.2f} (L<{MML:.1f}%, $L^*$={XL:.1f}%)".format(
                Zm=utils.str_m(-self.q_fit_m2.Z, d=3),
                Zin=-self.q_fit_m2.Z / 0.0254,
                ZR=utils.str_m(self.q_fit_m2.ZR, d=4),
                W0=utils.str_m(self.q_fit_m2.W0, d=4),
                D0=utils.str_m(2 * self.q_fit.W0, d=4),
                m2=self.Z0_ZR_m2_fit[2],
                MML=(1 - (1 - (self.Z0_ZR_m2_fit[2] - 1) / 4) ** 2) * 100,
                XL=(1 - abs(self.q_fit_m2.overlap_LG(self.q_fit)) ** 2) * 100,
            )
            # https://www.dataray.com/blogs/dataray-blog/m-sup2-and-high-order-modes
            F.ax0.plot(
                Z_pts / 0.0254,
                self.Z0_ZR_m2_fit[2] ** 0.5
                * 2
                * 1e6
                * self.q_fit_m2.propagate_distance(Z_pts).W,
                color=color_fit,
                label=fit_label_m2,
                ls="--",
            )
            F.ax0.plot(
                Z_pts / 0.0254,
                2 * 1e6 * self.q_fit_m2.propagate_distance(Z_pts).W,
                color=color_fit,
                label="embedded HG00 in m2 fit",
                ls=":",
            )
        if with_init:
            F.ax0.plot(
                Z_pts / 0.0254,
                2 * 1e6 * self.q_init.propagate_distance(Z_pts).W,
                color=color_init,
                label="Initial",
            )
        F.ax0.set_xlabel("Inches from reference")
        F.ax0.set_ylabel("2σ intensity\ndiameter[μm]")
        F.ax0.set_title(
            "Beam Parameter Fit (at {0:.0f}nm)".format(self.wavelength_m * 1e9)
        )
        F.ax0.legend(loc="best")
        return F
| [
"numpy.mean",
"wavestate.utilities.mpl.mplfigB",
"numpy.argsort",
"numpy.array",
"numpy.argmin"
] | [((755, 768), 'numpy.array', 'np.array', (['arg'], {}), '(arg)\n', (763, 768), True, 'import numpy as np\n'), ((1066, 1079), 'numpy.array', 'np.array', (['arg'], {}), '(arg)\n', (1074, 1079), True, 'import numpy as np\n'), ((1920, 1939), 'numpy.argmin', 'np.argmin', (['self.R_m'], {}), '(self.R_m)\n', (1929, 1939), True, 'import numpy as np\n'), ((3247, 3266), 'numpy.argmin', 'np.argmin', (['self.R_m'], {}), '(self.R_m)\n', (3256, 3266), True, 'import numpy as np\n'), ((5647, 5656), 'wavestate.utilities.mpl.mplfigB', 'mplfigB', ([], {}), '()\n', (5654, 5656), False, 'from wavestate.utilities.mpl import mplfigB\n'), ((966, 979), 'numpy.array', 'np.array', (['arg'], {}), '(arg)\n', (974, 979), True, 'import numpy as np\n'), ((1275, 1288), 'numpy.array', 'np.array', (['arg'], {}), '(arg)\n', (1283, 1288), True, 'import numpy as np\n'), ((1443, 1463), 'numpy.argsort', 'np.argsort', (['self.R_m'], {}), '(self.R_m)\n', (1453, 1463), True, 'import numpy as np\n'), ((1591, 1620), 'numpy.mean', 'np.mean', (['self.Z_m[idx_W0[:4]]'], {}), '(self.Z_m[idx_W0[:4]])\n', (1598, 1620), True, 'import numpy as np\n')] |
import scipy.misc as misc
import torch
import copy
import torchvision.models as models
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
# FCN Net model class for semantic segmentation
##############################################This is a standart FCN with the lat layer split into prediction of binary map for every class########################################################################333
class Net(nn.Module):
    """FCN/PSP-style semantic segmentation network.

    A standard FCN with a ResNet-101 encoder and a Pyramid Scene Parsing
    branch, whose last layer is split into an independent binary (2-channel)
    prediction head for every class in ``CatDict``.
    """

    def __init__(self, CatDict):
        """Build the network.

        CatDict: iterable of category names; one 2-channel output head is
        created per category (predicting per-pixel background/foreground).
        """
        super(Net, self).__init__()
        # ---- Pretrained ResNet-101 backbone used as the encoder ----
        self.Encoder = models.resnet101(pretrained=True)
        # ---- Pyramid Scene Parsing (PSP) branch: process the deepest
        # feature map at several scales, then concatenate the results ----
        self.PSPScales = [1, 1 / 2, 1 / 4, 1 / 8]
        self.PSPLayers = nn.ModuleList()  # one conv per pyramid scale
        for Ps in self.PSPScales:
            self.PSPLayers.append(nn.Sequential(
                nn.Conv2d(2048, 1024, stride=1, kernel_size=3, padding=1, bias=True)))
        # Squeeze the concatenated pyramid features back to 512 channels.
        self.PSPSqueeze = nn.Sequential(
            nn.Conv2d(4096, 512, stride=1, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 512, stride=1, kernel_size=3, padding=0, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU()
        )
        # ---- Skip-connection squeeze layers applied to encoder features
        # before they are concatenated with upsampled decoder features ----
        self.SkipConnections = nn.ModuleList()
        self.SkipConnections.append(nn.Sequential(
            nn.Conv2d(1024, 512, stride=1, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU()))
        self.SkipConnections.append(nn.Sequential(
            nn.Conv2d(512, 256, stride=1, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU()))
        self.SkipConnections.append(nn.Sequential(
            nn.Conv2d(256, 256, stride=1, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU()))
        # ---- Squeeze applied to the concat of (upsampled decoder features
        # + skip-connection features) at each decoder stage ----
        self.SqueezeUpsample = nn.ModuleList()
        self.SqueezeUpsample.append(nn.Sequential(
            nn.Conv2d(1024, 512, stride=1, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU()))
        self.SqueezeUpsample.append(nn.Sequential(
            nn.Conv2d(256 + 512, 256, stride=1, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU()))
        self.SqueezeUpsample.append(nn.Sequential(
            nn.Conv2d(256 + 256, 256, stride=1, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU()))
        # ---- Final per-category prediction heads ----
        # Heads are stored both in a dict (lookup by name) and a ModuleList
        # (so their parameters are registered with the module).
        self.OutLayersList = nn.ModuleList()
        self.OutLayersDict = {}
        for f, nm in enumerate(CatDict):
            self.OutLayersDict[nm] = nn.Conv2d(256, 2, stride=1, kernel_size=3, padding=1, bias=False)
            self.OutLayersList.append(self.OutLayersDict[nm])

    def forward(self, Images, UseGPU=True, TrainMode=True, FreezeBatchNormStatistics=False):
        """Run inference.

        Images: numpy array of RGB images; the two transposes below convert
        NHWC -> NCHW, so the expected shape is (batch, H, W, 3) -- TODO confirm
        against callers.
        Returns (OutProbDict, OutLbDict): per-category softmax probability
        maps and per-pixel label maps (also stored on self).
        """
        # ---- Convert the image to a torch tensor and normalize values ----
        RGBMean = [123.68, 116.779, 103.939]
        RGBStd = [65, 65, 65]
        if TrainMode:
            tp = torch.FloatTensor
        else:
            # Half precision for evaluation.
            self.half()
            tp = torch.HalfTensor
        InpImages = torch.autograd.Variable(torch.from_numpy(Images.astype(float)), requires_grad=False).transpose(2, 3).transpose(1, 2).type(tp)
        if FreezeBatchNormStatistics == True: self.eval()
        # ---- Move to GPU or CPU ----
        if UseGPU:
            InpImages = InpImages.cuda()
            self.cuda()
        else:
            self = self.cpu()
            self.float()
            InpImages = InpImages.type(torch.float).cpu()
        # Channel-wise normalization of the input image.
        for i in range(len(RGBMean)): InpImages[:, i, :, :] = (InpImages[:, i, :, :] - RGBMean[i]) / RGBStd[i]
        x = InpImages
        SkipConFeatures = []  # feature maps of encoder layers used for skip connections
        # ---- Run the encoder, collecting skip-connection features ----
        x = self.Encoder.conv1(x)
        x = self.Encoder.bn1(x)
        x = self.Encoder.relu(x)
        x = self.Encoder.maxpool(x)
        x = self.Encoder.layer1(x)
        SkipConFeatures.append(x)
        x = self.Encoder.layer2(x)
        SkipConFeatures.append(x)
        x = self.Encoder.layer3(x)
        SkipConFeatures.append(x)
        x = self.Encoder.layer4(x)
        # ---- PSP branch: rescale features, convolve, resize back, concat ----
        PSPSize = (x.shape[2], x.shape[3])  # size of the original feature map
        PSPFeatures = []  # results of the various scaled processings
        for i, PSPLayer in enumerate(self.PSPLayers):
            # BUGFIX: np.int was removed in NumPy 1.24; np.int64 matches the
            # platform integer the original alias produced on 64-bit systems.
            NewSize = (np.array(PSPSize) * self.PSPScales[i]).astype(np.int64)
            if NewSize[0] < 1: NewSize[0] = 1
            if NewSize[1] < 1: NewSize[1] = 1
            y = nn.functional.interpolate(x, tuple(NewSize), mode='bilinear')
            y = PSPLayer(y)
            y = nn.functional.interpolate(y, PSPSize, mode='bilinear')
            PSPFeatures.append(y)
        x = torch.cat(PSPFeatures, dim=1)
        x = self.PSPSqueeze(x)
        # ---- Decoder: upsample and merge with encoder skip features ----
        for i in range(len(self.SkipConnections)):
            sp = (SkipConFeatures[-1 - i].shape[2], SkipConFeatures[-1 - i].shape[3])
            x = nn.functional.interpolate(x, size=sp, mode='bilinear')  # resize
            x = torch.cat((self.SkipConnections[i](SkipConFeatures[-1 - i]), x), dim=1)
            x = self.SqueezeUpsample[i](x)
        # ---- Final prediction: one binary mask per class ----
        self.OutProbDict = {}
        self.OutLbDict = {}
        for nm in self.OutLayersDict:
            l = self.OutLayersDict[nm](x)  # per-pixel prediction
            l = nn.functional.interpolate(l, size=InpImages.shape[2:4], mode='bilinear')  # resize to original image size
            Prob = F.softmax(l, dim=1)  # class probability per pixel
            tt, Labels = l.max(1)  # label per pixel
            self.OutProbDict[nm] = Prob
            self.OutLbDict[nm] = Labels
        return self.OutProbDict, self.OutLbDict
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.ModuleList",
"torchvision.models.resnet101",
"torch.nn.Conv2d",
"numpy.array",
"torch.nn.functional.interpolate",
"torch.nn.functional.softmax",
"torch.cat"
] | [((1029, 1062), 'torchvision.models.resnet101', 'models.resnet101', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1045, 1062), True, 'import torchvision.models as models\n'), ((1289, 1304), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1302, 1304), True, 'import torch.nn as nn\n'), ((2094, 2109), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2107, 2109), True, 'import torch.nn as nn\n'), ((2943, 2958), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2956, 2958), True, 'import torch.nn as nn\n'), ((3765, 3780), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3778, 3780), True, 'import torch.nn as nn\n'), ((7715, 7744), 'torch.cat', 'torch.cat', (['PSPFeatures'], {'dim': '(1)'}), '(PSPFeatures, dim=1)\n', (7724, 7744), False, 'import torch\n'), ((1616, 1684), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4096)', '(512)'], {'stride': '(1)', 'kernel_size': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(4096, 512, stride=1, kernel_size=1, padding=0, bias=False)\n', (1625, 1684), True, 'import torch.nn as nn\n'), ((1702, 1721), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (1716, 1721), True, 'import torch.nn as nn\n'), ((1739, 1748), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1746, 1748), True, 'import torch.nn as nn\n'), ((1766, 1833), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)'], {'stride': '(1)', 'kernel_size': '(3)', 'padding': '(0)', 'bias': '(False)'}), '(512, 512, stride=1, kernel_size=3, padding=0, bias=False)\n', (1775, 1833), True, 'import torch.nn as nn\n'), ((1851, 1870), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (1865, 1870), True, 'import torch.nn as nn\n'), ((1888, 1897), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1895, 1897), True, 'import torch.nn as nn\n'), ((3904, 3969), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(2)'], {'stride': '(1)', 'kernel_size': '(3)', 'padding': '(1)', 'bias': '(False)'}), '(256, 2, stride=1, kernel_size=3, 
padding=1, bias=False)\n', (3913, 3969), True, 'import torch.nn as nn\n'), ((7526, 7580), 'torch.nn.functional.interpolate', 'nn.functional.interpolate', (['y', 'PSPSize'], {'mode': '"""bilinear"""'}), "(y, PSPSize, mode='bilinear')\n", (7551, 7580), True, 'import torch.nn as nn\n'), ((8164, 8218), 'torch.nn.functional.interpolate', 'nn.functional.interpolate', (['x'], {'size': 'sp', 'mode': '"""bilinear"""'}), "(x, size=sp, mode='bilinear')\n", (8189, 8218), True, 'import torch.nn as nn\n'), ((9218, 9290), 'torch.nn.functional.interpolate', 'nn.functional.interpolate', (['l'], {'size': 'InpImages.shape[2:4]', 'mode': '"""bilinear"""'}), "(l, size=InpImages.shape[2:4], mode='bilinear')\n", (9243, 9290), True, 'import torch.nn as nn\n'), ((9348, 9367), 'torch.nn.functional.softmax', 'F.softmax', (['l'], {'dim': '(1)'}), '(l, dim=1)\n', (9357, 9367), True, 'import torch.nn.functional as F\n'), ((2181, 2249), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(512)'], {'stride': '(1)', 'kernel_size': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(1024, 512, stride=1, kernel_size=1, padding=0, bias=False)\n', (2190, 2249), True, 'import torch.nn as nn\n'), ((2267, 2286), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (2281, 2286), True, 'import torch.nn as nn\n'), ((2304, 2313), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2311, 2313), True, 'import torch.nn as nn\n'), ((2387, 2454), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)'], {'stride': '(1)', 'kernel_size': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(512, 256, stride=1, kernel_size=1, padding=0, bias=False)\n', (2396, 2454), True, 'import torch.nn as nn\n'), ((2472, 2491), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (2486, 2491), True, 'import torch.nn as nn\n'), ((2509, 2518), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2516, 2518), True, 'import torch.nn as nn\n'), ((2592, 2659), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'stride': '(1)', 
'kernel_size': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(256, 256, stride=1, kernel_size=1, padding=0, bias=False)\n', (2601, 2659), True, 'import torch.nn as nn\n'), ((2677, 2696), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (2691, 2696), True, 'import torch.nn as nn\n'), ((2714, 2723), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2721, 2723), True, 'import torch.nn as nn\n'), ((3030, 3098), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(512)'], {'stride': '(1)', 'kernel_size': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(1024, 512, stride=1, kernel_size=1, padding=0, bias=False)\n', (3039, 3098), True, 'import torch.nn as nn\n'), ((3116, 3135), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (3130, 3135), True, 'import torch.nn as nn\n'), ((3153, 3162), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3160, 3162), True, 'import torch.nn as nn\n'), ((3236, 3309), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 + 512)', '(256)'], {'stride': '(1)', 'kernel_size': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(256 + 512, 256, stride=1, kernel_size=1, padding=0, bias=False)\n', (3245, 3309), True, 'import torch.nn as nn\n'), ((3327, 3346), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (3341, 3346), True, 'import torch.nn as nn\n'), ((3364, 3373), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3371, 3373), True, 'import torch.nn as nn\n'), ((3447, 3520), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256 + 256)', '(256)'], {'stride': '(1)', 'kernel_size': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(256 + 256, 256, stride=1, kernel_size=1, padding=0, bias=False)\n', (3456, 3520), True, 'import torch.nn as nn\n'), ((3538, 3557), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (3552, 3557), True, 'import torch.nn as nn\n'), ((3575, 3584), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3582, 3584), True, 'import torch.nn as nn\n'), ((1443, 1511), 'torch.nn.Conv2d', 'nn.Conv2d', 
(['(2048)', '(1024)'], {'stride': '(1)', 'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(2048, 1024, stride=1, kernel_size=3, padding=1, bias=True)\n', (1452, 1511), True, 'import torch.nn as nn\n'), ((7116, 7133), 'numpy.array', 'np.array', (['PSPSize'], {}), '(PSPSize)\n', (7124, 7133), True, 'import numpy as np\n')] |
import argparse
import numpy as np
from util import load_data, separate_data
from grakel import GraphKernel
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
def convert(graphs):
    """Translate graph objects into grakel-style inputs.

    Returns a tuple ``(X, y)`` where ``X[i]`` is a two-element list
    ``[adjacency_dict, node_label_dict]`` (both keyed by node index) for
    graph *i*, and ``y[i]`` is that graph's class label.
    """
    features = []
    labels = []
    for g in graphs:
        adjacency = dict(enumerate(g.neighbors))
        tags = dict(enumerate(g.node_tags))
        features.append([adjacency, tags])
        labels.append(g.label)
    return features, labels
def main():
    """Parse CLI options, build the WL subtree kernel, and evaluate a
    precomputed-kernel SVM over a sweep of C values on one CV fold."""
    parser = argparse.ArgumentParser(description='WL subtree kernel')
    parser.add_argument('--dataset', type=str, default="MUTAG",
                        help='name of dataset (default: MUTAG)')
    parser.add_argument('--seed', type=int, default=0,
                        help='random seed for splitting the dataset into 10 (default: 0)')
    parser.add_argument('--fold_idx', type=int, default=0,
                        help='the index of fold in 10-fold validation. Should be less then 10.')
    parser.add_argument('--iter', type=int, default=5,
                        help='Number of iteration for the WL')
    parser.add_argument('--normalize', action="store_true",
                        help='normalize the feature or not')
    parser.add_argument('--filename', type=str, default="", help='output file')
    args = parser.parse_args()

    np.random.seed(0)
    graphs, num_classes = load_data(args.dataset, False)
    # 10-fold cross validation: evaluate only the requested fold.
    train_graphs, test_graphs = separate_data(graphs, args.seed, args.fold_idx)
    X_train, y_train = convert(train_graphs)
    X_test, y_test = convert(test_graphs)

    kernel_spec = [{"name": "weisfeiler_lehman", "niter": args.iter},
                   {"name": "subtree_wl"}]
    wl_kernel = GraphKernel(kernel=kernel_spec, normalize=args.normalize)
    K_train = wl_kernel.fit_transform(X_train)
    K_test = wl_kernel.transform(X_test)

    # Sweep the SVM regularization hyper-parameter on the precomputed kernel.
    train_acc = []
    test_acc = []
    for C in (0.01, 0.1, 1, 10, 100):
        clf = SVC(kernel='precomputed', C=C)
        clf.fit(K_train, y_train)
        train_acc.append(accuracy_score(y_train, clf.predict(K_train)) * 100)
        test_acc.append(accuracy_score(y_test, clf.predict(K_test)) * 100)

    print(train_acc)
    print(test_acc)
    if args.filename != "":
        np.savetxt(args.filename, np.array([train_acc, test_acc]).transpose())


if __name__ == '__main__':
    main()
"util.load_data",
"argparse.ArgumentParser",
"util.separate_data",
"numpy.array",
"grakel.GraphKernel",
"numpy.random.seed",
"sklearn.metrics.accuracy_score",
"sklearn.svm.SVC"
] | [((535, 591), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""WL subtree kernel"""'}), "(description='WL subtree kernel')\n", (558, 591), False, 'import argparse\n'), ((1521, 1538), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1535, 1538), True, 'import numpy as np\n'), ((1565, 1595), 'util.load_data', 'load_data', (['args.dataset', '(False)'], {}), '(args.dataset, False)\n', (1574, 1595), False, 'from util import load_data, separate_data\n'), ((1691, 1738), 'util.separate_data', 'separate_data', (['graphs', 'args.seed', 'args.fold_idx'], {}), '(graphs, args.seed, args.fold_idx)\n', (1704, 1738), False, 'from util import load_data, separate_data\n'), ((1914, 2040), 'grakel.GraphKernel', 'GraphKernel', ([], {'kernel': "[{'name': 'weisfeiler_lehman', 'niter': args.iter}, {'name': 'subtree_wl'}]", 'normalize': 'args.normalize'}), "(kernel=[{'name': 'weisfeiler_lehman', 'niter': args.iter}, {\n 'name': 'subtree_wl'}], normalize=args.normalize)\n", (1925, 2040), False, 'from grakel import GraphKernel\n'), ((2261, 2291), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""precomputed"""', 'C': 'C'}), "(kernel='precomputed', C=C)\n", (2264, 2291), False, 'from sklearn.svm import SVC\n'), ((2437, 2474), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_train', 'y_pred_train'], {}), '(y_train, y_pred_train)\n', (2451, 2474), False, 'from sklearn.metrics import accuracy_score\n'), ((2506, 2541), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred_test'], {}), '(y_test, y_pred_test)\n', (2520, 2541), False, 'from sklearn.metrics import accuracy_score\n'), ((2658, 2689), 'numpy.array', 'np.array', (['[train_acc, test_acc]'], {}), '([train_acc, test_acc])\n', (2666, 2689), True, 'import numpy as np\n')] |
import logging
from gensim.models import word2vec
import numpy as np
from scipy import linalg
# Translation of the note below: compute similarity between legal cases.
# First tokenize each sentence, then look up the word vector of every word;
# the sentence vector is the average of all its word vectors; finally the
# cosine of the angle between two sentence vectors is used as their similarity.
'''
计算案件之间的相似度
首先对句子分词,然后获取每个单词对应的词向量
然后将所有单词对应的词向量相加求平均值,作为句子的向量
最后,计算句子的向量的夹角余弦值,作为它们之间的相似度
'''
logging.basicConfig(
    format='%(asctime)s : %(levelname)s : %(message)s',
    level=logging.INFO)
# Use the word2vec module from gensim; this trains the model at import time
# from the whitespace-tokenized corpus file (one case per line).
sentences = word2vec.LineSentence('案件.txt')
model = word2vec.Word2Vec(sentences, hs=1, min_count=1, window=5, vector_size=64)
# Exploratory snippet kept from development: print words similar to a query.
# req_count = 5
# for key in model.wv.similar_by_word('被告人', topn=100):
#     if len(key[0]) == 3:
#         req_count -= 1
#         print(key[0], key[1])
#     if req_count == 0:
#         break
def sentence_vector(s):
    """Return the 64-dim embedding of sentence *s*.

    *s* is a pre-tokenized sentence whose words are separated by spaces.
    The embedding is the mean of the word2vec vectors of its words.

    BUGFIX: the original divided the sum by the raw token count
    ``len(s.split(" "))`` even though empty tokens (from consecutive or
    trailing spaces) were skipped in the sum, which skewed the mean.  The
    divisor now counts only the words actually added; downstream cosine
    similarity is scale-invariant, so existing results are unaffected in
    direction.
    """
    v = np.zeros(64)
    n_words = 0
    for word in s.split(" "):
        if word != "":
            v += model.wv[word]
            n_words += 1
    if n_words:  # an all-empty sentence stays the zero vector
        v /= n_words
    return v
def vector_similarity(s1, s2):
    """Similarity of two sentences: the cosine of the angle between their
    sentence vectors (see ``sentence_vector``)."""
    a = sentence_vector(s1)
    b = sentence_vector(s2)
    denom = linalg.norm(a) * linalg.norm(b)
    return np.dot(a, b) / denom
with open("案件.txt", "r", encoding="utf-8") as f:
    contents = f.readlines()
# Pairwise similarity matrix over all cases.
matrix = np.zeros((len(contents), len(contents)))
for i in range(len(contents)):
    for j in range(len(contents)):
        # Store the similarity between every pair of cases in the matrix.
        matrix[i][j] = vector_similarity(
            contents[i].strip(), contents[j].strip())
# BUGFIX: the output file handle was opened with open() and never closed;
# use a context manager so it is flushed and closed even on error.
with open("result.txt", "w", encoding="utf-8") as f1:
    for j in range(len(contents)):
        # Find the most similar case.
        # NOTE: every case has similarity 1 with itself, so we take the
        # second-largest similarity (assumes at least two cases exist).
        index = np.argsort(matrix[j])[-2]
        f1.write("案件" + str(j + 1) + ":" + '\t')
        f1.write(contents[j])
        f1.write("案件" + str(index + 1) + ":" + '\t')
        f1.write(contents[index])
        f1.write("相似度: " + str(matrix[j][index]) + '\n\n')
| [
"logging.basicConfig",
"gensim.models.word2vec.Word2Vec",
"numpy.argsort",
"numpy.dot",
"numpy.zeros",
"gensim.models.word2vec.LineSentence",
"scipy.linalg.norm"
] | [((224, 319), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO)\n", (243, 319), False, 'import logging\n'), ((361, 392), 'gensim.models.word2vec.LineSentence', 'word2vec.LineSentence', (['"""案件.txt"""'], {}), "('案件.txt')\n", (382, 392), False, 'from gensim.models import word2vec\n'), ((401, 474), 'gensim.models.word2vec.Word2Vec', 'word2vec.Word2Vec', (['sentences'], {'hs': '(1)', 'min_count': '(1)', 'window': '(5)', 'vector_size': '(64)'}), '(sentences, hs=1, min_count=1, window=5, vector_size=64)\n', (418, 474), False, 'from gensim.models import word2vec\n'), ((791, 803), 'numpy.zeros', 'np.zeros', (['(64)'], {}), '(64)\n', (799, 803), True, 'import numpy as np\n'), ((1063, 1077), 'numpy.dot', 'np.dot', (['v1', 'v2'], {}), '(v1, v2)\n', (1069, 1077), True, 'import numpy as np\n'), ((1081, 1096), 'scipy.linalg.norm', 'linalg.norm', (['v1'], {}), '(v1)\n', (1092, 1096), False, 'from scipy import linalg\n'), ((1099, 1114), 'scipy.linalg.norm', 'linalg.norm', (['v2'], {}), '(v2)\n', (1110, 1114), False, 'from scipy import linalg\n'), ((1625, 1646), 'numpy.argsort', 'np.argsort', (['matrix[j]'], {}), '(matrix[j])\n', (1635, 1646), True, 'import numpy as np\n')] |
import functools
import warnings
import numpy as np
import numpy.linalg as npla
import sys, os
import time
from PES.compute_covariance import *
from PES.initial_sample import *
from PES.hyper_samples import *
from PES.utilities import *
from PES.sample_minimum import *
from PES.PES import *
from PES.compute_posterior import *
from PES.EP import *
from PES.global_optimization import *
from PES.target_function import *
#The function to run PES to minimize the target function.
#Parameters: @target_function: the objective function we want to minimize
# @x_minimum: the lower bounds for each dimension
# @x_maximum: the upper bounds for each dimension
# @dimension: the dimensions of the objective function
# @number_of_hyperparameter_sets: the number of the samples of the hyperparameters of the kernel we want to draw.
# It is the M defined in the paper.
# @number_of_burnin: number of burnins
# @sampling_method: the method used to sample the posterior distribution of the hyperparameters. User can choose
# 'mcmc' or 'hmc'.
# @number_of_initial_points: the number of samples we want to use as initial observations
# @number_of_experiments: number of experiments we want to run. For each experiment, we use different randomizations
# for starting points.
# @number_of_iterations: number of iterations we want to run for each experiment
# @number_of_features: the number of features that we would like to use for feature mapping. It is the "m" in the paper.
# @optimization_method: optimization method used when calling global_optimization function. User can choose any method
# specified in the scipy.optimize.minimize
# @seed: seed specified for randomization
def run_PES(target_function, x_minimum, x_maximum, dimension, number_of_hyperparameter_sets = 100, number_of_burnin = 50, \
        sampling_method = 'mcmc', number_of_initial_points = 3, number_of_experiments = 1, number_of_iterations = 60, \
        number_of_features = 1000, optimization_method = 'SLSQP', seed = None):
    """Run Predictive Entropy Search (PES) Bayesian optimization on ``target_function``.

    For each of ``number_of_experiments`` repetitions: draw a few random initial
    samples, sample GP hyper-parameters from their posterior, then for
    ``number_of_iterations`` iterations pick the next evaluation point by
    maximizing the PES acquisition function, evaluate the target there, refit
    the hyper-parameters and record the recommendation (the minimizer of the
    posterior mean). Intermediate results are appended to text files via
    ``write_data_to_file``.

    Parameters
    ----------
    target_function : callable
        Objective to minimize; maps a length-``dimension`` array to a scalar.
    x_minimum, x_maximum : array-like
        Lower / upper bounds of the search box, one entry per dimension.
    dimension : int
        Dimensionality of the search space.
    number_of_hyperparameter_sets : int
        Number of GP hyper-parameter posterior draws.
    number_of_burnin : int
        Burn-in steps for the hyper-parameter sampler.
    sampling_method : str
        Sampler used by ``sample_hypers`` (e.g. 'mcmc').
    number_of_initial_points : int
        Number of random points evaluated before the PES loop starts.
    number_of_experiments : int
        Number of independent repetitions of the whole optimization.
    number_of_iterations : int
        Number of PES iterations per experiment.
    number_of_features : int
        Number of random features used when sampling the GP minimum (the "m"
        of the random-feature approximation in the PES paper).
    optimization_method : str
        ``scipy.optimize.minimize`` method used for the acquisition and
        posterior-mean optimizations (e.g. 'SLSQP').
    seed : int or None
        Seed for the random number generator; None leaves it unseeded.
    """
    warnings.filterwarnings('ignore')
    check_result_file_exist()
    if seed is not None:
        np.random.seed(seed)
    x_min = x_minimum
    x_max = x_maximum
    target = target_function
    d = dimension
    num_of_hyperSets_initial = number_of_hyperparameter_sets
    number_burn = number_of_burnin
    sample_method = sampling_method
    bnds = get_bounds(x_min, x_max)
    opt_method = 'L-BFGS-B'
    num_initial_points = number_of_initial_points
    for pp in range(number_of_experiments):
        write_header_to_files(pp)
        warnings.filterwarnings('ignore')
        # Evaluate the target at a few random points to seed the GP.
        Xsamples = initial_samples(x_min, x_max, num_initial_points)
        write_data_to_file("Xsamples.txt", Xsamples)
        # 'guesses' first stores the initialized guesses.
        guesses = Xsamples
        write_data_to_file("guesses.txt", guesses)
        Ysamples = np.zeros((Xsamples.shape[0]))
        for i in range(Xsamples.shape[0]):
            Ysamples[i] = target(Xsamples[i,:])
        # Reshape the observations into a column vector.
        Ysamples = np.asarray([Ysamples])
        Ysamples = Ysamples.T
        print('Best so far in the initial data ' + str((min(Ysamples))[0]))
        write_data_to_file("Ysamples.txt", Ysamples)
        # Sample from the posterior distribution of the GP hyper-parameters.
        with hide_prints():
            noise, l, sigma = sample_hypers(Xsamples, Ysamples, d, 0.3, num_of_hyperSets_initial, number_burn, sample_method, seed)
        for g in range(number_of_iterations):
            print('PES, ' + str(pp) + 'th job, ' + str(g) + 'th iteration')
            start_1 = time.time()
            num_of_hyperSets = num_of_hyperSets_initial
            # Remember current sizes so a failed iteration can be rolled back.
            Xsamples_count_before = len(Xsamples)
            Ysamples_count_before = len(Ysamples)
            initial_point = guesses[-1,:]
            num_of_features = number_of_features
            num_of_obser = len(Ysamples)
            x_minimum_vec = []
            K_vec = []
            K_star_min_vec = []
            K_plus_W_tilde_inverse_vec = []
            m_f_minimum_vec = []
            v_f_minimum_vec = []
            c_and_m_vec = []
            opt_method = 'L-BFGS-B'
            # Turn warnings into errors so numerically unstable
            # hyper-parameter draws are detected and skipped.
            warnings.filterwarnings("error")
            valid_num_hyperSets = 0
            # For each hyper-parameter draw: sample a GP minimum via random
            # features, then run Expectation Propagation around it.
            for j in range(num_of_hyperSets):
                opt_method = 'L-BFGS-B'
                try:
                    result = sample_min_with_randFeatures(num_of_features, d, Xsamples, Ysamples, sigma[j], l[j], noise[j], initial_point, opt_method, False, bnds)
                    x_minimum = result.x
                    x_minimum_vec.append(x_minimum)
                    # L-BFGS-B returns the inverse Hessian as a
                    # LinearOperator; densify before inverting.
                    if opt_method == 'L-BFGS-B':
                        hess_at_min_inverse = result.hess_inv.todense()
                    else:
                        hess_at_min_inverse = result.hess_inv
                    hess_at_min = compute_inverse(hess_at_min_inverse)
                    value_of_nObservations = (Ysamples.T)[0]
                    K, K_star_min, K_plus_W_tilde_inverse, m_f_minimum, v_f_minimum, c_and_m = Expectation_Propagation(Xsamples, value_of_nObservations, num_of_obser, x_minimum, d, l[j,:], sigma[j], noise[j], hess_at_min)
                    K_vec.append(K)
                    K_star_min_vec.append(K_star_min)
                    K_plus_W_tilde_inverse_vec.append(K_plus_W_tilde_inverse)
                    m_f_minimum_vec.append(m_f_minimum)
                    v_f_minimum_vec.append(v_f_minimum)
                    c_and_m_vec.append(c_and_m)
                    valid_num_hyperSets = valid_num_hyperSets + 1
                except Exception:
                    # Best-effort: skip hyper-parameter draws that fail.
                    pass
            num_of_hyperSets = valid_num_hyperSets
            opt_method = optimization_method
            warnings.filterwarnings("error")
            PES_fail = False
            try:
                # Maximize the PES acquisition over the search box and
                # evaluate the target at the suggested point.
                PES = functools.partial(PES_aquisition_function_multi, Xsamples = Xsamples, x_minimum = x_minimum_vec, l_vec = l, \
                                        sigma = sigma, noise = noise, K = K_vec, K_star_min = K_star_min_vec, \
                                        K_plus_W_tilde_inverse = K_plus_W_tilde_inverse_vec, \
                                        m_f_minimum = m_f_minimum_vec, v_f_minimum = v_f_minimum_vec, c_and_m = c_and_m_vec, \
                                        num_of_hyperSets = num_of_hyperSets)
                ret = global_optimization(PES, d, x_min, x_max, gradient = None, gridsize = 500, stored_min_guesses = None, \
                                        using_grid = True, optimize_method = opt_method, maxiter = 2000, bnds = bnds)
                optimum = np.array(ret.x)
                optimum_value = np.array([target(optimum)])
            except Exception:
                print('PES failed')
                PES_fail = True
            if PES_fail:
                # Re-draw the hyper-parameters and retry this iteration.
                warnings.filterwarnings('ignore')
                with hide_prints():
                    noise, l, sigma = sample_hypers(Xsamples, Ysamples, d, 0.3, num_of_hyperSets_initial, number_burn, sample_method, seed)
                print('return back due to PES fail')
                continue
            Xsamples = np.vstack((Xsamples, optimum))
            Ysamples = np.vstack((Ysamples, optimum_value))
            end_1 = time.time()
            print('PES takes ' + str(end_1 - start_1) + ' seconds')
            print('PES suggests: ')
            print(optimum)
            start_2 = time.time()
            # Re-sample the GP hyper-parameters with the new observation.
            warnings.filterwarnings('ignore')
            num_of_hyperSets = num_of_hyperSets_initial
            try:
                with hide_prints():
                    noise, l, sigma = sample_hypers(Xsamples, Ysamples, d, 0.3, num_of_hyperSets_initial, number_burn, sample_method, seed)
            except Exception:
                # Roll back the newly added observation before retrying.
                if len(Xsamples) > Xsamples_count_before:
                    Xsamples = Xsamples[:-1,:]
                if len(Ysamples) > Ysamples_count_before:
                    Ysamples = Ysamples[:-1]
                print('Sampling hyperparameters of posterior GP failed')
                continue
            end_2 = time.time()
            print('Retraining the model takes '+ str(end_2 - start_2) + ' seconds')
            write_data_to_file("Xsamples.txt", optimum)
            write_data_to_file("Ysamples.txt", optimum_value)
            start_3 = time.time()
            # Precompute (K + noise*I)^-1 per hyper-parameter draw; the small
            # jitter term keeps the matrix numerically invertible.
            K_plus_I_inverse_vec = []
            num_of_obser = len(Xsamples)
            for w in range(num_of_hyperSets):
                K_plus_I_inverse = covNobeservations(Xsamples, num_of_obser, sigma[w], noise[w], l[w]) + sigma[w]*10**(-10)*np.eye((num_of_obser))
                K_plus_I_inverse_vec.append(np.array(K_plus_I_inverse))
            warnings.filterwarnings("error")
            try:
                # Recommend the minimizer of the posterior mean.
                pos_mean_function = functools.partial(posterior_mean_given_nObservations, X_nObservations = Xsamples, value_of_nObservations = Ysamples, \
                                                    K_plus_I_inverse = K_plus_I_inverse_vec, l = l, sigma = sigma, \
                                                    num_of_hyperSets = num_of_hyperSets)
                pos_mean_grad_function = functools.partial(posterior_gradient_given_nObservations, X_nObservations = Xsamples, value_of_nObservations = Ysamples, \
                                                         K_plus_I_inverse = K_plus_I_inverse_vec, l = l, sigma = sigma, \
                                                         num_of_hyperSets = num_of_hyperSets, d = d)
                ret_pos = global_optimization(pos_mean_function, d, x_min, x_max, gradient = pos_mean_grad_function, gridsize = 500, \
                                                stored_min_guesses = None, using_grid = True, optimize_method = opt_method, \
                                                maxiter = 2000, bnds = bnds)
            except Exception:
                if len(Xsamples) > Xsamples_count_before:
                    Xsamples = Xsamples[:-1,:]
                if len(Ysamples) > Ysamples_count_before:
                    Ysamples = Ysamples[:-1]
                print('Find the minimum of posterior mean failed')
                continue
            pos_optimum = np.array(ret_pos.x)
            write_data_to_file("guesses.txt", pos_optimum)
            current_value = target(pos_optimum)
            # Report whichever is better: the recommended point or the best
            # observation collected so far.
            if current_value < (min(Ysamples))[0]:
                print('The recommended point ' + str(pos_optimum))
            else:
                current_value = (min(Ysamples))[0]
                print('The recommended point ' + str(Xsamples[np.argmin(Ysamples)]))
            end_3 = time.time()
            print('Recommending the point takes '+ str(end_3 - start_3) + ' seconds')
            print('Best so far ' + str(current_value))
            guesses = np.vstack((guesses, pos_optimum))
| [
"numpy.eye",
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"functools.partial",
"numpy.random.seed",
"numpy.vstack",
"numpy.argmin",
"time.time",
"warnings.filterwarnings"
] | [((2329, 2362), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (2352, 2362), False, 'import warnings\n'), ((2431, 2451), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2445, 2451), True, 'import numpy as np\n'), ((3120, 3153), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (3143, 3153), False, 'import warnings\n'), ((3436, 3463), 'numpy.zeros', 'np.zeros', (['Xsamples.shape[0]'], {}), '(Xsamples.shape[0])\n', (3444, 3463), True, 'import numpy as np\n'), ((3583, 3605), 'numpy.asarray', 'np.asarray', (['[Ysamples]'], {}), '([Ysamples])\n', (3593, 3605), True, 'import numpy as np\n'), ((4418, 4429), 'time.time', 'time.time', ([], {}), '()\n', (4427, 4429), False, 'import time\n'), ((5059, 5091), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (5082, 5091), False, 'import warnings\n'), ((6645, 6677), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (6668, 6677), False, 'import warnings\n'), ((8147, 8177), 'numpy.vstack', 'np.vstack', (['(Xsamples, optimum)'], {}), '((Xsamples, optimum))\n', (8156, 8177), True, 'import numpy as np\n'), ((8202, 8238), 'numpy.vstack', 'np.vstack', (['(Ysamples, optimum_value)'], {}), '((Ysamples, optimum_value))\n', (8211, 8238), True, 'import numpy as np\n'), ((8260, 8271), 'time.time', 'time.time', ([], {}), '()\n', (8269, 8271), False, 'import time\n'), ((8445, 8456), 'time.time', 'time.time', ([], {}), '()\n', (8454, 8456), False, 'import time\n'), ((8550, 8583), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (8573, 8583), False, 'import warnings\n'), ((9195, 9206), 'time.time', 'time.time', ([], {}), '()\n', (9204, 9206), False, 'import time\n'), ((9457, 9468), 'time.time', 'time.time', ([], {}), '()\n', (9466, 9468), False, 'import time\n'), ((9835, 9867), 'warnings.filterwarnings', 
'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (9858, 9867), False, 'import warnings\n'), ((11326, 11345), 'numpy.array', 'np.array', (['ret_pos.x'], {}), '(ret_pos.x)\n', (11334, 11345), True, 'import numpy as np\n'), ((11769, 11780), 'time.time', 'time.time', ([], {}), '()\n', (11778, 11780), False, 'import time\n'), ((11949, 11982), 'numpy.vstack', 'np.vstack', (['(guesses, pos_optimum)'], {}), '((guesses, pos_optimum))\n', (11958, 11982), True, 'import numpy as np\n'), ((6755, 7101), 'functools.partial', 'functools.partial', (['PES_aquisition_function_multi'], {'Xsamples': 'Xsamples', 'x_minimum': 'x_minimum_vec', 'l_vec': 'l', 'sigma': 'sigma', 'noise': 'noise', 'K': 'K_vec', 'K_star_min': 'K_star_min_vec', 'K_plus_W_tilde_inverse': 'K_plus_W_tilde_inverse_vec', 'm_f_minimum': 'm_f_minimum_vec', 'v_f_minimum': 'v_f_minimum_vec', 'c_and_m': 'c_and_m_vec', 'num_of_hyperSets': 'num_of_hyperSets'}), '(PES_aquisition_function_multi, Xsamples=Xsamples,\n x_minimum=x_minimum_vec, l_vec=l, sigma=sigma, noise=noise, K=K_vec,\n K_star_min=K_star_min_vec, K_plus_W_tilde_inverse=\n K_plus_W_tilde_inverse_vec, m_f_minimum=m_f_minimum_vec, v_f_minimum=\n v_f_minimum_vec, c_and_m=c_and_m_vec, num_of_hyperSets=num_of_hyperSets)\n', (6772, 7101), False, 'import functools\n'), ((7564, 7579), 'numpy.array', 'np.array', (['ret.x'], {}), '(ret.x)\n', (7572, 7579), True, 'import numpy as np\n'), ((7801, 7834), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (7824, 7834), False, 'import warnings\n'), ((9926, 10140), 'functools.partial', 'functools.partial', (['posterior_mean_given_nObservations'], {'X_nObservations': 'Xsamples', 'value_of_nObservations': 'Ysamples', 'K_plus_I_inverse': 'K_plus_I_inverse_vec', 'l': 'l', 'sigma': 'sigma', 'num_of_hyperSets': 'num_of_hyperSets'}), '(posterior_mean_given_nObservations, X_nObservations=\n Xsamples, value_of_nObservations=Ysamples, K_plus_I_inverse=\n K_plus_I_inverse_vec, 
l=l, sigma=sigma, num_of_hyperSets=num_of_hyperSets)\n', (9943, 10140), False, 'import functools\n'), ((10291, 10519), 'functools.partial', 'functools.partial', (['posterior_gradient_given_nObservations'], {'X_nObservations': 'Xsamples', 'value_of_nObservations': 'Ysamples', 'K_plus_I_inverse': 'K_plus_I_inverse_vec', 'l': 'l', 'sigma': 'sigma', 'num_of_hyperSets': 'num_of_hyperSets', 'd': 'd'}), '(posterior_gradient_given_nObservations, X_nObservations=\n Xsamples, value_of_nObservations=Ysamples, K_plus_I_inverse=\n K_plus_I_inverse_vec, l=l, sigma=sigma, num_of_hyperSets=\n num_of_hyperSets, d=d)\n', (10308, 10519), False, 'import functools\n'), ((9792, 9818), 'numpy.array', 'np.array', (['K_plus_I_inverse'], {}), '(K_plus_I_inverse)\n', (9800, 9818), True, 'import numpy as np\n'), ((9724, 9744), 'numpy.eye', 'np.eye', (['num_of_obser'], {}), '(num_of_obser)\n', (9730, 9744), True, 'import numpy as np\n'), ((11711, 11730), 'numpy.argmin', 'np.argmin', (['Ysamples'], {}), '(Ysamples)\n', (11720, 11730), True, 'import numpy as np\n')] |
import pandas as pd
from hydroDL.data import usgs, gageII, gridMET, ntn, GLASS, transform, dbBasin
import numpy as np
import matplotlib.pyplot as plt
from hydroDL.post import axplot, figplot
from hydroDL import kPath, utils
import json
import os
import importlib
from hydroDL.master import basinFull
from hydroDL.app.waterQuality import WRTDS
# Analysis script: compare LSTM and WRTDS water-quality models per code.
# Load the prepared basin dataset and the list of water-quality codes.
DF = dbBasin.DataFrameBasin('G200')
codeLst = usgs.newC
# LSTM
# Load per-site correlation matrices of the LSTM models (epoch 500) for the
# training subset 'rmR20' and testing subset 'pkR20' of both datasets.
ep = 500
trainSet = 'rmR20'
testSet = 'pkR20'
label = 'QFPRT2C'
dataNameLst = ['G200', 'G200N']
corrLst1 = list()
corrLst2 = list()
for dataName in dataNameLst:
    outName = '{}-{}-{}'.format(dataName, label, trainSet)
    outFolder = basinFull.nameFolder(outName)
    corrName1 = 'corr-{}-Ep{}.npy'.format(trainSet, ep)
    corrName2 = 'corr-{}-Ep{}.npy'.format(testSet, ep)
    corrFile1 = os.path.join(outFolder, corrName1)
    corrFile2 = os.path.join(outFolder, corrName2)
    corrL1 = np.load(corrFile1)
    corrL2 = np.load(corrFile2)
    corrLst1.append(corrL1)
    corrLst2.append(corrL2)
# WRTDS
# Load the matching correlation matrices of the WRTDS reference model.
dirWRTDS = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-dbBasin')
corrName1 = 'corr-{}-{}-{}.npy'.format('G200N', trainSet, testSet)
corrName2 = 'corr-{}-{}-{}.npy'.format('G200N', testSet, testSet)
corrFile1 = os.path.join(dirWRTDS, corrName1)
corrFile2 = os.path.join(dirWRTDS, corrName2)
corrW1 = np.load(corrFile1)
corrW2 = np.load(corrFile2)
# count
# Count non-NaN observations per (site, code) in each subset and mask out
# site/code pairs with too few samples (<160 train or <40 test).
matB = (~np.isnan(DF.c)).astype(int).astype(float)
matB1 = DF.extractSubset(matB, trainSet)
matB2 = DF.extractSubset(matB, testSet)
count1 = np.nansum(matB1, axis=0)
count2 = np.nansum(matB2, axis=0)
matRm = (count1 < 160) & (count2 < 40)
for corr in [corrW1, corrW2]+corrLst1+corrLst2:
    corr[matRm] = np.nan
# load linear/seasonal
# Load per-code R^2 of the linear/seasonal (LR) baseline from CSV files
# (one file per code, indexed by site number).
dirPar = r'C:\Users\geofk\work\waterQuality\modelStat\LR-All\QS\param'
matLR = np.full([len(DF.siteNoLst), len(codeLst)], np.nan)
for k, code in enumerate(codeLst):
    filePar = os.path.join(dirPar, code)
    dfCorr = pd.read_csv(filePar, dtype={'siteNo': str}).set_index('siteNo')
    matLR[:, k] = dfCorr['rsq'].values
matLR[matRm] = np.nan
#
# Plot 1: LR baseline R^2 (x) vs the LSTM-minus-WRTDS difference in squared
# correlation (y) for both datasets, one labeled point per code.
a = np.nanmean(matLR, axis=0)
b = np.nanmean(corrLst2[0]**2 - corrW2**2, axis=0)
c = np.nanmean(corrLst2[1]**2 - corrW2**2, axis=0)
fig, ax = plt.subplots(1, 1)
for k in range(len(codeLst)):
    ax.text(a[k], (b[k]+c[k])/2, usgs.codePdf.loc[codeLst[k]]['shortName'])
ax.plot([a, a], [b, c], c='0.5')
ax.plot(a, b, 'b*')
ax.plot(a, c, 'r*')
# ax.set_xlim([0.2, 1.2])
# ax.set_ylim([-1.5, 3])
# plt.xscale('symlog')
ax.axhline(0, color='k')
ax.axvline(0.4, color='k')
fig.show()
##
# Plot 2: same x-axis, but absolute LSTM squared correlation on the y-axis.
a = np.nanmean(matLR, axis=0)
b = np.nanmean(corrLst2[0]**2, axis=0)
c = np.nanmean(corrLst2[1]**2, axis=0)
fig, ax = plt.subplots(1, 1)
for k in range(len(codeLst)):
    ax.text(a[k], (b[k]+c[k])/2, usgs.codePdf.loc[codeLst[k]]['shortName'])
ax.plot([a, a], [b, c], c='0.5')
ax.plot(a, b, 'b*')
ax.plot(a, c, 'r*')
# ax.set_xlim([0.2, 1.2])
# ax.set_ylim([-1.5, 3])
# plt.xscale('symlog')
fig.show()
| [
"hydroDL.data.dbBasin.DataFrameBasin",
"pandas.read_csv",
"os.path.join",
"numpy.nanmean",
"hydroDL.master.basinFull.nameFolder",
"numpy.isnan",
"numpy.nansum",
"numpy.load",
"matplotlib.pyplot.subplots"
] | [((350, 380), 'hydroDL.data.dbBasin.DataFrameBasin', 'dbBasin.DataFrameBasin', (['"""G200"""'], {}), "('G200')\n", (372, 380), False, 'from hydroDL.data import usgs, gageII, gridMET, ntn, GLASS, transform, dbBasin\n'), ((1029, 1084), 'os.path.join', 'os.path.join', (['kPath.dirWQ', '"""modelStat"""', '"""WRTDS-dbBasin"""'], {}), "(kPath.dirWQ, 'modelStat', 'WRTDS-dbBasin')\n", (1041, 1084), False, 'import os\n'), ((1230, 1263), 'os.path.join', 'os.path.join', (['dirWRTDS', 'corrName1'], {}), '(dirWRTDS, corrName1)\n', (1242, 1263), False, 'import os\n'), ((1276, 1309), 'os.path.join', 'os.path.join', (['dirWRTDS', 'corrName2'], {}), '(dirWRTDS, corrName2)\n', (1288, 1309), False, 'import os\n'), ((1319, 1337), 'numpy.load', 'np.load', (['corrFile1'], {}), '(corrFile1)\n', (1326, 1337), True, 'import numpy as np\n'), ((1347, 1365), 'numpy.load', 'np.load', (['corrFile2'], {}), '(corrFile2)\n', (1354, 1365), True, 'import numpy as np\n'), ((1516, 1540), 'numpy.nansum', 'np.nansum', (['matB1'], {'axis': '(0)'}), '(matB1, axis=0)\n', (1525, 1540), True, 'import numpy as np\n'), ((1550, 1574), 'numpy.nansum', 'np.nansum', (['matB2'], {'axis': '(0)'}), '(matB2, axis=0)\n', (1559, 1574), True, 'import numpy as np\n'), ((2062, 2087), 'numpy.nanmean', 'np.nanmean', (['matLR'], {'axis': '(0)'}), '(matLR, axis=0)\n', (2072, 2087), True, 'import numpy as np\n'), ((2092, 2142), 'numpy.nanmean', 'np.nanmean', (['(corrLst2[0] ** 2 - corrW2 ** 2)'], {'axis': '(0)'}), '(corrLst2[0] ** 2 - corrW2 ** 2, axis=0)\n', (2102, 2142), True, 'import numpy as np\n'), ((2143, 2193), 'numpy.nanmean', 'np.nanmean', (['(corrLst2[1] ** 2 - corrW2 ** 2)'], {'axis': '(0)'}), '(corrLst2[1] ** 2 - corrW2 ** 2, axis=0)\n', (2153, 2193), True, 'import numpy as np\n'), ((2201, 2219), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2213, 2219), True, 'import matplotlib.pyplot as plt\n'), ((2549, 2574), 'numpy.nanmean', 'np.nanmean', (['matLR'], {'axis': '(0)'}), 
'(matLR, axis=0)\n', (2559, 2574), True, 'import numpy as np\n'), ((2579, 2615), 'numpy.nanmean', 'np.nanmean', (['(corrLst2[0] ** 2)'], {'axis': '(0)'}), '(corrLst2[0] ** 2, axis=0)\n', (2589, 2615), True, 'import numpy as np\n'), ((2618, 2654), 'numpy.nanmean', 'np.nanmean', (['(corrLst2[1] ** 2)'], {'axis': '(0)'}), '(corrLst2[1] ** 2, axis=0)\n', (2628, 2654), True, 'import numpy as np\n'), ((2664, 2682), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2676, 2682), True, 'import matplotlib.pyplot as plt\n'), ((645, 674), 'hydroDL.master.basinFull.nameFolder', 'basinFull.nameFolder', (['outName'], {}), '(outName)\n', (665, 674), False, 'from hydroDL.master import basinFull\n'), ((802, 836), 'os.path.join', 'os.path.join', (['outFolder', 'corrName1'], {}), '(outFolder, corrName1)\n', (814, 836), False, 'import os\n'), ((853, 887), 'os.path.join', 'os.path.join', (['outFolder', 'corrName2'], {}), '(outFolder, corrName2)\n', (865, 887), False, 'import os\n'), ((901, 919), 'numpy.load', 'np.load', (['corrFile1'], {}), '(corrFile1)\n', (908, 919), True, 'import numpy as np\n'), ((933, 951), 'numpy.load', 'np.load', (['corrFile2'], {}), '(corrFile2)\n', (940, 951), True, 'import numpy as np\n'), ((1890, 1916), 'os.path.join', 'os.path.join', (['dirPar', 'code'], {}), '(dirPar, code)\n', (1902, 1916), False, 'import os\n'), ((1930, 1973), 'pandas.read_csv', 'pd.read_csv', (['filePar'], {'dtype': "{'siteNo': str}"}), "(filePar, dtype={'siteNo': str})\n", (1941, 1973), True, 'import pandas as pd\n'), ((1384, 1398), 'numpy.isnan', 'np.isnan', (['DF.c'], {}), '(DF.c)\n', (1392, 1398), True, 'import numpy as np\n')] |
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
import pyqtgraph
import PyQt5.QtGui as qtg
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
matplotlib.use('Qt5Agg')
class MatplotlibWidget(qtg.QWidget):
    """
    Embeds a Matplotlib figure, together with its navigation toolbar,
    inside a QWidget.

    Interact with the figure through getFigure() and redraw with draw().
    Example::

        mw = MatplotlibWidget()
        axes = mw.getFigure().add_subplot(111)
        axes.plot(x, y)
        mw.draw()
    """

    def __init__(self, size=(5.0, 4.0), dpi=100):
        """Create the figure, its canvas/toolbar, and lay them out vertically.

        size: figure size in inches (width, height).
        dpi: figure resolution in dots per inch.
        """
        super().__init__()
        self.fig = Figure(size, dpi=dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self)
        self.toolbar = NavigationToolbar(self.canvas, self)
        # Toolbar on top, canvas below.
        self.vbox = qtg.QVBoxLayout()
        for child in (self.toolbar, self.canvas):
            self.vbox.addWidget(child)
        self.setLayout(self.vbox)

    def getFigure(self):
        """Return the underlying matplotlib Figure."""
        return self.fig

    def draw(self):
        """Re-render the canvas after the figure has been modified."""
        self.canvas.draw()
# Create the data
# Synthetic test signal: a frequency-modulated 3 kHz carrier plus
# exponentially decaying Gaussian noise, sampled at 10 kHz.
fs = 10e3
N = 1e5
amp = 2 * np.sqrt(2)
noise_power = 0.01 * fs / 2
time = np.arange(N) / float(fs)
mod = 500*np.cos(2*np.pi*0.25*time)
carrier = amp * np.sin(2*np.pi*3e3*time + mod)
noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
noise *= np.exp(-time/5)
x = carrier + noise
# Sxx[freq_bin, time_bin] holds the spectrogram power values.
f, t, Sxx = signal.spectrogram(x, fs)
# Interpret image data as row-major instead of col-major
pyqtgraph.setConfigOptions(imageAxisOrder='row-major')
pyqtgraph.mkQApp()
win = pyqtgraph.GraphicsLayoutWidget()
# A plot area (ViewBox + axes) for displaying the image
p1 = win.addPlot()
# Item for displaying image data
img = pyqtgraph.ImageItem()
p1.addItem(img)
# Add a histogram with which to control the gradient of the image
hist = pyqtgraph.HistogramLUTItem()
# Link the histogram to the image
hist.setImageItem(img)
# If you don't add the histogram to the window, it stays invisible, but I find it useful.
win.addItem(hist)
# Show the window
win.show()
# Fit the min and max levels of the histogram to the data available
hist.setLevels(np.min(Sxx), np.max(Sxx))
# This gradient is roughly comparable to the gradient used by Matplotlib
# You can adjust it and then save it using hist.gradient.saveState()
hist.gradient.restoreState(
        {'mode': 'rgb',
         'ticks': [(0.5, (0, 182, 188, 255)),
                   (1.0, (246, 111, 0, 255)),
                   (0.0, (75, 0, 113, 255))]})
# Sxx contains the amplitude for each pixel
img.setImage(Sxx)
# Scale the X and Y Axis to time and frequency (standard is pixels)
img.scale(t[-1]/np.size(Sxx, axis=1),
          f[-1]/np.size(Sxx, axis=0))
# Limit panning/zooming to the spectrogram
p1.setLimits(xMin=0, xMax=t[-1], yMin=0, yMax=f[-1])
# Add labels to the axis
p1.setLabel('bottom', "Time", units='s')
# If you include the units, Pyqtgraph automatically scales the axis and adjusts the SI prefix (in this case kHz)
p1.setLabel('left', "Frequency", units='Hz')
# Plotting with Matplotlib in comparison
plt.pcolormesh(t, f, Sxx)
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.colorbar()
plt.show()
# Start the Qt event loop (blocks until all windows are closed).
pyqtgraph.Qt.QtGui.QApplication.instance().exec_()
| [
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"scipy.signal.spectrogram",
"matplotlib.pyplot.pcolormesh",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"pyqtgraph.ImageItem",
"numpy.max",
"numpy.exp",
"PyQt5.QtGui.QVBoxLayout",
... | [((344, 368), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (358, 368), False, 'import matplotlib\n'), ((1485, 1502), 'numpy.exp', 'np.exp', (['(-time / 5)'], {}), '(-time / 5)\n', (1491, 1502), True, 'import numpy as np\n'), ((1533, 1558), 'scipy.signal.spectrogram', 'signal.spectrogram', (['x', 'fs'], {}), '(x, fs)\n', (1551, 1558), False, 'from scipy import signal\n'), ((1617, 1671), 'pyqtgraph.setConfigOptions', 'pyqtgraph.setConfigOptions', ([], {'imageAxisOrder': '"""row-major"""'}), "(imageAxisOrder='row-major')\n", (1643, 1671), False, 'import pyqtgraph\n'), ((1673, 1691), 'pyqtgraph.mkQApp', 'pyqtgraph.mkQApp', ([], {}), '()\n', (1689, 1691), False, 'import pyqtgraph\n'), ((1698, 1730), 'pyqtgraph.GraphicsLayoutWidget', 'pyqtgraph.GraphicsLayoutWidget', ([], {}), '()\n', (1728, 1730), False, 'import pyqtgraph\n'), ((1846, 1867), 'pyqtgraph.ImageItem', 'pyqtgraph.ImageItem', ([], {}), '()\n', (1865, 1867), False, 'import pyqtgraph\n'), ((1957, 1985), 'pyqtgraph.HistogramLUTItem', 'pyqtgraph.HistogramLUTItem', ([], {}), '()\n', (1983, 1985), False, 'import pyqtgraph\n'), ((3174, 3199), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['t', 'f', 'Sxx'], {}), '(t, f, Sxx)\n', (3188, 3199), True, 'import matplotlib.pyplot as plt\n'), ((3200, 3228), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency [Hz]"""'], {}), "('Frequency [Hz]')\n", (3210, 3228), True, 'import matplotlib.pyplot as plt\n'), ((3229, 3253), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [sec]"""'], {}), "('Time [sec]')\n", (3239, 3253), True, 'import matplotlib.pyplot as plt\n'), ((3254, 3268), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3266, 3268), True, 'import matplotlib.pyplot as plt\n'), ((3269, 3279), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3277, 3279), True, 'import matplotlib.pyplot as plt\n'), ((1252, 1262), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1259, 1262), True, 'import numpy as 
np\n'), ((1298, 1310), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1307, 1310), True, 'import numpy as np\n'), ((1333, 1364), 'numpy.cos', 'np.cos', (['(2 * np.pi * 0.25 * time)'], {}), '(2 * np.pi * 0.25 * time)\n', (1339, 1364), True, 'import numpy as np\n'), ((1375, 1414), 'numpy.sin', 'np.sin', (['(2 * np.pi * 3000.0 * time + mod)'], {}), '(2 * np.pi * 3000.0 * time + mod)\n', (1381, 1414), True, 'import numpy as np\n'), ((2263, 2274), 'numpy.min', 'np.min', (['Sxx'], {}), '(Sxx)\n', (2269, 2274), True, 'import numpy as np\n'), ((2276, 2287), 'numpy.max', 'np.max', (['Sxx'], {}), '(Sxx)\n', (2282, 2287), True, 'import numpy as np\n'), ((740, 766), 'PyQt5.QtGui.QWidget.__init__', 'qtg.QWidget.__init__', (['self'], {}), '(self)\n', (760, 766), True, 'import PyQt5.QtGui as qtg\n'), ((786, 807), 'matplotlib.figure.Figure', 'Figure', (['size'], {'dpi': 'dpi'}), '(size, dpi=dpi)\n', (792, 807), False, 'from matplotlib.figure import Figure\n'), ((830, 852), 'matplotlib.backends.backend_qt5agg.FigureCanvas', 'FigureCanvas', (['self.fig'], {}), '(self.fig)\n', (842, 852), False, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, FigureCanvas\n'), ((912, 948), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self.canvas', 'self'], {}), '(self.canvas, self)\n', (929, 948), True, 'from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\n'), ((970, 987), 'PyQt5.QtGui.QVBoxLayout', 'qtg.QVBoxLayout', ([], {}), '()\n', (985, 987), True, 'import PyQt5.QtGui as qtg\n'), ((1437, 1457), 'numpy.sqrt', 'np.sqrt', (['noise_power'], {}), '(noise_power)\n', (1444, 1457), True, 'import numpy as np\n'), ((2752, 2772), 'numpy.size', 'np.size', (['Sxx'], {'axis': '(1)'}), '(Sxx, axis=1)\n', (2759, 2772), True, 'import numpy as np\n'), ((2790, 2810), 'numpy.size', 'np.size', (['Sxx'], {'axis': '(0)'}), '(Sxx, axis=0)\n', (2797, 2810), True, 'import numpy as np\n'), ((3281, 3323), 
'pyqtgraph.Qt.QtGui.QApplication.instance', 'pyqtgraph.Qt.QtGui.QApplication.instance', ([], {}), '()\n', (3321, 3323), False, 'import pyqtgraph\n')] |
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this open-source project.
""" Define the functions to load data. """
import os
import json
import argparse
import numpy as np
def load_data(data_dir, interval=100, data_type='2D'):
    """Load paired music/dance samples and cut them into fixed-length windows.

    Each JSON file in ``data_dir`` must contain a 'music_array' and a
    'dance_array' of the same temporal length. For 2D data, only the first
    25 keypoints (50 values) are kept and every joint is expressed relative
    to the root joint (keypoint 8), whose absolute position is preserved.

    Args:
        data_dir: directory containing one JSON sample per file.
        interval: window length; trailing partial windows are dropped.
        data_type: '2D' enables the skeleton normalization described above.

    Returns:
        (music_data, dance_data): two aligned lists of numpy arrays of
        length ``interval``.
    """
    music_data = []
    dance_data = []
    # fnames = fnames[:10] # For debug
    for fname in sorted(os.listdir(data_dir)):
        with open(os.path.join(data_dir, fname)) as f:
            sample = json.loads(f.read())
            np_music = np.array(sample['music_array'])
            np_dance = np.array(sample['dance_array'])
            if data_type == '2D':
                # Keep the 25-keypoint basic skeleton and re-center on the
                # root joint (keypoint 8); the root keeps its absolute value.
                np_dance = np_dance[:, :50]
                root = np_dance[:, 2*8:2*9]
                np_dance = np_dance - np.tile(root, (1, 25))
                np_dance[:, 2*8:2*9] = root
            seq_len = np_music.shape[0]
            for start in range(0, seq_len, interval):
                music_chunk = np_music[start:start + interval]
                # Only keep complete windows.
                if len(music_chunk) == interval:
                    music_data.append(music_chunk)
                    dance_data.append(np_dance[start:start + interval])
    return music_data, dance_data
def load_test_data(data_dir, data_type='2D'):
    """Load full-length music/dance samples for evaluation.

    Unlike load_data, sequences are NOT windowed: every file yields one
    full-length (music, dance) pair. For 2D data the dance skeleton is
    reduced to 25 keypoints and re-centered on the root joint (keypoint 8).

    Args:
        data_dir: directory containing one JSON sample per file.
        data_type: '2D' enables the skeleton normalization.

    Returns:
        (music_data, dance_data, fnames): aligned lists of numpy arrays plus
        the sorted file names they were loaded from.
    """
    music_data = []
    dance_data = []
    fnames = sorted(os.listdir(data_dir))
    print(fnames)
    # fnames = fnames[:60] # For debug
    for fname in fnames:
        with open(os.path.join(data_dir, fname)) as f:
            sample = json.loads(f.read())
            np_music = np.array(sample['music_array'])
            np_dance = np.array(sample['dance_array'])
            if data_type == '2D':
                # Keep the 25-keypoint basic skeleton and re-center on the
                # root joint (keypoint 8); the root keeps its absolute value.
                np_dance = np_dance[:, :50]
                root = np_dance[:, 2*8:2*9]
                np_dance = np_dance - np.tile(root, (1, 25))
                np_dance[:, 2*8:2*9] = root
            music_data.append(np_music)
            dance_data.append(np_dance)
    return music_data, dance_data, fnames
def load_json_data(data_file, max_seq_len=150):
    """Load aligned music/dance segments from a single JSON file.

    The file must contain a list of dicts with 'music_segments' and
    'dance_segments' keys of equal length. Segments longer than
    ``max_seq_len`` are discarded; statistics about the discarded
    fraction are printed.

    Args:
        data_file: path to the JSON file.
        max_seq_len: maximum allowed segment length.

    Returns:
        (music_data, dance_data): the kept segments, in file order.
    """
    music_data = []
    dance_data = []
    dropped = 0
    total = 0
    with open(data_file) as f:
        data_list = json.loads(f.read())
        for entry in data_list:
            # The first and last segment may be unusable
            music_segs = entry['music_segments']
            dance_segs = entry['dance_segments']
            assert len(music_segs) == len(dance_segs), 'alignment'
            for music_seg, dance_seg in zip(music_segs, dance_segs):
                total += 1
                if len(music_seg) > max_seq_len:
                    dropped += 1
                else:
                    music_data.append(music_seg)
                    dance_data.append(dance_seg)
    rate = dropped / total
    print(f'total num of segments: {total}')
    print(f'num of segments length > {max_seq_len}: {dropped}')
    print(f'the rate: {rate}')
    return music_data, dance_data
def str2bool(v):
    """Parse a boolean-like string for use as an argparse ``type=``.

    Accepts (case-insensitively) yes/true/t/y/1 as True and
    no/false/f/n/0 as False; anything else raises
    argparse.ArgumentTypeError.
    """
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
| [
"numpy.tile",
"os.listdir",
"os.path.join",
"argparse.ArgumentTypeError",
"numpy.array"
] | [((360, 380), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (370, 380), False, 'import os\n'), ((465, 494), 'os.path.join', 'os.path.join', (['data_dir', 'fname'], {}), '(data_dir, fname)\n', (477, 494), False, 'import os\n'), ((1518, 1538), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (1528, 1538), False, 'import os\n'), ((1642, 1671), 'os.path.join', 'os.path.join', (['data_dir', 'fname'], {}), '(data_dir, fname)\n', (1654, 1671), False, 'import os\n'), ((598, 634), 'numpy.array', 'np.array', (["sample_dict['music_array']"], {}), "(sample_dict['music_array'])\n", (606, 634), True, 'import numpy as np\n'), ((659, 695), 'numpy.array', 'np.array', (["sample_dict['dance_array']"], {}), "(sample_dict['dance_array'])\n", (667, 695), True, 'import numpy as np\n'), ((1775, 1811), 'numpy.array', 'np.array', (["sample_dict['music_array']"], {}), "(sample_dict['music_array'])\n", (1783, 1811), True, 'import numpy as np\n'), ((1836, 1872), 'numpy.array', 'np.array', (["sample_dict['dance_array']"], {}), "(sample_dict['dance_array'])\n", (1844, 1872), True, 'import numpy as np\n'), ((3482, 3535), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (3508, 3535), False, 'import argparse\n'), ((930, 952), 'numpy.tile', 'np.tile', (['root', '(1, 25)'], {}), '(root, (1, 25))\n', (937, 952), True, 'import numpy as np\n'), ((2107, 2129), 'numpy.tile', 'np.tile', (['root', '(1, 25)'], {}), '(root, (1, 25))\n', (2114, 2129), True, 'import numpy as np\n')] |
# pylint: disable=no-self-use,invalid-name
import numpy
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.data.fields import ArrayField, ListField
class TestArrayField(AllenNlpTestCase):
    """Unit tests for ArrayField padding-length computation and as_tensor padding."""

    def test_get_padding_lengths_correctly_returns_ordered_shape(self):
        shape = [3, 4, 5, 6]
        field = ArrayField(numpy.zeros(shape))
        lengths = field.get_padding_lengths()
        # One "dimension_i" entry per axis, matching the array's shape.
        for dim, expected in enumerate(shape):
            assert lengths["dimension_{}".format(dim)] == expected

    def test_as_tensor_handles_larger_padding_dimensions(self):
        array = numpy.ones([3, 4])
        field = ArrayField(array)
        padded_tensor = field.as_tensor({"dimension_0": 5, "dimension_1": 6}).data.cpu().numpy()
        # Original content sits in the top-left corner; the rest is zero-padded.
        numpy.testing.assert_array_equal(padded_tensor[:3, :4], array)
        numpy.testing.assert_array_equal(padded_tensor[3:, 4:], 0.)

    def test_padding_handles_list_fields(self):
        first = ArrayField(numpy.ones([2, 3]))
        second = ArrayField(numpy.ones([1, 5]))
        list_field = ListField([first, second, first.empty_field()])
        actual = list_field.as_tensor(list_field.get_padding_lengths()).data.cpu().numpy()
        # Every element is padded up to (2, 5) with zeros; the empty field is all zeros.
        expected = numpy.zeros([3, 2, 5])
        expected[0, :2, :3] = 1.
        expected[1, :1, :5] = 1.
        numpy.testing.assert_array_equal(actual, expected)

    def test_padding_handles_list_fields_with_padding_values(self):
        first = ArrayField(numpy.ones([2, 3]), padding_value=-1)
        second = ArrayField(numpy.ones([1, 5]), padding_value=-1)
        list_field = ListField([first, second, first.empty_field()])
        actual = list_field.as_tensor(list_field.get_padding_lengths()).data.cpu().numpy()
        # Same layout as above, but padded positions take the value -1.
        expected = numpy.full([3, 2, 5], -1.)
        expected[0, :2, :3] = 1.
        expected[1, :1, :5] = 1.
        numpy.testing.assert_array_equal(actual, expected)
| [
"numpy.ones",
"allennlp.data.fields.ArrayField",
"numpy.array",
"numpy.zeros",
"allennlp.data.fields.ListField",
"numpy.testing.assert_array_equal"
] | [((335, 353), 'numpy.zeros', 'numpy.zeros', (['shape'], {}), '(shape)\n', (346, 353), False, 'import numpy\n'), ((376, 393), 'allennlp.data.fields.ArrayField', 'ArrayField', (['array'], {}), '(array)\n', (386, 393), False, 'from allennlp.data.fields import ArrayField, ListField\n'), ((653, 670), 'numpy.ones', 'numpy.ones', (['shape'], {}), '(shape)\n', (663, 670), False, 'import numpy\n'), ((693, 710), 'allennlp.data.fields.ArrayField', 'ArrayField', (['array'], {}), '(array)\n', (703, 710), False, 'from allennlp.data.fields import ArrayField, ListField\n'), ((823, 885), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['padded_tensor[:3, :4]', 'array'], {}), '(padded_tensor[:3, :4], array)\n', (855, 885), False, 'import numpy\n'), ((894, 954), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['padded_tensor[3:, 4:]', '(0.0)'], {}), '(padded_tensor[3:, 4:], 0.0)\n', (926, 954), False, 'import numpy\n'), ((1163, 1203), 'allennlp.data.fields.ListField', 'ListField', (['[array1, array2, empty_array]'], {}), '([array1, array2, empty_array])\n', (1172, 1203), False, 'from allennlp.data.fields import ArrayField, ListField\n'), ((1330, 1520), 'numpy.array', 'numpy.array', (['[[[1.0, 1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0, 0.0]], [[1.0, 1.0, 1.0, \n 1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0, 0.0], [0.0,\n 0.0, 0.0, 0.0, 0.0]]]'], {}), '([[[1.0, 1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0, 0.0]], [[1.0,\n 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0, \n 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]]])\n', (1341, 1520), False, 'import numpy\n'), ((1683, 1748), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['returned_tensor', 'correct_tensor'], {}), '(returned_tensor, correct_tensor)\n', (1715, 1748), False, 'import numpy\n'), ((2014, 2054), 'allennlp.data.fields.ListField', 'ListField', (['[array1, array2, empty_array]'], {}), '([array1, array2, empty_array])\n', (2023, 
2054), False, 'from allennlp.data.fields import ArrayField, ListField\n'), ((2181, 2390), 'numpy.array', 'numpy.array', (['[[[1.0, 1.0, 1.0, -1.0, -1.0], [1.0, 1.0, 1.0, -1.0, -1.0]], [[1.0, 1.0, \n 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0, -1.0]], [[-1.0, -1.0, -1.0, -\n 1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0]]]'], {}), '([[[1.0, 1.0, 1.0, -1.0, -1.0], [1.0, 1.0, 1.0, -1.0, -1.0]], [[\n 1.0, 1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0, -1.0]], [[-1.0, -1.0,\n -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0]]])\n', (2192, 2390), False, 'import numpy\n'), ((2553, 2618), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['returned_tensor', 'correct_tensor'], {}), '(returned_tensor, correct_tensor)\n', (2585, 2618), False, 'import numpy\n'), ((1031, 1049), 'numpy.ones', 'numpy.ones', (['[2, 3]'], {}), '([2, 3])\n', (1041, 1049), False, 'import numpy\n'), ((1079, 1097), 'numpy.ones', 'numpy.ones', (['[1, 5]'], {}), '([1, 5])\n', (1089, 1097), False, 'import numpy\n'), ((1846, 1864), 'numpy.ones', 'numpy.ones', (['[2, 3]'], {}), '([2, 3])\n', (1856, 1864), False, 'import numpy\n'), ((1912, 1930), 'numpy.ones', 'numpy.ones', (['[1, 5]'], {}), '([1, 5])\n', (1922, 1930), False, 'import numpy\n')] |
from src import geometry_profiles as gp
import typing
import numpy as np
class LightProfile(gp.GeometryProfile):
    """Abstract base class for light profiles, describing a galaxy's emission as a function of radius."""

    def __init__(
        self,
        centre: typing.Tuple[float, float] = (0.0, 0.0),
        axis_ratio: float = 1.0,
        angle: float = 0.0,
        normalization: float = 0.1,
        radius: float = 0.6,
    ):
        """Set up the geometric and photometric parameters shared by all light profiles.

        Parameters
        ----------
        centre
            The (y, x) coordinates of the profile centre.
        axis_ratio
            The axis-ratio of the ellipse (minor axis / major axis).
        angle
            The rotation angle in degrees counter-clockwise from the positive x-axis.
        normalization
            Overall normalization of the light profile.
        radius
            The circular radius containing half the light of this profile.
        """
        super().__init__(centre=centre, axis_ratio=axis_ratio, angle=angle)
        # Photometric parameters specific to light (rather than mass) profiles.
        self.normalization = normalization
        self.radius = radius
class LightDeVaucouleurs(LightProfile):
    """De Vaucouleurs light profile, often used in Astronomy to represent the bulge of galaxies."""

    def __init__(
        self,
        centre: typing.Tuple[float, float] = (0.0, 0.0),
        axis_ratio: float = 1.0,
        angle: float = 0.0,
        normalization: float = 0.1,
        radius: float = 0.6,
    ):
        """Initialise the De Vaucouleurs profile.

        Parameters
        ----------
        centre
            The (y, x) coordinates of the profile centre.
        axis_ratio
            The axis-ratio of the ellipse (minor axis / major axis).
        angle
            The rotation angle in degrees counter-clockwise from the positive x-axis.
        normalization
            Overall normalization of the light profile.
        radius
            The circular radius containing half the light of this profile.
        """
        super().__init__(
            centre=centre,
            axis_ratio=axis_ratio,
            angle=angle,
            normalization=normalization,
            radius=radius,
        )

    def image_from_grid(self, grid: np.ndarray) -> np.ndarray:
        """Evaluate this profile's image on a grid of Cartesian (y, x) coordinates.

        The grid is first translated / rotated into the profile's reference
        frame, then converted to elliptical radii before evaluation.

        Parameters
        ----------
        grid
            The (y, x) coordinates where the image is computed.
        """
        k = 7.66924  # shape constant used by the original implementation
        radii = self.grid_to_elliptical_radii(
            grid=self.transform_grid_to_reference_frame(grid=grid)
        )
        return self.normalization * np.exp(-k * ((radii / self.radius) ** (1.0 / k) - 1.0))
class LightExponential(LightProfile):
    """Exponential light profile, often used in Astronomy to represent the disk of galaxies."""

    def __init__(
        self,
        centre: typing.Tuple[float, float] = (0.0, 0.0),
        axis_ratio: float = 1.0,
        angle: float = 0.0,
        normalization: float = 0.1,
        radius: float = 0.6,
    ):
        """Initialise the Exponential profile.

        Parameters
        ----------
        centre
            The (y, x) coordinates of the profile centre.
        axis_ratio
            The axis-ratio of the ellipse (minor axis / major axis).
        angle
            The rotation angle in degrees counter-clockwise from the positive x-axis.
        normalization
            Overall normalization of the light profile.
        radius
            The circular radius containing half the light of this profile.
        """
        super().__init__(
            centre=centre,
            axis_ratio=axis_ratio,
            angle=angle,
            normalization=normalization,
            radius=radius,
        )

    def image_from_grid(self, grid: np.ndarray) -> np.ndarray:
        """Evaluate this profile's image on a grid of Cartesian (y, x) coordinates.

        Parameters
        ----------
        grid
            The (y, x) coordinates where the image is computed.
        """
        k = 1.67838  # shape constant used by the original implementation
        radii = self.grid_to_elliptical_radii(
            grid=self.transform_grid_to_reference_frame(grid=grid)
        )
        return self.normalization * np.exp(-k * ((radii / self.radius) ** (1.0 / k) - 1.0))
| [
"numpy.exp"
] | [((2831, 2918), 'numpy.exp', 'np.exp', (['(-7.66924 * ((grid_elliptical_radii / self.radius) ** (1.0 / 7.66924) - 1.0))'], {}), '(-7.66924 * ((grid_elliptical_radii / self.radius) ** (1.0 / 7.66924) -\n 1.0))\n', (2837, 2918), True, 'import numpy as np\n'), ((4536, 4623), 'numpy.exp', 'np.exp', (['(-1.67838 * ((grid_elliptical_radii / self.radius) ** (1.0 / 1.67838) - 1.0))'], {}), '(-1.67838 * ((grid_elliptical_radii / self.radius) ** (1.0 / 1.67838) -\n 1.0))\n', (4542, 4623), True, 'import numpy as np\n')] |
from abc import ABC, abstractmethod
from copy import deepcopy
import numpy as np
import tensorflow as tf
# source: http://geomalgorithms.com/a06-_intersect-2.html
# source: https://www.erikrotteveel.com/python/three-dimensional-ray-tracing-in-python/
# Ask TensorFlow to allocate GPU memory on demand rather than grabbing it all
# up-front; a RuntimeError (raised if the runtime was already initialised) is
# deliberately ignored.
gpu_phy_devices = tf.config.list_physical_devices('GPU')
try:
    for gpu in gpu_phy_devices:
        tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError:
    pass
faraway = 99999 # faraway distance
precision = tf.float64 # default precision
pi = tf.constant(np.pi, dtype=precision)
# for numerical stability epsilon
# Zero-comparison tolerance matched to the working float precision.
if precision == tf.float32:
    epsilon = tf.constant(1.e-07, precision)
elif precision == tf.float64:
    epsilon = tf.constant(1.e-15, precision)
def set_precision(p):
    """Set the module-wide float dtype used for newly-created tensors.

    :param p: a TensorFlow dtype, e.g. ``tf.float32`` or ``tf.float64``

    NOTE(review): ``pi`` and ``epsilon`` are computed once at import time and
    are NOT recomputed here, so calling this after import leaves those module
    constants in the previous dtype — confirm whether that is intended.
    """
    global precision
    precision = p
def mag(tensor):
    """Euclidean length of a vector (rank 1) or of each row vector (rank 2).

    Returns a scalar tensor for a single vector, or a 1-D tensor of per-row
    magnitudes for an Nx3 batch.
    """
    if tf.equal(tensor.get_shape().rank, 1):
        squared = tf.tensordot(tensor, tensor, 1)
    else:
        squared = tf.reduce_sum(tensor * tensor, 1)
    return tf.sqrt(squared)
def ray_reflection(rays, normal):
    """Reflect ray directions about per-ray surface normals.

    Implements r = d - 2 (d . n) n for each ray direction d in ``rays.p1``.

    :param rays: Rays whose directional vectors (shape Nx3) are reflected
    :type rays: Ray
    :param normal: per-ray normal vectors, shape Nx3
    :type normal: tf.Tensor
    """
    dot = tf.expand_dims(tf.reduce_sum(normal * rays.p1, 1), 1)
    reflected = rays.p1 - tf.multiply(normal, dot) * 2.
    # Snap components that are effectively zero to exactly zero.
    small = tf.greater(tf.abs(reflected), epsilon)
    return tf.where(small, reflected, tf.zeros_like(reflected))
def norm(tensor):
    """Normalize a vector (rank 1) or each row vector (rank 2) to unit length.

    Magnitudes at or below ``epsilon`` are replaced with 1 so near-zero
    vectors pass through unchanged instead of causing a division by zero.
    """
    length = mag(tensor)
    safe_length = tf.where(tf.less_equal(length, epsilon), tf.ones_like(length), length)
    inverse = 1.0 / safe_length
    if tf.equal(tensor.get_shape().rank, 1):
        return tensor * inverse
    return tensor * tf.expand_dims(inverse, 1)
def tile_vector(tensor, num):
    """Repeat a single vector ``num`` times, producing a (num, len) tensor."""
    row = tensor[tf.newaxis, :]
    return tf.tile(row, [num, 1])
def polar(tensor):
    """Convert Nx3 direction vectors to polar angles (phi in [0, 2*pi), theta).

    ``epsilon`` is added to the first component to stabilise atan2 near zero.
    """
    unit = norm(tensor)
    theta = tf.math.acos(unit[:, 2])
    phi = tf.math.atan2(unit[:, 0] + epsilon, unit[:, 1])
    phi = tf.where(tf.less(phi, 0.), 2 * pi + phi, phi)
    return phi, theta
class Ray:
    def __init__(self, p0, p1, intensity, interact_num):
        """A bundle of rays originating from ``p0`` with directions ``p1``.

        :param p0: 3D vectors for the origins of rays, shape Nx3
        :type p0: tf.Tensor
        :param p1: 3D directional vectors of rays, shape Nx3
        :type p1: tf.Tensor
        :param intensity: initial intensity of each ray
        :type intensity: tf.Tensor
        :param interact_num: initial number of interactions per ray
        :type interact_num: tf.Tensor
        """
        self.p0 = p0  # ray origins
        self.p1 = p1  # ray directions
        self.intensity = intensity
        self.interact_num = interact_num
        n_rows, _ = p0.get_shape()
        # All per-ray tensors must agree on the number of rays.
        tf.debugging.assert_equal(tf.size(self.p0), tf.size(self.p1), message="Rays shape not equal")
        tf.debugging.assert_equal(n_rows, tf.size(self.intensity), message="Rays shape not equal")
        tf.debugging.assert_equal(n_rows, tf.size(self.interact_num), message="Rays shape not equal")

    def __getitem__(self, key):
        return Ray(self.p0[key], self.p1[key], self.intensity[key], self.interact_num[key])

    def __setitem__(self, key, value):
        if key.dtype == tf.bool:
            # Broadcast the per-ray boolean mask across the 3 vector components.
            mask3 = tf.stack([key, key, key], 1)
            self.p0 = tf.where(mask3, value.p0, self.p0)
            self.p1 = tf.where(mask3, value.p1, self.p1)
            self.intensity = tf.where(key, value.intensity, self.intensity)
            self.interact_num = tf.where(key, value.interact_num, self.interact_num)
        else:
            self.p0[key] = value.p0
            self.p1[key] = value.p1
            self.intensity[key] = value.intensity
            self.interact_num[key] = value.interact_num

    def size(self):
        """Number of rays in the bundle."""
        return tf.size(self.p0) // 3

    def copy(self):
        """Deep copy of this ray bundle."""
        return deepcopy(self)
class Surface(ABC):
    """
    Basic class for surfaces
    """
    def __init__(self):
        pass
    @abstractmethod
    def vertices(self):
        # Concrete surfaces return their defining vertices; subclasses in this
        # file (Triangle, Plane, Cone) implement this as a @property.
        pass
class Triangle(Surface):
    def __init__(self, v0, v1, v2, reflectivity=1.):
        """
        A triangle with vertices `v0`, `v1`, `v2` and `reflectivity`

        :param v0: 3D vectors for a vertex
        :type v0: tf.Tensor
        :param v1: 3D vectors for a vertex
        :type v1: tf.Tensor
        :param v2: 3D vectors for a vertex
        :type v2: tf.Tensor
        :param reflectivity: Reflectivity of the surface
        :type reflectivity: float
        """
        super().__init__()
        self.v0 = tf.cast(v0, precision)
        self.v1 = tf.cast(v1, precision)
        self.v2 = tf.cast(v2, precision)
        # Edge vectors spanning the triangle from v0.
        self.u = self.v1 - self.v0
        self.v = self.v2 - self.v0
        self.reflectivity = reflectivity
        # Unit normal of the triangle's plane.
        self.normal = norm(tf.linalg.cross(self.u, self.v))
    @property
    def vertices(self):
        return tf.stack([self.v0, self.v1, self.v2])
    def intersect(self, rays):
        """Intersect a ray bundle with this triangle and reflect the hits.

        Returns a new Ray: rays that hit are moved to the intersection point,
        reflected, have interact_num incremented and intensity scaled by the
        reflectivity; rays that miss are returned unchanged.
        """
        num_rays = rays.size()
        tiled_v = tile_vector(self.v, num_rays)
        tiled_u = tile_vector(self.u, num_rays)
        tiled_normal = tile_vector(self.normal, num_rays)
        # Parametric plane intersection: rI = n.(v0 - p0) / n.d
        b = tf.reduce_sum(tiled_normal*rays.p1, 1)
        a = tf.reduce_sum(tiled_normal*(self.v0 - rays.p0), 1)
        # check if the ray is close enough to be parallel or close enough to lie in the plane
        cond_0_1 = tf.greater(tf.abs(b), epsilon)
        cond_0_2 = tf.greater(tf.abs(a), epsilon)
        cond_0 = tf.logical_and(cond_0_1, cond_0_2)
        rI = tf.expand_dims(tf.where(tf.logical_or(cond_0, tf.less(a/b, 0.)), a/b, tf.zeros_like(a)), -1)
        # Snap near-zero parameters to exactly zero.
        rI = tf.where(tf.greater(tf.abs(rI), epsilon), rI, tf.zeros_like(rI))
        p_intersect = rays.p0 + rays.p1 * rI
        w = p_intersect - self.v0  # p0 + rI * p1 - v0
        # Parametric coordinates (si, ti) of the hit point in the (u, v) edge
        # basis; the point lies inside the triangle iff si >= 0, ti >= 0 and
        # si + ti <= 1.
        wv_dot = tf.reduce_sum(w*tiled_v, 1)
        wu_dot = tf.reduce_sum(w*tiled_u, 1)
        uv_dot = tf.tensordot(self.u, self.v, 1)
        uu_dot = tf.tensordot(self.u, self.u, 1)
        vv_dot = tf.tensordot(self.v, self.v, 1)
        denom = uv_dot * uv_dot - uu_dot * vv_dot
        si = (uv_dot * wv_dot - vv_dot * wu_dot) / denom
        ti = (uv_dot * wu_dot - uu_dot * wv_dot) / denom
        ray_direction = ray_reflection(rays, tiled_normal)
        # Reject: hit behind the origin, or outside the triangle bounds.
        cond_1 = tf.less_equal(tf.squeeze(rI), 0.)
        cond_2 = tf.less(si, 0.)
        cond_3 = tf.greater(si, 1.)
        cond_4 = tf.less(ti, 0.)
        cond_5 = tf.greater(si + ti, 1.)
        no_interaction_idx = tf.logical_or(tf.logical_or(tf.logical_or(tf.logical_or(cond_1, cond_2), cond_3), cond_4), cond_5)
        no_interaction_idx_3 = tf.concat([tf.expand_dims(no_interaction_idx, 1), tf.expand_dims(no_interaction_idx, 1), tf.expand_dims(no_interaction_idx, 1)], 1)
        # Missed rays keep their original state; hits advance and reflect.
        _p_intersect = tf.where(no_interaction_idx_3, rays.p0, p_intersect)
        ray_direction = tf.where(no_interaction_idx_3, rays.p1, ray_direction)
        new_interact_num = tf.where(no_interaction_idx, rays.interact_num, rays.interact_num+1)
        new_intensity = tf.where(no_interaction_idx, rays.intensity, rays.intensity*self.reflectivity)
        return Ray(_p_intersect, ray_direction, intensity=new_intensity, interact_num=new_interact_num)
class Plane(Surface):
    def __init__(self, v0, v1, v2, v3, reflectivity=1.):
        """
        A plane (quadrilateral) with vertices `v0`, `v1`, `v2`, `v3` and `reflectivity`

        :param v0: 3D vectors for a vertex
        :type v0: tf.Tensor
        :param v1: 3D vectors for a vertex
        :type v1: tf.Tensor
        :param v2: 3D vectors for a vertex
        :type v2: tf.Tensor
        :param v3: 3D vectors for a vertex
        :type v3: tf.Tensor
        :param reflectivity: Reflectivity of the surface
        :type reflectivity: float
        """
        super().__init__()
        self.v0 = tf.cast(v0, precision)
        self.v1 = tf.cast(v1, precision)
        self.v2 = tf.cast(v2, precision)
        self.v3 = tf.cast(v3, precision)
        # Edge vectors spanning the quad from v0.
        self.u = self.v1 - self.v0
        self.v = self.v3 - self.v0
        self.reflectivity = reflectivity
        self.normal = norm(tf.linalg.cross(self.u, self.v))
    @property
    def vertices(self):
        return tf.stack([self.v0, self.v1, self.v2, self.v3])
    def intersect(self, rays):
        """Intersect a ray bundle with this quad and reflect the hits.

        Same scheme as Triangle.intersect, but the inside test is the square
        0 <= si <= 1 and 0 <= ti <= 1 in the (u, v) edge basis.
        """
        num_rays = rays.size()
        tiled_v = tile_vector(self.v, num_rays)
        tiled_u = tile_vector(self.u, num_rays)
        tiled_normal = tile_vector(self.normal, num_rays)
        # Parametric plane intersection: rI = n.(v0 - p0) / n.d
        b = tf.reduce_sum(tiled_normal*rays.p1, 1)
        a = tf.reduce_sum(tiled_normal*(self.v0 - rays.p0), 1)
        # check if the ray is close enough to be parallel or close enough to lie in the plane
        cond_0_1 = tf.greater(tf.abs(b), epsilon)
        cond_0_2 = tf.greater(tf.abs(a), epsilon)
        cond_0 = tf.logical_and(cond_0_1, cond_0_2)
        rI = tf.expand_dims(tf.where(tf.logical_or(cond_0, tf.less(a/b, 0.)), a/b, tf.zeros_like(a)), -1)
        p_intersect = rays.p0 + rays.p1 * rI
        w = p_intersect - self.v0  # p0 + rI * p1 - v0
        # Parametric coordinates of the hit point in the (u, v) edge basis.
        wv_dot = tf.reduce_sum(w*tiled_v, 1)
        wu_dot = tf.reduce_sum(w*tiled_u, 1)
        uv_dot = tf.tensordot(self.u, self.v, 1)
        uu_dot = tf.tensordot(self.u, self.u, 1)
        vv_dot = tf.tensordot(self.v, self.v, 1)
        denom = uv_dot * uv_dot - uu_dot * vv_dot
        si = (uv_dot * wv_dot - vv_dot * wu_dot) / denom
        ti = (uv_dot * wu_dot - uu_dot * wv_dot) / denom
        ray_direction = ray_reflection(rays, tiled_normal)
        # Reject: hit too close / behind the origin, or outside the quad.
        cond_1 = tf.less(tf.squeeze(rI), epsilon)
        cond_2 = tf.less(si, 0.)
        cond_3 = tf.greater(si, 1.)
        cond_4 = tf.less(ti, 0.)
        cond_5 = tf.greater(ti, 1.)
        no_interaction_idx = tf.logical_or(tf.logical_or(tf.logical_or(tf.logical_or(cond_1, cond_2), cond_3), cond_4), cond_5)
        no_interaction_idx_3 = tf.concat([tf.expand_dims(no_interaction_idx, 1), tf.expand_dims(no_interaction_idx, 1), tf.expand_dims(no_interaction_idx, 1)], 1)
        # Missed rays keep their original state; hits advance and reflect.
        p_intersect = tf.where(no_interaction_idx_3, rays.p0, p_intersect)
        ray_direction = tf.where(no_interaction_idx_3, rays.p1, ray_direction)
        new_interact_num = tf.where(no_interaction_idx, rays.interact_num, rays.interact_num+1)
        new_intensity = tf.where(no_interaction_idx, rays.intensity, rays.intensity*self.reflectivity)
        return Ray(p_intersect, ray_direction, intensity=new_intensity, interact_num=new_interact_num)
class Pyramid:
    def __init__(self, center, width, height, reflectivity=1.):
        """
        A square-based pyramid built from four Triangle faces.

        :param center: 3D vectors for the center of the base
        :type center: tf.Tensor
        :param width: width of the base
        :type width: float
        :param height: height of the base
        :type height: float
        :param reflectivity: Reflectivity of the surface
        :type reflectivity: float
        """
        self.center = tf.cast(center, precision) # center of the pyramid base
        self.width = tf.cast(width, precision) # width of the pyramid base
        self.height = tf.cast(height, precision)
        self.reflectivity = reflectivity
        # Corners of the square base, centred on `center` (z = 0 plane of base).
        self.top_left = self.center + tf.stack([-1. * self.width / 2., self.width / 2., 0.])
        self.top_right = self.center + tf.stack([self.width / 2., self.width / 2., 0.])
        self.bottom_left = self.center + tf.stack([-1. * self.width / 2., -1. * self.width / 2., 0.])
        self.bottom_right = self.center + tf.stack([self.width / 2., -1. * self.width / 2., 0.])
        # Apex of the pyramid.
        self.top_v = self.center + tf.stack([0., 0., self.height])
        self.vertices = tf.stack([self.top_left, self.top_right, self.bottom_right, self.bottom_left, self.top_v])
        # The four triangular faces joining the apex to the base edges.
        self.tri_1 = Triangle(self.top_v, self.top_left, self.top_right, self.reflectivity)
        self.tri_2 = Triangle(self.top_v, self.top_right, self.bottom_right, self.reflectivity)
        self.tri_3 = Triangle(self.top_v, self.bottom_right, self.bottom_left, self.reflectivity)
        self.tri_4 = Triangle(self.top_v, self.bottom_left, self.top_left, self.reflectivity)
        self.tris = [self.tri_1, self.tri_2, self.tri_3, self.tri_4]
    def intersect(self, rays):
        """Intersect rays with all four faces, keeping the nearest hit per ray."""
        _pt = rays.copy() # by default assume not intersecting with pyramid
        distance = tf.ones(rays.size(), dtype=precision) * faraway
        for tri in self.tris:
            pt = tri.intersect(rays)
            # Rays whose interact_num increased actually hit this face.
            interacted_idx = tf.greater(pt.interact_num, rays.interact_num)
            dist = mag(rays.p0-pt.p0) # get the distance
            # Only accept a hit closer than any face processed so far.
            interacted_w_shortest_idx = tf.logical_and(interacted_idx, tf.less(dist, distance))
            if tf.math.count_nonzero(interacted_w_shortest_idx) == 0:
                continue
            else:
                distance = tf.where(interacted_w_shortest_idx, dist, distance)
                # its fine, weird indexing
                _pt[interacted_w_shortest_idx] = pt
        return _pt
class Cone(Surface):
    def __init__(self, center, radius, height, reflectivity=1.):
        """
        A Cone where the base centered at `center` with height `height`

        :param center: 3D vectors for the center of the base
        :type center: tf.Tensor
        :param radius: radius of the base
        :type radius: float
        :param height: height of the base
        :type height: float
        :param reflectivity: Reflectivity of the surface
        :type reflectivity: float
        """
        super().__init__()
        self.center = tf.cast(center, precision)
        self.radius = tf.cast(radius, precision)
        self.height = tf.cast(height, precision)
        self.reflectivity = reflectivity
        self.c = self.center + tf.cast(tf.stack([0., 0., height]), precision)  # vector for the tips
        self.v = self.center - self.c  # vector for the axis
        # Half-opening angle of the cone and its squared cosine, used by the
        # quadratic intersection test below.
        self.halfangle = tf.atan(self.radius/self.height)
        self.halfangle2 = tf.cos(self.halfangle)**2
    @property
    def vertices(self):
        return tf.stack([self.c])
    def intersect(self, rays):
        """Intersect rays with the cone's lateral surface and reflect the hits."""
        # see http://lousodrome.net/blog/light/2017/01/03/intersection-of-a-ray-and-a-cone/
        num_rays = rays.size()
        tiled_v = tile_vector(self.v, num_rays)
        tiled_c = tile_vector(self.c, num_rays)
        co = rays.p0 - tiled_c
        # Coefficients of the quadratic a*t^2 + b*t + c = 0 in the ray parameter t.
        p1v_dot = tf.reduce_sum(rays.p1 * tiled_v, 1)
        a = p1v_dot * tf.reduce_sum(rays.p1 * tiled_v, 1) - self.halfangle2
        b = 2. * (p1v_dot * tf.reduce_sum(tiled_v * co, 1) - tf.reduce_sum(co * rays.p1, 1) * self.halfangle2)
        c = tf.reduce_sum(co * tiled_v, 1) ** 2 - tf.reduce_sum(co * co, 1) * self.halfangle2
        det = b * b - 4. * a * c
        # Negative discriminant means no real root; mark with a -1 sentinel.
        det = tf.where(tf.greater(det, 0.), tf.sqrt(det), tf.ones_like(det) * -1.)
        t1 = (-b - det) / (2. * a)
        t2 = (-b + det) / (2. * a)
        # close enough to 0 then assume 0
        t1 = tf.where(tf.greater(tf.abs(t1), epsilon), t1, tf.zeros_like(t1))
        t2 = tf.where(tf.greater(tf.abs(t2), epsilon), t2, tf.zeros_like(t2))
        # Prefer the smaller positive root of the two.
        good_t_idx = tf.logical_or(tf.less(t1, 0.), tf.logical_and(tf.greater(t2, 0.), tf.less(t2, t1)))
        t = tf.where(good_t_idx, t2, t1)
        bad_t = tf.where(good_t_idx, t1, t2)
        # Height check for the non-preferred root (used as a fallback below).
        bad_p_intersect = rays.p0 + tf.multiply(rays.p1, tf.expand_dims(bad_t, 1))
        bad_cp = bad_p_intersect - tiled_c
        bad_h = tf.reduce_sum(bad_cp * tiled_v, 1)
        bad_cond_3 = tf.logical_or(tf.less(bad_h, 0.), tf.greater(bad_h, self.height))
        p_intersect = rays.p0 + tf.multiply(rays.p1, tf.expand_dims(t, 1))
        cp = p_intersect - tiled_c
        h = tf.reduce_sum(cp * tiled_v, 1)
        # The hit must lie between the tip and the base plane; otherwise the
        # ray intersected the infinite "shadow cone" extension.
        cond_3 = tf.logical_or(tf.less(h, 0.), tf.greater(h, self.height))
        # If the preferred root fails the height check but the other passes,
        # fall back to the other root and recompute the hit point.
        t_disagree_idx = tf.not_equal(cond_3, bad_cond_3)
        t = tf.where(tf.logical_and(t_disagree_idx, tf.logical_not(bad_cond_3)), bad_t, t)
        p_intersect = rays.p0 + tf.multiply(rays.p1, tf.expand_dims(t, 1))
        cp = p_intersect - tiled_c
        h = tf.reduce_sum(cp * tiled_v, 1)
        cond_3 = tf.logical_or(tf.less(h, 0.), tf.greater(h, self.height))
        # Surface normal at the hit point, then reflect the ray about it.
        normal = norm(tf.multiply(cp, tf.expand_dims(tf.reduce_sum(tiled_v * cp, 1) / tf.reduce_sum(cp * cp, 1), 1)) -
                      tiled_v)
        ray_direction = ray_reflection(rays, normal)
        cond_1 = tf.less(det, 0.)
        cond_2 = tf.less_equal(t, 0.)
        no_interaction_idx = tf.logical_or(tf.logical_or(cond_1, cond_2), cond_3)
        no_interaction_idx_3 = tf.concat([tf.expand_dims(no_interaction_idx, 1), tf.expand_dims(no_interaction_idx, 1),
                                           tf.expand_dims(no_interaction_idx, 1)], 1)
        # Missed rays keep their original state; hits advance and reflect.
        p_intersect = tf.where(no_interaction_idx_3, rays.p0, p_intersect)
        ray_direction = tf.where(no_interaction_idx_3, rays.p1, ray_direction)
        new_interact_num = tf.where(no_interaction_idx, rays.interact_num, rays.interact_num + 1)
        new_intensity = tf.where(no_interaction_idx, rays.intensity, rays.intensity * self.reflectivity)
        return Ray(p_intersect, ray_direction, intensity=new_intensity, interact_num=new_interact_num)
class PyramidArray:
    def __init__(self, center, width, height, resolution, spacing=0., reflectivity=0.1):
        """
        A rectangular array of pyramids sitting on a common backplane.

        :param center: 3D vectors for the center of the base
        :type center: tf.Tensor
        :param width: width of the base
        :type width: float
        :param height: height of the base
        :type height: float
        :param resolution: number of pyramid at each side
        :type resolution: tuple
        :param spacing: spacing between each pyramid
        :type spacing: float
        :param reflectivity: Reflectivity of the surface
        :type reflectivity: float
        """
        self.center = tf.cast(center, precision) # detector center
        self.width = tf.cast(width, precision) # pixel width
        self.height = tf.cast(height, precision) # pixel width
        self.resolution = resolution # resolution (W x H)
        self.spacing = spacing # spacing between pyramids
        self.reflectivity = reflectivity
        self.num_pixel = tf.reduce_prod(self.resolution)
        self.x_append = (self.resolution[0] - 1) * self.spacing # total extra space from spacing
        self.y_append = (self.resolution[1] - 1) * self.spacing # total extra space from spacing
        self.x, self.y = self.pixels_locations() # center of each pyramid
        # Outer corners of the array's base plane.
        # NOTE(review): the y terms below mix x_append and y_append (e.g.
        # top_right and bottom_left use x_append for their y component while
        # top_left/bottom_right use y_append) — asymmetric for non-square
        # resolutions with spacing != 0; confirm intended.
        self.top_left = self.center + tf.stack([-1. * self.width * self.resolution[0] / 2. - self.x_append/2, self.width * self.resolution[1] / 2. + self.y_append/2, 0.])
        self.top_right = self.center + tf.stack([self.width * self.resolution[0] / 2. + self.x_append/2, self.width * self.resolution[1] / 2. + self.x_append/2, 0.])
        self.bottom_left = self.center + tf.stack([-1. * self.width * self.resolution[0] / 2. - self.x_append/2, -1. * self.width * self.resolution[1] / 2. - self.x_append/2, 0.])
        self.bottom_right = self.center + tf.stack([self.width * self.resolution[0] / 2. + self.x_append/2, -1. * self.width * self.resolution[1] / 2. - self.y_append/2, 0.])
        self.pyramid_list = [self.get_pyramid_from_array(i) for i in range(self.num_pixel)]
        self.backplane = Plane(self.top_left, self.top_right, self.bottom_right, self.bottom_left) # the plane where pyramids are sitting on, in case spacing != 0
    def pixels_locations(self):
        """Return meshgrid (x, y) centre coordinates for every pyramid."""
        physical_w = self.width * self.resolution[0] + self.x_append
        physical_h = self.width * self.resolution[1] + self.y_append
        all_w = physical_w / 2. - (tf.linspace(tf.constant(0., dtype=precision),
                                               tf.constant(self.resolution[0]-1., dtype=precision),
                                               self.resolution[0]) * self.width) - self.width / 2.
        all_h = physical_h / 2. - (tf.linspace(tf.constant(0., dtype=precision),
                                               tf.constant(self.resolution[1]-1., dtype=precision),
                                               self.resolution[1]) * self.width) - self.width / 2.
        all_w = all_w - (np.array([range(0, self.resolution[0])]) * self.spacing)
        # NOTE(review): resolution[0] is used for the y offsets as well; for
        # non-square resolutions this presumably should be resolution[1] —
        # confirm against callers.
        all_h = all_h - (np.array([range(0, self.resolution[0])]) * self.spacing)
        x, y = tf.meshgrid(all_w, all_h)
        return x, y
    def get_pyramid_from_array(self, i):
        # Map flat index i to its (row, col) pixel and build the pyramid there.
        assert i < self.num_pixel
        i = np.unravel_index(i, self.resolution)
        return Pyramid(self.center + tf.concat([self.x[i], self.y[i], 0.], 0), self.width, self.height, reflectivity=self.reflectivity)
    def intersect(self, rays):
        """Intersect rays with every pyramid (nearest hit wins), then the backplane."""
        _pt = rays.copy() # by default assume not intersecting with pyramid
        distance = tf.ones(rays.size(), dtype=precision) * faraway
        for i in range(self.num_pixel):
            pt = self.pyramid_list[i].intersect(rays)
            interacted_idx = tf.greater(pt.interact_num, rays.interact_num)
            dist = mag(rays.p0-pt.p0) # get the distance
            # Only accept a hit closer than any pyramid processed so far.
            interacted_w_shortest_idx = tf.logical_and(interacted_idx, tf.less(dist, distance))
            if tf.math.count_nonzero(interacted_w_shortest_idx) == 0:
                continue
            else:
                distance = tf.where(interacted_w_shortest_idx, dist, distance)
                # its fine, weird indexing
                _pt[interacted_w_shortest_idx] = pt
        # Rays that missed every pyramid may still hit the flat backplane.
        __pt = self.backplane.intersect(rays)
        interacted_idx = tf.greater(__pt.interact_num, rays.interact_num)
        dist = mag(rays.p0-__pt.p0) # get the distance
        interacted_w_shortest_idx = tf.logical_and(interacted_idx, tf.less(dist, distance))
        _pt[interacted_w_shortest_idx] = __pt
        return _pt
class ConeArray:
    def __init__(self, center, radius, height, resolution, spacing=0., reflectivity=0.1):
        """
        A rectangular array of cones sitting on a common backplane.

        :param center: 3D vectors for the center of the base
        :type center: tf.Tensor
        :param radius: radius of the base
        :type radius: float
        :param height: height of the base
        :type height: float
        :param resolution: number of cones at each side
        :type resolution: tuple
        :param spacing: spacing between each cone
        :type spacing: float
        :param reflectivity: Reflectivity of the surface
        :type reflectivity: float
        """
        self.center = tf.cast(center, precision) # detector center
        self.radius = tf.cast(radius, precision) # cone base radius
        self.height = tf.cast(height, precision) # cone height
        self.resolution = resolution # resolution (W x H)
        self.spacing = spacing # spacing between pyramids
        self.reflectivity = reflectivity
        self.num_pixel = tf.reduce_prod(self.resolution)
        self.x_append = (self.resolution[0] - 1) * self.spacing # total extra space from spacing
        self.y_append = (self.resolution[1] - 1) * self.spacing # total extra space from spacing
        self.x, self.y = self.pixels_locations() # center of each cone
        # Outer corners of the array's base plane.
        # NOTE(review): same x_append/y_append mixing as PyramidArray in the
        # y components below — confirm intended for non-square resolutions.
        self.top_left = self.center + tf.stack([-2. * self.radius * self.resolution[0] / 2. - self.x_append / 2,
                                                 2. * self.radius * self.resolution[1] / 2. + self.y_append / 2,
                                                 0.])
        self.top_right = self.center + tf.stack([2. * self.radius * self.resolution[0] / 2. + self.x_append / 2,
                                                  2. * self.radius * self.resolution[1] / 2. + self.x_append / 2,
                                                  0.])
        self.bottom_left = self.center + tf.stack([-2. * self.radius * self.resolution[0] / 2. - self.x_append / 2,
                                                    -2. * self.radius * self.resolution[1] / 2. - self.x_append / 2,
                                                    0.])
        self.bottom_right = self.center + tf.stack([2. * self.radius * self.resolution[0] / 2. + self.x_append / 2,
                                                     -2. * self.radius * self.resolution[1] / 2. - self.y_append / 2,
                                                     0.])
        self.pyramid_list = [self.get_pyramid_from_array(i) for i in range(self.num_pixel)]
        self.backplane = Plane(self.top_left, self.top_right, self.bottom_right,
                               self.bottom_left) # the plane where pyramids are sitting on, in case spacing != 0
    def pixels_locations(self):
        """Return meshgrid (x, y) centre coordinates for every cone."""
        physical_w = 2 * self.radius * self.resolution[0] + self.x_append
        physical_h = 2 * self.radius * self.resolution[1] + self.y_append
        all_w = physical_w / 2. - (tf.linspace(tf.constant(0., dtype=precision),
                                               tf.constant(self.resolution[0] - 1., dtype=precision),
                                               self.resolution[0]) * self.radius * 2.) - self.radius
        all_h = physical_h / 2. - (tf.linspace(tf.constant(0., dtype=precision),
                                               tf.constant(self.resolution[1] - 1., dtype=precision),
                                               self.resolution[1]) * self.radius * 2.) - self.radius
        all_w = all_w - (np.array([range(0, self.resolution[0])]) * self.spacing)
        # NOTE(review): resolution[0] is used for the y offsets as well; for
        # non-square resolutions this presumably should be resolution[1] —
        # confirm against callers.
        all_h = all_h - (np.array([range(0, self.resolution[0])]) * self.spacing)
        x, y = tf.meshgrid(all_w, all_h)
        return x, y
    def get_pyramid_from_array(self, i):
        # Map flat index i to its (row, col) pixel and build the cone there.
        assert i < self.num_pixel
        i = np.unravel_index(i, self.resolution)
        return Cone(self.center + tf.concat([self.x[i], self.y[i], 0.], 0), self.radius, self.height,
                    reflectivity=self.reflectivity)
    def intersect(self, rays):
        """Intersect rays with every cone (nearest hit wins), then the backplane."""
        _pt = rays.copy() # by default assume not intersecting with pyramid
        distance = tf.ones(rays.size(), dtype=precision) * faraway
        for i in range(self.num_pixel):
            pt = self.pyramid_list[i].intersect(rays)
            interacted_idx = tf.greater(pt.interact_num, rays.interact_num)
            dist = mag(rays.p0 - pt.p0) # get the distance
            # Only accept a hit closer than any cone processed so far.
            interacted_w_shortest_idx = tf.logical_and(interacted_idx, tf.less(dist, distance))
            if tf.math.count_nonzero(interacted_w_shortest_idx) == 0:
                continue
            else:
                distance = tf.where(interacted_w_shortest_idx, dist, distance)
                # its fine, weird indexing
                _pt[interacted_w_shortest_idx] = pt
        # Rays that missed every cone may still hit the flat backplane.
        __pt = self.backplane.intersect(rays)
        interacted_idx = tf.greater(__pt.interact_num, rays.interact_num)
        dist = mag(rays.p0 - __pt.p0) # get the distance
        interacted_w_shortest_idx = tf.logical_and(interacted_idx, tf.less(dist, distance))
        _pt[interacted_w_shortest_idx] = __pt
        return _pt
class ConeDenseArray:
    def __init__(self, center, radius, coneheight, width, height, reflectivity=0.1):
        """
        An array of dense cones (fit as many cone as possible automatically)

        See https://www.engineeringtoolbox.com/circles-within-rectangle-d_1905.html

        :param center: 3D vectors for the center of the base
        :type center: tf.Tensor
        :param radius: radius of the base
        :type radius: float
        :param coneheight: height of the base
        :type coneheight: float
        :param width: width of the box to be filled with cones
        :type width: float
        :param height: height of the base
        :type height: float
        :param reflectivity: Reflectivity of the surface
        :type reflectivity: float
        """
        self.center = tf.cast(center, precision) # detector center
        self.radius = tf.cast(radius, precision) # cone base radius
        self.coneheight = tf.cast(coneheight, precision) # cone height
        self.height = tf.cast(height, precision) # backplane height
        self.width = tf.cast(width, precision) # backplane width
        self.reflectivity = reflectivity
        self.x, self.y = self.pixels_locations() # center of each cone
        # Number of cones actually packed (determined by pixels_locations).
        self.num_pixel = tf.reduce_prod(tf.shape(self.x))
        # Corners of the rectangular backplane.
        self.top_left = self.center + tf.stack([-1. * self.width / 2., self.height / 2., 0.])
        self.top_right = self.center + tf.stack([self.width / 2., self.height / 2., 0.])
        self.bottom_left = self.center + tf.stack([-1. * self.width / 2., -1. * self.height / 2., 0.])
        self.bottom_right = self.center + tf.stack([self.width / 2., -1. * self.height / 2., 0.])
        self.pyramid_list = [self.get_cones_from_array(i) for i in range(self.num_pixel)]
        self.backplane = Plane(self.top_left, self.top_right, self.bottom_right, self.bottom_left)
    def pixels_locations(self):
        """Hexagonally pack circle centres into the backplane rectangle.

        Rows are offset alternately (the `triangle` flag) so adjacent rows
        nest, with row pitch sqrt(0.75) * (diameter + spacing).
        """
        rw, rh = self.width, self.height
        cd, cs = self.radius * 2, 0.
        assert rw > 0.
        triangle = 0
        # Over-sized scratch buffers; only the first `counter` entries are used.
        x_loc = np.zeros(99999)
        y_loc = np.zeros(99999)
        posX = cd / 2 + cs
        posY = cd / 2 + cs
        counter = 0
        while posY+cd / 2 <= rh:
            while (posX + cd /2 + cs <= rw):
                x_loc[counter] = posX
                y_loc[counter] = posY
                counter = counter + 1
                posX = posX + (cd + cs)
            # Alternate the x start of each row to nest the circles.
            if triangle == 0:
                posX = (cd + 1.5*cs)
                triangle = 1
            else:
                posX = cd / 2 + cs
                triangle = 0
            posY = posY + np.power(np.power((cd + cs), 2) * 0.75, 0.5)
        # origin was assumed as bottom left corner, need to shift origin to the center of the backplane
        x_loc -= self.width/2
        y_loc -= self.height/2
        # multiple y by -1 to flip the array top-down
        return x_loc[:counter], y_loc[:counter] * -1.
    def get_cones_from_array(self, i):
        # Build the cone located at packed position i.
        assert i < self.num_pixel
        return Cone(self.center + tf.concat([self.x[i], self.y[i], 0.], 0), self.radius, self.coneheight,
                    reflectivity=self.reflectivity)
    def intersect(self, rays):
        """Intersect rays with every cone (nearest hit wins), then the backplane."""
        _pt = rays.copy() # by default assume not intersecting with pyramid
        distance = tf.ones(rays.size(), dtype=precision) * faraway
        for i in range(self.num_pixel):
            pt = self.pyramid_list[i].intersect(rays)
            interacted_idx = tf.greater(pt.interact_num, rays.interact_num)
            dist = mag(rays.p0 - pt.p0) # get the distance
            # Only accept a hit closer than any cone processed so far.
            interacted_w_shortest_idx = tf.logical_and(interacted_idx, tf.less(dist, distance))
            if tf.math.count_nonzero(interacted_w_shortest_idx) == 0:
                continue
            else:
                distance = tf.where(interacted_w_shortest_idx, dist, distance)
                # its fine, weird indexing
                _pt[interacted_w_shortest_idx] = pt
        # Rays that missed every cone may still hit the flat backplane.
        __pt = self.backplane.intersect(rays)
        interacted_idx = tf.greater(__pt.interact_num, rays.interact_num)
        dist = mag(rays.p0 - __pt.p0) # get the distance
        interacted_w_shortest_idx = tf.logical_and(interacted_idx, tf.less(dist, distance))
        _pt[interacted_w_shortest_idx] = __pt
        return _pt
class Detector:
    """
    A detector lying on a horizontal plane
    """
    def __init__(self, center, resolution, pixel_width, reflectivity=0.):
        """
        A class for a rectangular detector

        :param center: 3D vectors for the center of the base
        :type center: tf.Tensor
        :param resolution: number of pixels on each side
        :type resolution: tuple
        :param pixel_width: width of the pixel
        :type pixel_width: float
        :param reflectivity: Reflectivity of the surface
        :type reflectivity: float
        """
        self.center = tf.cast(center, precision) # detector center
        self.pixel_width = tf.cast(pixel_width, precision) # pixel width
        self.resolution = resolution # resolution (W x H)
        self.num_pixel = tf.reduce_prod(self.resolution)
        self.x, self.y = self.pixels_locations()
        self.reflectivity = reflectivity
        # Corners of the detector plane.
        self.top_left = self.center + tf.stack([-1. * self.pixel_width * self.resolution[0] / 2., self.pixel_width * self.resolution[1] / 2., 0.])
        self.top_right = self.center + tf.stack([self.pixel_width * self.resolution[0] / 2., self.pixel_width * self.resolution[1] / 2., 0.])
        self.bottom_left = self.center + tf.stack([-1. * self.pixel_width * self.resolution[0] / 2., -1. * self.pixel_width * self.resolution[1] / 2., 0.])
        self.bottom_right = self.center + tf.stack([self.pixel_width * self.resolution[0] / 2., -1. * self.pixel_width * self.resolution[1] / 2., 0.])
        self.u = self.top_right - self.top_left
        self.v = self.bottom_left - self.top_left
        self.normal = norm(tf.linalg.cross(self.u, self.v))
        # Ray intersection is delegated to this plane.
        self.plane = Plane(self.top_left, self.top_right, self.bottom_right, self.bottom_left)
    def pixels_locations(self):
        """Return meshgrid (x, y) centre coordinates for every pixel."""
        physical_w = self.pixel_width * self.resolution[0]
        physical_h = self.pixel_width * self.resolution[1]
        all_w = physical_w / 2. - (tf.linspace(tf.constant(0., dtype=precision),
                                               tf.constant(self.resolution[0]-1., dtype=precision),
                                               self.resolution[0]) * self.pixel_width) - self.pixel_width / 2.
        all_h = physical_h / 2. - (tf.linspace(tf.constant(0., dtype=precision),
                                               tf.constant(self.resolution[1]-1., dtype=precision),
                                               self.resolution[1]) * self.pixel_width) - self.pixel_width / 2.
        x, y = tf.meshgrid(all_w, all_h)
        return x, y
    def get_random_rays_from_pixel(self, i, num=1):
        """Emit `num` rays from uniformly-random positions inside pixel i,
        travelling in the -z direction with random x/y direction components."""
        assert i < self.num_pixel
        i = np.unravel_index(i, self.resolution)
        xi = tf.random.uniform([num, 1], self.x[i]-self.pixel_width/2, self.x[i]+self.pixel_width/2, precision)
        yi = tf.random.uniform([num, 1], self.y[i]-self.pixel_width/2, self.y[i]+self.pixel_width/2, precision)
        xdirecti = tf.random.uniform([num, 1], -10., 10., precision)
        ydirecti = tf.random.uniform([num, 1], -10., 10., precision)
        # hard code x-y direction minium to not waste ray??
        return Ray(self.center + tf.concat([xi, yi, tf.zeros((num, 1), dtype=precision)], 1),
                   tf.concat([xdirecti, ydirecti, tf.ones((num, 1), dtype=precision)*-1.], 1),
                   intensity=tf.ones(num, dtype=precision),
                   interact_num=tf.zeros(num, dtype=tf.int32))
    def intersect(self, rays):
        return self.plane.intersect(rays)
| [
"tensorflow.meshgrid",
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.logical_not",
"tensorflow.linalg.cross",
"tensorflow.config.list_physical_devices",
"tensorflow.math.atan2",
"copy.deepcopy",
"tensorflow.ones_like",
"tensorflow.cast",
"tensorflow.tensordot",
"tensorflow.not_equal... | [((271, 309), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (302, 309), True, 'import tensorflow as tf\n'), ((523, 558), 'tensorflow.constant', 'tf.constant', (['np.pi'], {'dtype': 'precision'}), '(np.pi, dtype=precision)\n', (534, 558), True, 'import tensorflow as tf\n'), ((636, 665), 'tensorflow.constant', 'tf.constant', (['(1e-07)', 'precision'], {}), '(1e-07, precision)\n', (647, 665), True, 'import tensorflow as tf\n'), ((355, 406), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (395, 406), True, 'import tensorflow as tf\n'), ((711, 740), 'tensorflow.constant', 'tf.constant', (['(1e-15)', 'precision'], {}), '(1e-15, precision)\n', (722, 740), True, 'import tensorflow as tf\n'), ((1590, 1618), 'tensorflow.zeros_like', 'tf.zeros_like', (['ray_direction'], {}), '(ray_direction)\n', (1603, 1618), True, 'import tensorflow as tf\n'), ((2085, 2110), 'tensorflow.expand_dims', 'tf.expand_dims', (['tensor', '(0)'], {}), '(tensor, 0)\n', (2099, 2110), True, 'import tensorflow as tf\n'), ((2185, 2234), 'tensorflow.math.atan2', 'tf.math.atan2', (['(_norm[:, 0] + epsilon)', '_norm[:, 1]'], {}), '(_norm[:, 0] + epsilon, _norm[:, 1])\n', (2198, 2234), True, 'import tensorflow as tf\n'), ((2236, 2261), 'tensorflow.math.acos', 'tf.math.acos', (['_norm[:, 2]'], {}), '(_norm[:, 2])\n', (2248, 2261), True, 'import tensorflow as tf\n'), ((4271, 4285), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (4279, 4285), False, 'from copy import deepcopy\n'), ((4971, 4993), 'tensorflow.cast', 'tf.cast', (['v0', 'precision'], {}), '(v0, precision)\n', (4978, 4993), True, 'import tensorflow as tf\n'), ((5012, 5034), 'tensorflow.cast', 'tf.cast', (['v1', 'precision'], {}), '(v1, precision)\n', (5019, 5034), True, 'import tensorflow as tf\n'), ((5053, 5075), 'tensorflow.cast', 'tf.cast', (['v2', 
'precision'], {}), '(v2, precision)\n', (5060, 5075), True, 'import tensorflow as tf\n'), ((5311, 5348), 'tensorflow.stack', 'tf.stack', (['[self.v0, self.v1, self.v2]'], {}), '([self.v0, self.v1, self.v2])\n', (5319, 5348), True, 'import tensorflow as tf\n'), ((5604, 5644), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(tiled_normal * rays.p1)', '(1)'], {}), '(tiled_normal * rays.p1, 1)\n', (5617, 5644), True, 'import tensorflow as tf\n'), ((5655, 5707), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(tiled_normal * (self.v0 - rays.p0))', '(1)'], {}), '(tiled_normal * (self.v0 - rays.p0), 1)\n', (5668, 5707), True, 'import tensorflow as tf\n'), ((5926, 5960), 'tensorflow.logical_and', 'tf.logical_and', (['cond_0_1', 'cond_0_2'], {}), '(cond_0_1, cond_0_2)\n', (5940, 5960), True, 'import tensorflow as tf\n'), ((6274, 6303), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(w * tiled_v)', '(1)'], {}), '(w * tiled_v, 1)\n', (6287, 6303), True, 'import tensorflow as tf\n'), ((6319, 6348), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(w * tiled_u)', '(1)'], {}), '(w * tiled_u, 1)\n', (6332, 6348), True, 'import tensorflow as tf\n'), ((6373, 6404), 'tensorflow.tensordot', 'tf.tensordot', (['self.u', 'self.v', '(1)'], {}), '(self.u, self.v, 1)\n', (6385, 6404), True, 'import tensorflow as tf\n'), ((6422, 6453), 'tensorflow.tensordot', 'tf.tensordot', (['self.u', 'self.u', '(1)'], {}), '(self.u, self.u, 1)\n', (6434, 6453), True, 'import tensorflow as tf\n'), ((6471, 6502), 'tensorflow.tensordot', 'tf.tensordot', (['self.v', 'self.v', '(1)'], {}), '(self.v, self.v, 1)\n', (6483, 6502), True, 'import tensorflow as tf\n'), ((6821, 6837), 'tensorflow.less', 'tf.less', (['si', '(0.0)'], {}), '(si, 0.0)\n', (6828, 6837), True, 'import tensorflow as tf\n'), ((6854, 6873), 'tensorflow.greater', 'tf.greater', (['si', '(1.0)'], {}), '(si, 1.0)\n', (6864, 6873), True, 'import tensorflow as tf\n'), ((6890, 6906), 'tensorflow.less', 'tf.less', (['ti', '(0.0)'], {}), '(ti, 0.0)\n', (6897, 
6906), True, 'import tensorflow as tf\n'), ((6923, 6947), 'tensorflow.greater', 'tf.greater', (['(si + ti)', '(1.0)'], {}), '(si + ti, 1.0)\n', (6933, 6947), True, 'import tensorflow as tf\n'), ((7271, 7323), 'tensorflow.where', 'tf.where', (['no_interaction_idx_3', 'rays.p0', 'p_intersect'], {}), '(no_interaction_idx_3, rays.p0, p_intersect)\n', (7279, 7323), True, 'import tensorflow as tf\n'), ((7348, 7402), 'tensorflow.where', 'tf.where', (['no_interaction_idx_3', 'rays.p1', 'ray_direction'], {}), '(no_interaction_idx_3, rays.p1, ray_direction)\n', (7356, 7402), True, 'import tensorflow as tf\n'), ((7430, 7500), 'tensorflow.where', 'tf.where', (['no_interaction_idx', 'rays.interact_num', '(rays.interact_num + 1)'], {}), '(no_interaction_idx, rays.interact_num, rays.interact_num + 1)\n', (7438, 7500), True, 'import tensorflow as tf\n'), ((7523, 7608), 'tensorflow.where', 'tf.where', (['no_interaction_idx', 'rays.intensity', '(rays.intensity * self.reflectivity)'], {}), '(no_interaction_idx, rays.intensity, rays.intensity * self.reflectivity\n )\n', (7531, 7608), True, 'import tensorflow as tf\n'), ((8315, 8337), 'tensorflow.cast', 'tf.cast', (['v0', 'precision'], {}), '(v0, precision)\n', (8322, 8337), True, 'import tensorflow as tf\n'), ((8356, 8378), 'tensorflow.cast', 'tf.cast', (['v1', 'precision'], {}), '(v1, precision)\n', (8363, 8378), True, 'import tensorflow as tf\n'), ((8397, 8419), 'tensorflow.cast', 'tf.cast', (['v2', 'precision'], {}), '(v2, precision)\n', (8404, 8419), True, 'import tensorflow as tf\n'), ((8438, 8460), 'tensorflow.cast', 'tf.cast', (['v3', 'precision'], {}), '(v3, precision)\n', (8445, 8460), True, 'import tensorflow as tf\n'), ((8696, 8742), 'tensorflow.stack', 'tf.stack', (['[self.v0, self.v1, self.v2, self.v3]'], {}), '([self.v0, self.v1, self.v2, self.v3])\n', (8704, 8742), True, 'import tensorflow as tf\n'), ((8990, 9030), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(tiled_normal * rays.p1)', '(1)'], {}), '(tiled_normal * 
rays.p1, 1)\n', (9003, 9030), True, 'import tensorflow as tf\n'), ((9041, 9093), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(tiled_normal * (self.v0 - rays.p0))', '(1)'], {}), '(tiled_normal * (self.v0 - rays.p0), 1)\n', (9054, 9093), True, 'import tensorflow as tf\n'), ((9303, 9337), 'tensorflow.logical_and', 'tf.logical_and', (['cond_0_1', 'cond_0_2'], {}), '(cond_0_1, cond_0_2)\n', (9317, 9337), True, 'import tensorflow as tf\n'), ((9581, 9610), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(w * tiled_v)', '(1)'], {}), '(w * tiled_v, 1)\n', (9594, 9610), True, 'import tensorflow as tf\n'), ((9626, 9655), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(w * tiled_u)', '(1)'], {}), '(w * tiled_u, 1)\n', (9639, 9655), True, 'import tensorflow as tf\n'), ((9680, 9711), 'tensorflow.tensordot', 'tf.tensordot', (['self.u', 'self.v', '(1)'], {}), '(self.u, self.v, 1)\n', (9692, 9711), True, 'import tensorflow as tf\n'), ((9729, 9760), 'tensorflow.tensordot', 'tf.tensordot', (['self.u', 'self.u', '(1)'], {}), '(self.u, self.u, 1)\n', (9741, 9760), True, 'import tensorflow as tf\n'), ((9778, 9809), 'tensorflow.tensordot', 'tf.tensordot', (['self.v', 'self.v', '(1)'], {}), '(self.v, self.v, 1)\n', (9790, 9809), True, 'import tensorflow as tf\n'), ((10127, 10143), 'tensorflow.less', 'tf.less', (['si', '(0.0)'], {}), '(si, 0.0)\n', (10134, 10143), True, 'import tensorflow as tf\n'), ((10160, 10179), 'tensorflow.greater', 'tf.greater', (['si', '(1.0)'], {}), '(si, 1.0)\n', (10170, 10179), True, 'import tensorflow as tf\n'), ((10196, 10212), 'tensorflow.less', 'tf.less', (['ti', '(0.0)'], {}), '(ti, 0.0)\n', (10203, 10212), True, 'import tensorflow as tf\n'), ((10229, 10248), 'tensorflow.greater', 'tf.greater', (['ti', '(1.0)'], {}), '(ti, 1.0)\n', (10239, 10248), True, 'import tensorflow as tf\n'), ((10579, 10631), 'tensorflow.where', 'tf.where', (['no_interaction_idx_3', 'rays.p0', 'p_intersect'], {}), '(no_interaction_idx_3, rays.p0, p_intersect)\n', (10587, 10631), True, 
'import tensorflow as tf\n'), ((10656, 10710), 'tensorflow.where', 'tf.where', (['no_interaction_idx_3', 'rays.p1', 'ray_direction'], {}), '(no_interaction_idx_3, rays.p1, ray_direction)\n', (10664, 10710), True, 'import tensorflow as tf\n'), ((10738, 10808), 'tensorflow.where', 'tf.where', (['no_interaction_idx', 'rays.interact_num', '(rays.interact_num + 1)'], {}), '(no_interaction_idx, rays.interact_num, rays.interact_num + 1)\n', (10746, 10808), True, 'import tensorflow as tf\n'), ((10831, 10916), 'tensorflow.where', 'tf.where', (['no_interaction_idx', 'rays.intensity', '(rays.intensity * self.reflectivity)'], {}), '(no_interaction_idx, rays.intensity, rays.intensity * self.reflectivity\n )\n', (10839, 10916), True, 'import tensorflow as tf\n'), ((11493, 11519), 'tensorflow.cast', 'tf.cast', (['center', 'precision'], {}), '(center, precision)\n', (11500, 11519), True, 'import tensorflow as tf\n'), ((11571, 11596), 'tensorflow.cast', 'tf.cast', (['width', 'precision'], {}), '(width, precision)\n', (11578, 11596), True, 'import tensorflow as tf\n'), ((11648, 11674), 'tensorflow.cast', 'tf.cast', (['height', 'precision'], {}), '(height, precision)\n', (11655, 11674), True, 'import tensorflow as tf\n'), ((12205, 12300), 'tensorflow.stack', 'tf.stack', (['[self.top_left, self.top_right, self.bottom_right, self.bottom_left, self.top_v\n ]'], {}), '([self.top_left, self.top_right, self.bottom_right, self.\n bottom_left, self.top_v])\n', (12213, 12300), True, 'import tensorflow as tf\n'), ((14110, 14136), 'tensorflow.cast', 'tf.cast', (['center', 'precision'], {}), '(center, precision)\n', (14117, 14136), True, 'import tensorflow as tf\n'), ((14159, 14185), 'tensorflow.cast', 'tf.cast', (['radius', 'precision'], {}), '(radius, precision)\n', (14166, 14185), True, 'import tensorflow as tf\n'), ((14208, 14234), 'tensorflow.cast', 'tf.cast', (['height', 'precision'], {}), '(height, precision)\n', (14215, 14234), True, 'import tensorflow as tf\n'), ((14464, 14498), 
'tensorflow.atan', 'tf.atan', (['(self.radius / self.height)'], {}), '(self.radius / self.height)\n', (14471, 14498), True, 'import tensorflow as tf\n'), ((14603, 14621), 'tensorflow.stack', 'tf.stack', (['[self.c]'], {}), '([self.c])\n', (14611, 14621), True, 'import tensorflow as tf\n'), ((14924, 14959), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(rays.p1 * tiled_v)', '(1)'], {}), '(rays.p1 * tiled_v, 1)\n', (14937, 14959), True, 'import tensorflow as tf\n'), ((15747, 15775), 'tensorflow.where', 'tf.where', (['good_t_idx', 't2', 't1'], {}), '(good_t_idx, t2, t1)\n', (15755, 15775), True, 'import tensorflow as tf\n'), ((15792, 15820), 'tensorflow.where', 'tf.where', (['good_t_idx', 't1', 't2'], {}), '(good_t_idx, t1, t2)\n', (15800, 15820), True, 'import tensorflow as tf\n'), ((15963, 15997), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(bad_cp * tiled_v)', '(1)'], {}), '(bad_cp * tiled_v, 1)\n', (15976, 15997), True, 'import tensorflow as tf\n'), ((16208, 16238), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(cp * tiled_v)', '(1)'], {}), '(cp * tiled_v, 1)\n', (16221, 16238), True, 'import tensorflow as tf\n'), ((16340, 16372), 'tensorflow.not_equal', 'tf.not_equal', (['cond_3', 'bad_cond_3'], {}), '(cond_3, bad_cond_3)\n', (16352, 16372), True, 'import tensorflow as tf\n'), ((16586, 16616), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(cp * tiled_v)', '(1)'], {}), '(cp * tiled_v, 1)\n', (16599, 16616), True, 'import tensorflow as tf\n'), ((16915, 16932), 'tensorflow.less', 'tf.less', (['det', '(0.0)'], {}), '(det, 0.0)\n', (16922, 16932), True, 'import tensorflow as tf\n'), ((16949, 16970), 'tensorflow.less_equal', 'tf.less_equal', (['t', '(0.0)'], {}), '(t, 0.0)\n', (16962, 16970), True, 'import tensorflow as tf\n'), ((17281, 17333), 'tensorflow.where', 'tf.where', (['no_interaction_idx_3', 'rays.p0', 'p_intersect'], {}), '(no_interaction_idx_3, rays.p0, p_intersect)\n', (17289, 17333), True, 'import tensorflow as tf\n'), ((17358, 17412), 'tensorflow.where', 
'tf.where', (['no_interaction_idx_3', 'rays.p1', 'ray_direction'], {}), '(no_interaction_idx_3, rays.p1, ray_direction)\n', (17366, 17412), True, 'import tensorflow as tf\n'), ((17440, 17510), 'tensorflow.where', 'tf.where', (['no_interaction_idx', 'rays.interact_num', '(rays.interact_num + 1)'], {}), '(no_interaction_idx, rays.interact_num, rays.interact_num + 1)\n', (17448, 17510), True, 'import tensorflow as tf\n'), ((17535, 17620), 'tensorflow.where', 'tf.where', (['no_interaction_idx', 'rays.intensity', '(rays.intensity * self.reflectivity)'], {}), '(no_interaction_idx, rays.intensity, rays.intensity * self.reflectivity\n )\n', (17543, 17620), True, 'import tensorflow as tf\n'), ((18400, 18426), 'tensorflow.cast', 'tf.cast', (['center', 'precision'], {}), '(center, precision)\n', (18407, 18426), True, 'import tensorflow as tf\n'), ((18467, 18492), 'tensorflow.cast', 'tf.cast', (['width', 'precision'], {}), '(width, precision)\n', (18474, 18492), True, 'import tensorflow as tf\n'), ((18530, 18556), 'tensorflow.cast', 'tf.cast', (['height', 'precision'], {}), '(height, precision)\n', (18537, 18556), True, 'import tensorflow as tf\n'), ((18765, 18796), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['self.resolution'], {}), '(self.resolution)\n', (18779, 18796), True, 'import tensorflow as tf\n'), ((20982, 21007), 'tensorflow.meshgrid', 'tf.meshgrid', (['all_w', 'all_h'], {}), '(all_w, all_h)\n', (20993, 21007), True, 'import tensorflow as tf\n'), ((21125, 21161), 'numpy.unravel_index', 'np.unravel_index', (['i', 'self.resolution'], {}), '(i, self.resolution)\n', (21141, 21161), True, 'import numpy as np\n'), ((22169, 22217), 'tensorflow.greater', 'tf.greater', (['__pt.interact_num', 'rays.interact_num'], {}), '(__pt.interact_num, rays.interact_num)\n', (22179, 22217), True, 'import tensorflow as tf\n'), ((23110, 23136), 'tensorflow.cast', 'tf.cast', (['center', 'precision'], {}), '(center, precision)\n', (23117, 23136), True, 'import tensorflow as tf\n'), ((23178, 
23204), 'tensorflow.cast', 'tf.cast', (['radius', 'precision'], {}), '(radius, precision)\n', (23185, 23204), True, 'import tensorflow as tf\n'), ((23247, 23273), 'tensorflow.cast', 'tf.cast', (['height', 'precision'], {}), '(height, precision)\n', (23254, 23273), True, 'import tensorflow as tf\n'), ((23474, 23505), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['self.resolution'], {}), '(self.resolution)\n', (23488, 23505), True, 'import tensorflow as tf\n'), ((26130, 26155), 'tensorflow.meshgrid', 'tf.meshgrid', (['all_w', 'all_h'], {}), '(all_w, all_h)\n', (26141, 26155), True, 'import tensorflow as tf\n'), ((26265, 26301), 'numpy.unravel_index', 'np.unravel_index', (['i', 'self.resolution'], {}), '(i, self.resolution)\n', (26281, 26301), True, 'import numpy as np\n'), ((27317, 27365), 'tensorflow.greater', 'tf.greater', (['__pt.interact_num', 'rays.interact_num'], {}), '(__pt.interact_num, rays.interact_num)\n', (27327, 27365), True, 'import tensorflow as tf\n'), ((28390, 28416), 'tensorflow.cast', 'tf.cast', (['center', 'precision'], {}), '(center, precision)\n', (28397, 28416), True, 'import tensorflow as tf\n'), ((28458, 28484), 'tensorflow.cast', 'tf.cast', (['radius', 'precision'], {}), '(radius, precision)\n', (28465, 28484), True, 'import tensorflow as tf\n'), ((28531, 28561), 'tensorflow.cast', 'tf.cast', (['coneheight', 'precision'], {}), '(coneheight, precision)\n', (28538, 28561), True, 'import tensorflow as tf\n'), ((28599, 28625), 'tensorflow.cast', 'tf.cast', (['height', 'precision'], {}), '(height, precision)\n', (28606, 28625), True, 'import tensorflow as tf\n'), ((28667, 28692), 'tensorflow.cast', 'tf.cast', (['width', 'precision'], {}), '(width, precision)\n', (28674, 28692), True, 'import tensorflow as tf\n'), ((29641, 29656), 'numpy.zeros', 'np.zeros', (['(99999)'], {}), '(99999)\n', (29649, 29656), True, 'import numpy as np\n'), ((29673, 29688), 'numpy.zeros', 'np.zeros', (['(99999)'], {}), '(99999)\n', (29681, 29688), True, 'import numpy as 
np\n'), ((31623, 31671), 'tensorflow.greater', 'tf.greater', (['__pt.interact_num', 'rays.interact_num'], {}), '(__pt.interact_num, rays.interact_num)\n', (31633, 31671), True, 'import tensorflow as tf\n'), ((32479, 32505), 'tensorflow.cast', 'tf.cast', (['center', 'precision'], {}), '(center, precision)\n', (32486, 32505), True, 'import tensorflow as tf\n'), ((32552, 32583), 'tensorflow.cast', 'tf.cast', (['pixel_width', 'precision'], {}), '(pixel_width, precision)\n', (32559, 32583), True, 'import tensorflow as tf\n'), ((32692, 32723), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['self.resolution'], {}), '(self.resolution)\n', (32706, 32723), True, 'import tensorflow as tf\n'), ((34487, 34512), 'tensorflow.meshgrid', 'tf.meshgrid', (['all_w', 'all_h'], {}), '(all_w, all_h)\n', (34498, 34512), True, 'import tensorflow as tf\n'), ((34649, 34685), 'numpy.unravel_index', 'np.unravel_index', (['i', 'self.resolution'], {}), '(i, self.resolution)\n', (34665, 34685), True, 'import numpy as np\n'), ((34699, 34810), 'tensorflow.random.uniform', 'tf.random.uniform', (['[num, 1]', '(self.x[i] - self.pixel_width / 2)', '(self.x[i] + self.pixel_width / 2)', 'precision'], {}), '([num, 1], self.x[i] - self.pixel_width / 2, self.x[i] + \n self.pixel_width / 2, precision)\n', (34716, 34810), True, 'import tensorflow as tf\n'), ((34811, 34922), 'tensorflow.random.uniform', 'tf.random.uniform', (['[num, 1]', '(self.y[i] - self.pixel_width / 2)', '(self.y[i] + self.pixel_width / 2)', 'precision'], {}), '([num, 1], self.y[i] - self.pixel_width / 2, self.y[i] + \n self.pixel_width / 2, precision)\n', (34828, 34922), True, 'import tensorflow as tf\n'), ((34929, 34980), 'tensorflow.random.uniform', 'tf.random.uniform', (['[num, 1]', '(-10.0)', '(10.0)', 'precision'], {}), '([num, 1], -10.0, 10.0, precision)\n', (34946, 34980), True, 'import tensorflow as tf\n'), ((34998, 35049), 'tensorflow.random.uniform', 'tf.random.uniform', (['[num, 1]', '(-10.0)', '(10.0)', 'precision'], {}), 
'([num, 1], -10.0, 10.0, precision)\n', (35015, 35049), True, 'import tensorflow as tf\n'), ((968, 999), 'tensorflow.tensordot', 'tf.tensordot', (['tensor', 'tensor', '(1)'], {}), '(tensor, tensor, 1)\n', (980, 999), True, 'import tensorflow as tf\n'), ((1034, 1067), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(tensor * tensor)', '(1)'], {}), '(tensor * tensor, 1)\n', (1047, 1067), True, 'import tensorflow as tf\n'), ((1542, 1563), 'tensorflow.abs', 'tf.abs', (['ray_direction'], {}), '(ray_direction)\n', (1548, 1563), True, 'import tensorflow as tf\n'), ((2283, 2300), 'tensorflow.less', 'tf.less', (['phi', '(0.0)'], {}), '(phi, 0.0)\n', (2290, 2300), True, 'import tensorflow as tf\n'), ((3106, 3122), 'tensorflow.size', 'tf.size', (['self.p0'], {}), '(self.p0)\n', (3113, 3122), True, 'import tensorflow as tf\n'), ((3124, 3140), 'tensorflow.size', 'tf.size', (['self.p1'], {}), '(self.p1)\n', (3131, 3140), True, 'import tensorflow as tf\n'), ((3217, 3240), 'tensorflow.size', 'tf.size', (['self.intensity'], {}), '(self.intensity)\n', (3224, 3240), True, 'import tensorflow as tf\n'), ((3317, 3343), 'tensorflow.size', 'tf.size', (['self.interact_num'], {}), '(self.interact_num)\n', (3324, 3343), True, 'import tensorflow as tf\n'), ((3704, 3738), 'tensorflow.where', 'tf.where', (['key_3', 'value.p0', 'self.p0'], {}), '(key_3, value.p0, self.p0)\n', (3712, 3738), True, 'import tensorflow as tf\n'), ((3761, 3795), 'tensorflow.where', 'tf.where', (['key_3', 'value.p1', 'self.p1'], {}), '(key_3, value.p1, self.p1)\n', (3769, 3795), True, 'import tensorflow as tf\n'), ((3825, 3871), 'tensorflow.where', 'tf.where', (['key', 'value.intensity', 'self.intensity'], {}), '(key, value.intensity, self.intensity)\n', (3833, 3871), True, 'import tensorflow as tf\n'), ((3904, 3956), 'tensorflow.where', 'tf.where', (['key', 'value.interact_num', 'self.interact_num'], {}), '(key, value.interact_num, self.interact_num)\n', (3912, 3956), True, 'import tensorflow as tf\n'), ((4189, 4205), 
'tensorflow.size', 'tf.size', (['self.p0'], {}), '(self.p0)\n', (4196, 4205), True, 'import tensorflow as tf\n'), ((5224, 5255), 'tensorflow.linalg.cross', 'tf.linalg.cross', (['self.u', 'self.v'], {}), '(self.u, self.v)\n', (5239, 5255), True, 'import tensorflow as tf\n'), ((5839, 5848), 'tensorflow.abs', 'tf.abs', (['b'], {}), '(b)\n', (5845, 5848), True, 'import tensorflow as tf\n'), ((5889, 5898), 'tensorflow.abs', 'tf.abs', (['a'], {}), '(a)\n', (5895, 5898), True, 'import tensorflow as tf\n'), ((6127, 6144), 'tensorflow.zeros_like', 'tf.zeros_like', (['rI'], {}), '(rI)\n', (6140, 6144), True, 'import tensorflow as tf\n'), ((6784, 6798), 'tensorflow.squeeze', 'tf.squeeze', (['rI'], {}), '(rI)\n', (6794, 6798), True, 'import tensorflow as tf\n'), ((8609, 8640), 'tensorflow.linalg.cross', 'tf.linalg.cross', (['self.u', 'self.v'], {}), '(self.u, self.v)\n', (8624, 8640), True, 'import tensorflow as tf\n'), ((9216, 9225), 'tensorflow.abs', 'tf.abs', (['b'], {}), '(b)\n', (9222, 9225), True, 'import tensorflow as tf\n'), ((9266, 9275), 'tensorflow.abs', 'tf.abs', (['a'], {}), '(a)\n', (9272, 9275), True, 'import tensorflow as tf\n'), ((10085, 10099), 'tensorflow.squeeze', 'tf.squeeze', (['rI'], {}), '(rI)\n', (10095, 10099), True, 'import tensorflow as tf\n'), ((11763, 11821), 'tensorflow.stack', 'tf.stack', (['[-1.0 * self.width / 2.0, self.width / 2.0, 0.0]'], {}), '([-1.0 * self.width / 2.0, self.width / 2.0, 0.0])\n', (11771, 11821), True, 'import tensorflow as tf\n'), ((11857, 11908), 'tensorflow.stack', 'tf.stack', (['[self.width / 2.0, self.width / 2.0, 0.0]'], {}), '([self.width / 2.0, self.width / 2.0, 0.0])\n', (11865, 11908), True, 'import tensorflow as tf\n'), ((11947, 12012), 'tensorflow.stack', 'tf.stack', (['[-1.0 * self.width / 2.0, -1.0 * self.width / 2.0, 0.0]'], {}), '([-1.0 * self.width / 2.0, -1.0 * self.width / 2.0, 0.0])\n', (11955, 12012), True, 'import tensorflow as tf\n'), ((12050, 12108), 'tensorflow.stack', 'tf.stack', (['[self.width / 
2.0, -1.0 * self.width / 2.0, 0.0]'], {}), '([self.width / 2.0, -1.0 * self.width / 2.0, 0.0])\n', (12058, 12108), True, 'import tensorflow as tf\n'), ((12140, 12173), 'tensorflow.stack', 'tf.stack', (['[0.0, 0.0, self.height]'], {}), '([0.0, 0.0, self.height])\n', (12148, 12173), True, 'import tensorflow as tf\n'), ((13043, 13089), 'tensorflow.greater', 'tf.greater', (['pt.interact_num', 'rays.interact_num'], {}), '(pt.interact_num, rays.interact_num)\n', (13053, 13089), True, 'import tensorflow as tf\n'), ((14523, 14545), 'tensorflow.cos', 'tf.cos', (['self.halfangle'], {}), '(self.halfangle)\n', (14529, 14545), True, 'import tensorflow as tf\n'), ((15299, 15319), 'tensorflow.greater', 'tf.greater', (['det', '(0.0)'], {}), '(det, 0.0)\n', (15309, 15319), True, 'import tensorflow as tf\n'), ((15320, 15332), 'tensorflow.sqrt', 'tf.sqrt', (['det'], {}), '(det)\n', (15327, 15332), True, 'import tensorflow as tf\n'), ((15532, 15549), 'tensorflow.zeros_like', 'tf.zeros_like', (['t1'], {}), '(t1)\n', (15545, 15549), True, 'import tensorflow as tf\n'), ((15610, 15627), 'tensorflow.zeros_like', 'tf.zeros_like', (['t2'], {}), '(t2)\n', (15623, 15627), True, 'import tensorflow as tf\n'), ((15665, 15681), 'tensorflow.less', 'tf.less', (['t1', '(0.0)'], {}), '(t1, 0.0)\n', (15672, 15681), True, 'import tensorflow as tf\n'), ((16033, 16052), 'tensorflow.less', 'tf.less', (['bad_h', '(0.0)'], {}), '(bad_h, 0.0)\n', (16040, 16052), True, 'import tensorflow as tf\n'), ((16053, 16083), 'tensorflow.greater', 'tf.greater', (['bad_h', 'self.height'], {}), '(bad_h, self.height)\n', (16063, 16083), True, 'import tensorflow as tf\n'), ((16270, 16285), 'tensorflow.less', 'tf.less', (['h', '(0.0)'], {}), '(h, 0.0)\n', (16277, 16285), True, 'import tensorflow as tf\n'), ((16286, 16312), 'tensorflow.greater', 'tf.greater', (['h', 'self.height'], {}), '(h, self.height)\n', (16296, 16312), True, 'import tensorflow as tf\n'), ((16648, 16663), 'tensorflow.less', 'tf.less', (['h', '(0.0)'], {}), 
'(h, 0.0)\n', (16655, 16663), True, 'import tensorflow as tf\n'), ((16664, 16690), 'tensorflow.greater', 'tf.greater', (['h', 'self.height'], {}), '(h, self.height)\n', (16674, 16690), True, 'import tensorflow as tf\n'), ((17014, 17043), 'tensorflow.logical_or', 'tf.logical_or', (['cond_1', 'cond_2'], {}), '(cond_1, cond_2)\n', (17027, 17043), True, 'import tensorflow as tf\n'), ((19114, 19258), 'tensorflow.stack', 'tf.stack', (['[-1.0 * self.width * self.resolution[0] / 2.0 - self.x_append / 2, self.\n width * self.resolution[1] / 2.0 + self.y_append / 2, 0.0]'], {}), '([-1.0 * self.width * self.resolution[0] / 2.0 - self.x_append / 2,\n self.width * self.resolution[1] / 2.0 + self.y_append / 2, 0.0])\n', (19122, 19258), True, 'import tensorflow as tf\n'), ((19286, 19424), 'tensorflow.stack', 'tf.stack', (['[self.width * self.resolution[0] / 2.0 + self.x_append / 2, self.width *\n self.resolution[1] / 2.0 + self.x_append / 2, 0.0]'], {}), '([self.width * self.resolution[0] / 2.0 + self.x_append / 2, self.\n width * self.resolution[1] / 2.0 + self.x_append / 2, 0.0])\n', (19294, 19424), True, 'import tensorflow as tf\n'), ((19454, 19605), 'tensorflow.stack', 'tf.stack', (['[-1.0 * self.width * self.resolution[0] / 2.0 - self.x_append / 2, -1.0 *\n self.width * self.resolution[1] / 2.0 - self.x_append / 2, 0.0]'], {}), '([-1.0 * self.width * self.resolution[0] / 2.0 - self.x_append / 2,\n -1.0 * self.width * self.resolution[1] / 2.0 - self.x_append / 2, 0.0])\n', (19462, 19605), True, 'import tensorflow as tf\n'), ((19635, 19779), 'tensorflow.stack', 'tf.stack', (['[self.width * self.resolution[0] / 2.0 + self.x_append / 2, -1.0 * self.\n width * self.resolution[1] / 2.0 - self.y_append / 2, 0.0]'], {}), '([self.width * self.resolution[0] / 2.0 + self.x_append / 2, -1.0 *\n self.width * self.resolution[1] / 2.0 - self.y_append / 2, 0.0])\n', (19643, 19779), True, 'import tensorflow as tf\n'), ((21610, 21656), 'tensorflow.greater', 'tf.greater', (['pt.interact_num', 
'rays.interact_num'], {}), '(pt.interact_num, rays.interact_num)\n', (21620, 21656), True, 'import tensorflow as tf\n'), ((22341, 22364), 'tensorflow.less', 'tf.less', (['dist', 'distance'], {}), '(dist, distance)\n', (22348, 22364), True, 'import tensorflow as tf\n'), ((23813, 23965), 'tensorflow.stack', 'tf.stack', (['[-2.0 * self.radius * self.resolution[0] / 2.0 - self.x_append / 2, 2.0 *\n self.radius * self.resolution[1] / 2.0 + self.y_append / 2, 0.0]'], {}), '([-2.0 * self.radius * self.resolution[0] / 2.0 - self.x_append / 2,\n 2.0 * self.radius * self.resolution[1] / 2.0 + self.y_append / 2, 0.0])\n', (23821, 23965), True, 'import tensorflow as tf\n'), ((24092, 24243), 'tensorflow.stack', 'tf.stack', (['[2.0 * self.radius * self.resolution[0] / 2.0 + self.x_append / 2, 2.0 *\n self.radius * self.resolution[1] / 2.0 + self.x_append / 2, 0.0]'], {}), '([2.0 * self.radius * self.resolution[0] / 2.0 + self.x_append / 2,\n 2.0 * self.radius * self.resolution[1] / 2.0 + self.x_append / 2, 0.0])\n', (24100, 24243), True, 'import tensorflow as tf\n'), ((24374, 24527), 'tensorflow.stack', 'tf.stack', (['[-2.0 * self.radius * self.resolution[0] / 2.0 - self.x_append / 2, -2.0 *\n self.radius * self.resolution[1] / 2.0 - self.x_append / 2, 0.0]'], {}), '([-2.0 * self.radius * self.resolution[0] / 2.0 - self.x_append / 2,\n -2.0 * self.radius * self.resolution[1] / 2.0 - self.x_append / 2, 0.0])\n', (24382, 24527), True, 'import tensorflow as tf\n'), ((24663, 24815), 'tensorflow.stack', 'tf.stack', (['[2.0 * self.radius * self.resolution[0] / 2.0 + self.x_append / 2, -2.0 *\n self.radius * self.resolution[1] / 2.0 - self.y_append / 2, 0.0]'], {}), '([2.0 * self.radius * self.resolution[0] / 2.0 + self.x_append / 2,\n -2.0 * self.radius * self.resolution[1] / 2.0 - self.y_append / 2, 0.0])\n', (24671, 24815), True, 'import tensorflow as tf\n'), ((26756, 26802), 'tensorflow.greater', 'tf.greater', (['pt.interact_num', 'rays.interact_num'], {}), '(pt.interact_num, 
rays.interact_num)\n', (26766, 26802), True, 'import tensorflow as tf\n'), ((27491, 27514), 'tensorflow.less', 'tf.less', (['dist', 'distance'], {}), '(dist, distance)\n', (27498, 27514), True, 'import tensorflow as tf\n'), ((28866, 28882), 'tensorflow.shape', 'tf.shape', (['self.x'], {}), '(self.x)\n', (28874, 28882), True, 'import tensorflow as tf\n'), ((28923, 28982), 'tensorflow.stack', 'tf.stack', (['[-1.0 * self.width / 2.0, self.height / 2.0, 0.0]'], {}), '([-1.0 * self.width / 2.0, self.height / 2.0, 0.0])\n', (28931, 28982), True, 'import tensorflow as tf\n'), ((29018, 29070), 'tensorflow.stack', 'tf.stack', (['[self.width / 2.0, self.height / 2.0, 0.0]'], {}), '([self.width / 2.0, self.height / 2.0, 0.0])\n', (29026, 29070), True, 'import tensorflow as tf\n'), ((29109, 29175), 'tensorflow.stack', 'tf.stack', (['[-1.0 * self.width / 2.0, -1.0 * self.height / 2.0, 0.0]'], {}), '([-1.0 * self.width / 2.0, -1.0 * self.height / 2.0, 0.0])\n', (29117, 29175), True, 'import tensorflow as tf\n'), ((29213, 29272), 'tensorflow.stack', 'tf.stack', (['[self.width / 2.0, -1.0 * self.height / 2.0, 0.0]'], {}), '([self.width / 2.0, -1.0 * self.height / 2.0, 0.0])\n', (29221, 29272), True, 'import tensorflow as tf\n'), ((31062, 31108), 'tensorflow.greater', 'tf.greater', (['pt.interact_num', 'rays.interact_num'], {}), '(pt.interact_num, rays.interact_num)\n', (31072, 31108), True, 'import tensorflow as tf\n'), ((31797, 31820), 'tensorflow.less', 'tf.less', (['dist', 'distance'], {}), '(dist, distance)\n', (31804, 31820), True, 'import tensorflow as tf\n'), ((32861, 32978), 'tensorflow.stack', 'tf.stack', (['[-1.0 * self.pixel_width * self.resolution[0] / 2.0, self.pixel_width *\n self.resolution[1] / 2.0, 0.0]'], {}), '([-1.0 * self.pixel_width * self.resolution[0] / 2.0, self.\n pixel_width * self.resolution[1] / 2.0, 0.0])\n', (32869, 32978), True, 'import tensorflow as tf\n'), ((33009, 33118), 'tensorflow.stack', 'tf.stack', (['[self.pixel_width * self.resolution[0] / 
2.0, self.pixel_width * self.\n resolution[1] / 2.0, 0.0]'], {}), '([self.pixel_width * self.resolution[0] / 2.0, self.pixel_width *\n self.resolution[1] / 2.0, 0.0])\n', (33017, 33118), True, 'import tensorflow as tf\n'), ((33153, 33277), 'tensorflow.stack', 'tf.stack', (['[-1.0 * self.pixel_width * self.resolution[0] / 2.0, -1.0 * self.\n pixel_width * self.resolution[1] / 2.0, 0.0]'], {}), '([-1.0 * self.pixel_width * self.resolution[0] / 2.0, -1.0 * self.\n pixel_width * self.resolution[1] / 2.0, 0.0])\n', (33161, 33277), True, 'import tensorflow as tf\n'), ((33310, 33427), 'tensorflow.stack', 'tf.stack', (['[self.pixel_width * self.resolution[0] / 2.0, -1.0 * self.pixel_width *\n self.resolution[1] / 2.0, 0.0]'], {}), '([self.pixel_width * self.resolution[0] / 2.0, -1.0 * self.\n pixel_width * self.resolution[1] / 2.0, 0.0])\n', (33318, 33427), True, 'import tensorflow as tf\n'), ((33570, 33601), 'tensorflow.linalg.cross', 'tf.linalg.cross', (['self.u', 'self.v'], {}), '(self.u, self.v)\n', (33585, 33601), True, 'import tensorflow as tf\n'), ((6045, 6061), 'tensorflow.zeros_like', 'tf.zeros_like', (['a'], {}), '(a)\n', (6058, 6061), True, 'import tensorflow as tf\n'), ((6101, 6111), 'tensorflow.abs', 'tf.abs', (['rI'], {}), '(rI)\n', (6107, 6111), True, 'import tensorflow as tf\n'), ((7126, 7163), 'tensorflow.expand_dims', 'tf.expand_dims', (['no_interaction_idx', '(1)'], {}), '(no_interaction_idx, 1)\n', (7140, 7163), True, 'import tensorflow as tf\n'), ((7165, 7202), 'tensorflow.expand_dims', 'tf.expand_dims', (['no_interaction_idx', '(1)'], {}), '(no_interaction_idx, 1)\n', (7179, 7202), True, 'import tensorflow as tf\n'), ((7204, 7241), 'tensorflow.expand_dims', 'tf.expand_dims', (['no_interaction_idx', '(1)'], {}), '(no_interaction_idx, 1)\n', (7218, 7241), True, 'import tensorflow as tf\n'), ((9422, 9438), 'tensorflow.zeros_like', 'tf.zeros_like', (['a'], {}), '(a)\n', (9435, 9438), True, 'import tensorflow as tf\n'), ((10427, 10464), 
'tensorflow.expand_dims', 'tf.expand_dims', (['no_interaction_idx', '(1)'], {}), '(no_interaction_idx, 1)\n', (10441, 10464), True, 'import tensorflow as tf\n'), ((10466, 10503), 'tensorflow.expand_dims', 'tf.expand_dims', (['no_interaction_idx', '(1)'], {}), '(no_interaction_idx, 1)\n', (10480, 10503), True, 'import tensorflow as tf\n'), ((10505, 10542), 'tensorflow.expand_dims', 'tf.expand_dims', (['no_interaction_idx', '(1)'], {}), '(no_interaction_idx, 1)\n', (10519, 10542), True, 'import tensorflow as tf\n'), ((13219, 13242), 'tensorflow.less', 'tf.less', (['dist', 'distance'], {}), '(dist, distance)\n', (13226, 13242), True, 'import tensorflow as tf\n'), ((13260, 13308), 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['interacted_w_shortest_idx'], {}), '(interacted_w_shortest_idx)\n', (13281, 13308), True, 'import tensorflow as tf\n'), ((13385, 13436), 'tensorflow.where', 'tf.where', (['interacted_w_shortest_idx', 'dist', 'distance'], {}), '(interacted_w_shortest_idx, dist, distance)\n', (13393, 13436), True, 'import tensorflow as tf\n'), ((14316, 14344), 'tensorflow.stack', 'tf.stack', (['[0.0, 0.0, height]'], {}), '([0.0, 0.0, height])\n', (14324, 14344), True, 'import tensorflow as tf\n'), ((14983, 15018), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(rays.p1 * tiled_v)', '(1)'], {}), '(rays.p1 * tiled_v, 1)\n', (14996, 15018), True, 'import tensorflow as tf\n'), ((15160, 15190), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(co * tiled_v)', '(1)'], {}), '(co * tiled_v, 1)\n', (15173, 15190), True, 'import tensorflow as tf\n'), ((15198, 15223), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(co * co)', '(1)'], {}), '(co * co, 1)\n', (15211, 15223), True, 'import tensorflow as tf\n'), ((15334, 15351), 'tensorflow.ones_like', 'tf.ones_like', (['det'], {}), '(det)\n', (15346, 15351), True, 'import tensorflow as tf\n'), ((15506, 15516), 'tensorflow.abs', 'tf.abs', (['t1'], {}), '(t1)\n', (15512, 15516), True, 'import tensorflow as tf\n'), ((15584, 
15594), 'tensorflow.abs', 'tf.abs', (['t2'], {}), '(t2)\n', (15590, 15594), True, 'import tensorflow as tf\n'), ((15697, 15716), 'tensorflow.greater', 'tf.greater', (['t2', '(0.0)'], {}), '(t2, 0.0)\n', (15707, 15716), True, 'import tensorflow as tf\n'), ((15717, 15732), 'tensorflow.less', 'tf.less', (['t2', 't1'], {}), '(t2, t1)\n', (15724, 15732), True, 'import tensorflow as tf\n'), ((15878, 15902), 'tensorflow.expand_dims', 'tf.expand_dims', (['bad_t', '(1)'], {}), '(bad_t, 1)\n', (15892, 15902), True, 'import tensorflow as tf\n'), ((16139, 16159), 'tensorflow.expand_dims', 'tf.expand_dims', (['t', '(1)'], {}), '(t, 1)\n', (16153, 16159), True, 'import tensorflow as tf\n'), ((16425, 16451), 'tensorflow.logical_not', 'tf.logical_not', (['bad_cond_3'], {}), '(bad_cond_3)\n', (16439, 16451), True, 'import tensorflow as tf\n'), ((16517, 16537), 'tensorflow.expand_dims', 'tf.expand_dims', (['t', '(1)'], {}), '(t, 1)\n', (16531, 16537), True, 'import tensorflow as tf\n'), ((17095, 17132), 'tensorflow.expand_dims', 'tf.expand_dims', (['no_interaction_idx', '(1)'], {}), '(no_interaction_idx, 1)\n', (17109, 17132), True, 'import tensorflow as tf\n'), ((17134, 17171), 'tensorflow.expand_dims', 'tf.expand_dims', (['no_interaction_idx', '(1)'], {}), '(no_interaction_idx, 1)\n', (17148, 17171), True, 'import tensorflow as tf\n'), ((17215, 17252), 'tensorflow.expand_dims', 'tf.expand_dims', (['no_interaction_idx', '(1)'], {}), '(no_interaction_idx, 1)\n', (17229, 17252), True, 'import tensorflow as tf\n'), ((21199, 21240), 'tensorflow.concat', 'tf.concat', (['[self.x[i], self.y[i], 0.0]', '(0)'], {}), '([self.x[i], self.y[i], 0.0], 0)\n', (21208, 21240), True, 'import tensorflow as tf\n'), ((21786, 21809), 'tensorflow.less', 'tf.less', (['dist', 'distance'], {}), '(dist, distance)\n', (21793, 21809), True, 'import tensorflow as tf\n'), ((21826, 21874), 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['interacted_w_shortest_idx'], {}), 
'(interacted_w_shortest_idx)\n', (21847, 21874), True, 'import tensorflow as tf\n'), ((21951, 22002), 'tensorflow.where', 'tf.where', (['interacted_w_shortest_idx', 'dist', 'distance'], {}), '(interacted_w_shortest_idx, dist, distance)\n', (21959, 22002), True, 'import tensorflow as tf\n'), ((26336, 26377), 'tensorflow.concat', 'tf.concat', (['[self.x[i], self.y[i], 0.0]', '(0)'], {}), '([self.x[i], self.y[i], 0.0], 0)\n', (26345, 26377), True, 'import tensorflow as tf\n'), ((26934, 26957), 'tensorflow.less', 'tf.less', (['dist', 'distance'], {}), '(dist, distance)\n', (26941, 26957), True, 'import tensorflow as tf\n'), ((26974, 27022), 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['interacted_w_shortest_idx'], {}), '(interacted_w_shortest_idx)\n', (26995, 27022), True, 'import tensorflow as tf\n'), ((27099, 27150), 'tensorflow.where', 'tf.where', (['interacted_w_shortest_idx', 'dist', 'distance'], {}), '(interacted_w_shortest_idx, dist, distance)\n', (27107, 27150), True, 'import tensorflow as tf\n'), ((30638, 30679), 'tensorflow.concat', 'tf.concat', (['[self.x[i], self.y[i], 0.0]', '(0)'], {}), '([self.x[i], self.y[i], 0.0], 0)\n', (30647, 30679), True, 'import tensorflow as tf\n'), ((31240, 31263), 'tensorflow.less', 'tf.less', (['dist', 'distance'], {}), '(dist, distance)\n', (31247, 31263), True, 'import tensorflow as tf\n'), ((31280, 31328), 'tensorflow.math.count_nonzero', 'tf.math.count_nonzero', (['interacted_w_shortest_idx'], {}), '(interacted_w_shortest_idx)\n', (31301, 31328), True, 'import tensorflow as tf\n'), ((31405, 31456), 'tensorflow.where', 'tf.where', (['interacted_w_shortest_idx', 'dist', 'distance'], {}), '(interacted_w_shortest_idx, dist, distance)\n', (31413, 31456), True, 'import tensorflow as tf\n'), ((35336, 35365), 'tensorflow.ones', 'tf.ones', (['num'], {'dtype': 'precision'}), '(num, dtype=precision)\n', (35343, 35365), True, 'import tensorflow as tf\n'), ((35400, 35429), 'tensorflow.zeros', 'tf.zeros', (['num'], 
{'dtype': 'tf.int32'}), '(num, dtype=tf.int32)\n', (35408, 35429), True, 'import tensorflow as tf\n'), ((1400, 1434), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(normal * rays.p1)', '(1)'], {}), '(normal * rays.p1, 1)\n', (1413, 1434), True, 'import tensorflow as tf\n'), ((1849, 1877), 'tensorflow.less_equal', 'tf.less_equal', (['_mag', 'epsilon'], {}), '(_mag, epsilon)\n', (1862, 1877), True, 'import tensorflow as tf\n'), ((1879, 1897), 'tensorflow.ones_like', 'tf.ones_like', (['_mag'], {}), '(_mag)\n', (1891, 1897), True, 'import tensorflow as tf\n'), ((3606, 3628), 'tensorflow.expand_dims', 'tf.expand_dims', (['key', '(1)'], {}), '(key, 1)\n', (3620, 3628), True, 'import tensorflow as tf\n'), ((3630, 3652), 'tensorflow.expand_dims', 'tf.expand_dims', (['key', '(1)'], {}), '(key, 1)\n', (3644, 3652), True, 'import tensorflow as tf\n'), ((3654, 3676), 'tensorflow.expand_dims', 'tf.expand_dims', (['key', '(1)'], {}), '(key, 1)\n', (3668, 3676), True, 'import tensorflow as tf\n'), ((6021, 6040), 'tensorflow.less', 'tf.less', (['(a / b)', '(0.0)'], {}), '(a / b, 0.0)\n', (6028, 6040), True, 'import tensorflow as tf\n'), ((7027, 7056), 'tensorflow.logical_or', 'tf.logical_or', (['cond_1', 'cond_2'], {}), '(cond_1, cond_2)\n', (7040, 7056), True, 'import tensorflow as tf\n'), ((9398, 9417), 'tensorflow.less', 'tf.less', (['(a / b)', '(0.0)'], {}), '(a / b, 0.0)\n', (9405, 9417), True, 'import tensorflow as tf\n'), ((10328, 10357), 'tensorflow.logical_or', 'tf.logical_or', (['cond_1', 'cond_2'], {}), '(cond_1, cond_2)\n', (10341, 10357), True, 'import tensorflow as tf\n'), ((15065, 15095), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(tiled_v * co)', '(1)'], {}), '(tiled_v * co, 1)\n', (15078, 15095), True, 'import tensorflow as tf\n'), ((15098, 15128), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(co * rays.p1)', '(1)'], {}), '(co * rays.p1, 1)\n', (15111, 15128), True, 'import tensorflow as tf\n'), ((1970, 1998), 'tensorflow.less_equal', 'tf.less_equal', (['_mag', 
'epsilon'], {}), '(_mag, epsilon)\n', (1983, 1998), True, 'import tensorflow as tf\n'), ((2000, 2018), 'tensorflow.ones_like', 'tf.ones_like', (['_mag'], {}), '(_mag)\n', (2012, 2018), True, 'import tensorflow as tf\n'), ((20277, 20310), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'precision'}), '(0.0, dtype=precision)\n', (20288, 20310), True, 'import tensorflow as tf\n'), ((20359, 20413), 'tensorflow.constant', 'tf.constant', (['(self.resolution[0] - 1.0)'], {'dtype': 'precision'}), '(self.resolution[0] - 1.0, dtype=precision)\n', (20370, 20413), True, 'import tensorflow as tf\n'), ((20559, 20592), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'precision'}), '(0.0, dtype=precision)\n', (20570, 20592), True, 'import tensorflow as tf\n'), ((20641, 20695), 'tensorflow.constant', 'tf.constant', (['(self.resolution[1] - 1.0)'], {'dtype': 'precision'}), '(self.resolution[1] - 1.0, dtype=precision)\n', (20652, 20695), True, 'import tensorflow as tf\n'), ((30219, 30239), 'numpy.power', 'np.power', (['(cd + cs)', '(2)'], {}), '(cd + cs, 2)\n', (30227, 30239), True, 'import numpy as np\n'), ((33922, 33955), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'precision'}), '(0.0, dtype=precision)\n', (33933, 33955), True, 'import tensorflow as tf\n'), ((34004, 34058), 'tensorflow.constant', 'tf.constant', (['(self.resolution[0] - 1.0)'], {'dtype': 'precision'}), '(self.resolution[0] - 1.0, dtype=precision)\n', (34015, 34058), True, 'import tensorflow as tf\n'), ((34216, 34249), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'precision'}), '(0.0, dtype=precision)\n', (34227, 34249), True, 'import tensorflow as tf\n'), ((34298, 34352), 'tensorflow.constant', 'tf.constant', (['(self.resolution[1] - 1.0)'], {'dtype': 'precision'}), '(self.resolution[1] - 1.0, dtype=precision)\n', (34309, 34352), True, 'import tensorflow as tf\n'), ((35168, 35203), 'tensorflow.zeros', 'tf.zeros', (['(num, 1)'], {'dtype': 'precision'}), '((num, 
1), dtype=precision)\n', (35176, 35203), True, 'import tensorflow as tf\n'), ((35261, 35295), 'tensorflow.ones', 'tf.ones', (['(num, 1)'], {'dtype': 'precision'}), '((num, 1), dtype=precision)\n', (35268, 35295), True, 'import tensorflow as tf\n'), ((16746, 16776), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(tiled_v * cp)', '(1)'], {}), '(tiled_v * cp, 1)\n', (16759, 16776), True, 'import tensorflow as tf\n'), ((16779, 16804), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(cp * cp)', '(1)'], {}), '(cp * cp, 1)\n', (16792, 16804), True, 'import tensorflow as tf\n'), ((25429, 25462), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'precision'}), '(0.0, dtype=precision)\n', (25440, 25462), True, 'import tensorflow as tf\n'), ((25510, 25564), 'tensorflow.constant', 'tf.constant', (['(self.resolution[0] - 1.0)'], {'dtype': 'precision'}), '(self.resolution[0] - 1.0, dtype=precision)\n', (25521, 25564), True, 'import tensorflow as tf\n'), ((25713, 25746), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'precision'}), '(0.0, dtype=precision)\n', (25724, 25746), True, 'import tensorflow as tf\n'), ((25794, 25848), 'tensorflow.constant', 'tf.constant', (['(self.resolution[1] - 1.0)'], {'dtype': 'precision'}), '(self.resolution[1] - 1.0, dtype=precision)\n', (25805, 25848), True, 'import tensorflow as tf\n')] |
import numpy as np
import xlwings as xw
"""
To activate a function in the excel file, press "Import Functions"
in the xlwings tab
"""
@xw.func
def get_crack_width(c_nom_tension, c_nom_compression, c_dur, moment,
                    is_shear_present, shear_bar_diameter,
                    rf_diameter_first_layer=0, rf_spacing_first_layer=0,
                    rf_diameter_second_layer=0, rf_spacing_second_layer=0,
                    rf_diameter_third_layer=0, rf_spacing_third_layer=0):
    """Excel-facing entry point: compute the EC2 crack width of the section.

    Covers (c_nom_*, c_dur) are in mm, ``moment`` is the SLS moment in kNm,
    and each reinforcement layer is given by bar diameter [mm] and spacing [mm].
    Returns 0 on any calculation error so the worksheet cell never shows a
    Python traceback.
    """
    def _or_zero(value):
        # Empty Excel cells arrive as None; treat them as "no reinforcement".
        return 0 if value is None else value

    rf_diameter_second_layer = _or_zero(rf_diameter_second_layer)
    rf_spacing_second_layer = _or_zero(rf_spacing_second_layer)
    rf_diameter_third_layer = _or_zero(rf_diameter_third_layer)
    rf_spacing_third_layer = _or_zero(rf_spacing_third_layer)
    if is_shear_present:
        # Shear links sit outside the main bars, so they increase every cover.
        c_nom_tension += shear_bar_diameter
        c_nom_compression += shear_bar_diameter
        c_dur += shear_bar_diameter
    try:
        section_properties = SectionProperties(c_nom_tension, c_nom_compression, c_dur,
                                               rf_diameter_first_layer,
                                               rf_spacing_first_layer,
                                               rf_diameter_second_layer,
                                               rf_spacing_second_layer,
                                               rf_diameter_third_layer,
                                               rf_spacing_third_layer
                                               )
        serviceability_checks = ServiceabilityChecks(section_properties, moment)
        crack_width_calc = CrackWidthCalc(serviceability_checks)
        return crack_width_calc.get_crack_width()
    except Exception:
        # Best-effort for Excel: map any failure to 0 rather than an error cell.
        # (Narrowed from BaseException so Ctrl-C / SystemExit still propagate.)
        return 0
class CrackWidthCalc:
    """Crack width calculation per EC2 (EN 1992-1-1, section 7.3.4).

    Fixes a defect in the original: ``set_up_properties`` was declared as a
    ``@classmethod`` but its body referenced ``self`` (a NameError at runtime)
    and called instance methods on ``cls``.  It is now a plain instance method.
    """

    # EC2 expression 7.9 / 7.11 coefficients.
    k_1 = 0.8            # bond coefficient (high-bond bars)
    k_2 = 0.5            # strain-distribution coefficient  # TODO deterministic value, not constant
    k_3 = 3.4
    k_4 = 0.425
    k_t_long_term = 0.4  # load-duration factor for long-term loading

    def __init__(self, serviceability_checks):
        self.serviceability_checks = serviceability_checks
        self.set_up_properties()

    def set_up_properties(self):
        """Compute and cache all intermediate results (order matters:
        s_rmax needs equivalent_diameter and rho_eff)."""
        self.equivalent_diameter = self.get_equivalent_diameter()
        self.rho_eff = self.get_rho_eff()
        self.s_rmax = self.get_s_rmax()
        self.e_sm_minus_e_cm = self.get_e_sm_minus_e_cm()
        self.crack_width = self.get_crack_width()

    def get_crack_width(self):
        """Crack width w_k = s_r,max * (eps_sm - eps_cm) (EC2 expr. 7.8)."""
        s_rmax = self.s_rmax
        e_sm_minus_e_cm = self.get_e_sm_minus_e_cm()
        return s_rmax * e_sm_minus_e_cm

    def get_e_sm_minus_e_cm(self):
        """Mean strain difference between steel and concrete (EC2 expr. 7.9)."""
        sigma_s = self.serviceability_checks.longterm_sigma_s
        f_ctm = self.serviceability_checks.section_properties.concrete_properties.f_ctm
        rho_eff = self.rho_eff
        k_t = self.k_t_long_term
        E_s = self.serviceability_checks.section_properties.steel_properties.E_s
        E_cm = self.serviceability_checks.section_properties.concrete_properties.E_cm
        alpha_e = E_s / E_cm  # modular ratio
        # EC2 imposes the lower bound 0.6 * sigma_s / E_s.
        return max(
            0.6 * (sigma_s / E_s),
            (sigma_s - k_t * (f_ctm / rho_eff) * (1 + alpha_e * rho_eff)) / E_s,
        )

    def get_s_rmax(self):
        """Maximum crack spacing (EC2 expr. 7.11, wide-spacing fallback 7.14)."""
        spacing = self.serviceability_checks.section_properties.rf_spacing_first_layer
        c_dur = self.serviceability_checks.section_properties.c_dur
        diam = self.equivalent_diameter
        h = self.serviceability_checks.section_properties.depth
        d_c = self.serviceability_checks.d_c_lt
        rho_eff = self.rho_eff
        if spacing > 5 * (c_dur + diam / 2):
            # Bars too widely spaced for bond-controlled cracking (expr. 7.14).
            return 1.3 * (h - d_c)
        return self.k_3 * c_dur + self.k_1 * self.k_2 * self.k_4 * diam / rho_eff

    def get_equivalent_diameter(self):
        """Equivalent bar diameter for mixed bar layers (EC2 expr. 7.12)."""
        props = self.serviceability_checks.section_properties
        width = props.width
        layers = (
            (props.rf_diameter_first_layer, props.rf_spacing_first_layer),
            (props.rf_diameter_second_layer, props.rf_spacing_second_layer),
            (props.rf_diameter_third_layer, props.rf_spacing_third_layer),
        )
        # Number of bars per layer = width / spacing (0 when the layer is absent).
        numerator = sum((width / s) * d ** 2 for d, s in layers if s)
        denominator = sum((width / s) * d for d, s in layers if s)
        return numerator / denominator

    def get_rho_eff(self):
        """Effective reinforcement ratio A_s / A_c,eff (EC2 7.3.2(3))."""
        props = self.serviceability_checks.section_properties
        d_1 = props.rf_diameter_first_layer
        d_2 = props.rf_diameter_second_layer
        d_3 = props.rf_diameter_third_layer
        width = props.width
        depth = props.depth
        aggregate_size = props.aggregate_size
        d = props.depth_to_centroid
        d_c_lt = self.serviceability_checks.d_c_lt
        # Clear distance between bar layers only counts when the layer exists.
        dist_between_layers_1 = props.spacing_between_rf if d_2 > 0 else 0
        dist_between_layers_2 = props.spacing_between_rf if d_3 > 0 else 0
        A_s = props.steel_area_tension
        c_nom = props.c_nom_tension
        c_dur = props.c_dur
        h_dash = depth - c_nom + c_dur
        # Height of the bar group from the tension face (+5 mm on aggregate size).
        bar_group = c_dur + d_1 + dist_between_layers_1 + \
                    d_2 + dist_between_layers_2 + \
                    d_3 + \
                    aggregate_size + 5
        h_ceff = max(
            min((h_dash - d_c_lt) / 3, 2.5 * (h_dash - d), h_dash / 2),
            bar_group
        )
        A_ceff = h_ceff * width
        return A_s / A_ceff

    def to_string(self):
        """Multi-line debug summary of the intermediate results."""
        return f"rho_eff = {self.rho_eff:0.4f}\n" + \
               f"equivalent_diameter = {self.equivalent_diameter:0.2f}\n" + \
               f"s_rmax = {self.s_rmax:0.2f}\n" + \
               f"e_sm_minus_e_cm = {self.e_sm_minus_e_cm:0.2f}\n" + \
               f"crack_width = {self.crack_width:0.2f}\n"
class ServiceabilityChecks:
    """Cracked-section SLS analysis: neutral-axis depths, cracked second
    moments of area and the long-term steel stress.

    Fixes a defect in the original: ``set_up_properties`` was declared as a
    ``@classmethod`` but read per-instance data (``section_properties``) and
    stored results on the class, which fails at runtime and would share state
    between instances.  It is now an ordinary instance method.
    """

    def __init__(self, section_properties, moment):
        self.section_properties = section_properties
        self.sls_moment_permanent = moment  # quasi-permanent SLS moment [kNm]
        self.set_up_properties()

    def set_up_properties(self):
        """Compute short- and long-term cracked-section properties."""
        # Long-term effective modulus reduced by creep: E_cm / (1 + phi).
        self.E_ceff_short_term = self.section_properties.concrete_properties.E_cm
        self.E_ceff_long_term = self.E_ceff_short_term / (1 + self.section_properties.concrete_properties.creep)
        self.d_c_st = self.get_concrete_depth_in_compression(self.E_ceff_short_term)
        self.d_c_lt = self.get_concrete_depth_in_compression(self.E_ceff_long_term)
        self.I_cracked_short_term = self.get_cracked_second_moment_of_area(self.d_c_st, self.E_ceff_short_term)
        self.I_cracked_long_term = self.get_cracked_second_moment_of_area(self.d_c_lt, self.E_ceff_long_term)
        self.longterm_sigma_s = self.get_longterm_sigma_s()

    def get_longterm_sigma_s(self):
        """
        Stress at the bottom steel face for pure bending.

        NOTE(review): sums the long-term and short-term contributions of the
        full permanent moment — confirm this load split is intended.
        """
        d = self.section_properties.depth_to_centroid
        sigma_s = self.sls_moment_permanent * (10 ** 6) * (d - self.d_c_lt) / self.I_cracked_long_term + \
                  self.sls_moment_permanent * (10 ** 6) * (d - self.d_c_st) / self.I_cracked_short_term
        return sigma_s

    def get_cracked_second_moment_of_area(self, d_c, E_ceff):
        """Cracked transformed second moment of area (steel units) about the
        neutral axis at depth ``d_c`` for effective concrete modulus ``E_ceff``."""
        A_s = self.section_properties.steel_area_tension
        d = self.section_properties.depth_to_centroid
        E_s = self.section_properties.steel_properties.E_s
        b = self.section_properties.width
        I_cracked = A_s * (d - d_c) ** 2 + 1 / 3 * (E_ceff / E_s) * (b * d_c ** 3)
        return I_cracked

    def get_concrete_depth_in_compression(self, effective_modulus_of_concrete):
        """
        Concrete depth in compression in pure bending (positive root of the
        first-moment equilibrium quadratic of the cracked section).
        """
        E_ceff = effective_modulus_of_concrete
        A_s = self.section_properties.steel_area_tension
        E_s = self.section_properties.steel_properties.E_s
        d = self.section_properties.depth_to_centroid
        b = self.section_properties.width
        d_c = (-A_s * E_s + np.sqrt((A_s * E_s) ** 2 + 2 * b * A_s * E_s * E_ceff * d)) / (b * E_ceff)
        return d_c

    def to_string(self):
        """Multi-line debug summary of the cracked-section results."""
        return f"E_ceff_short_term = {self.E_ceff_short_term:0.2f}\n" + \
               f"d_c_st = {self.d_c_st:0.2f}\n" + \
               f"I_cracked_short_term = {self.I_cracked_short_term:0.2f}\n" + \
               f"E_ceff_long_term = {self.E_ceff_long_term:0.2f}\n" + \
               f"d_c_lt = {self.d_c_lt:0.2f}\n" + \
               f"I_cracked_long_term = {self.I_cracked_long_term:0.2f}\n" + \
               f"longterm_sigma_s = {self.longterm_sigma_s:0.2f}\n"
class SectionProperties:
    """Geometry and reinforcement layout of the rectangular concrete section.

    Fixes two defects in the original ``set_up_properties``: its parameter was
    named ``cls`` while the body used ``self`` (a NameError at runtime), and
    one call site contained the typo ``cselfls``.
    """

    # PROJECT SPECIFIC SECTION PROPERTIES (to be moved to init)
    depth = 750              # overall section depth [mm]
    width = 1000             # section width [mm]
    f_yd = 500               # NOTE(review): named f_yd but passed to SteelProperties as f_yk — confirm
    f_ck = 35                # characteristic concrete cylinder strength [MPa]
    spacing_between_rf = 25  # clear spacing between reinforcement layers [mm]
    aggregate_size = 20      # maximum aggregate size [mm]

    def __init__(self, c_nom_tension, c_nom_compression, c_dur,
                 rf_diameter_first_layer, rf_spacing_first_layer,
                 rf_diameter_second_layer, rf_spacing_second_layer,
                 rf_diameter_third_layer, rf_spacing_third_layer):
        self.c_nom_tension = c_nom_tension
        self.c_nom_compression = c_nom_compression
        self.c_dur = c_dur
        self.rf_diameter_first_layer = rf_diameter_first_layer
        self.rf_spacing_first_layer = rf_spacing_first_layer
        self.rf_diameter_second_layer = rf_diameter_second_layer
        self.rf_spacing_second_layer = rf_spacing_second_layer
        self.rf_diameter_third_layer = rf_diameter_third_layer
        self.rf_spacing_third_layer = rf_spacing_third_layer
        self.set_up_properties()

    def set_up_properties(self):
        """Derive steel areas, centroid depth and material property objects."""
        self._area_1 = self.get_single_layer_steel_area(self.rf_diameter_first_layer, self.rf_spacing_first_layer)
        self._area_2 = self.get_single_layer_steel_area(self.rf_diameter_second_layer, self.rf_spacing_second_layer)
        self._area_3 = self.get_single_layer_steel_area(self.rf_diameter_third_layer, self.rf_spacing_third_layer)
        self.steel_area_tension = self.get_steel_area_tension()
        self.depth_to_centroid = self.get_depth_to_centroid()
        self.steel_properties = SteelProperties(self.f_yd)
        self.concrete_properties = ConcreteProperties(self.f_ck)
        self.depth_to_neutral_axis = self.depth / 2                      # uncracked NA of the symmetric section
        self.second_moment_of_area = self.width * self.depth ** 3 / 12   # gross (uncracked) inertia

    def get_steel_area_tension(self):
        """Total tension steel area over all layers [mm^2]."""
        return self._area_1 + self._area_2 + self._area_3

    def get_depth_to_centroid(self):
        """Area-weighted depth from the compression face to the tension steel
        centroid [mm]."""
        d_1 = self.depth - self.c_nom_tension - self.rf_diameter_first_layer / 2
        d_2 = d_1 - self.rf_diameter_first_layer / 2 - self.spacing_between_rf - self.rf_diameter_second_layer / 2
        d_3 = d_2 - self.rf_diameter_second_layer / 2 - self.spacing_between_rf - self.rf_diameter_third_layer / 2
        return (d_1 * self._area_1 + \
                d_2 * self._area_2 + \
                d_3 * self._area_3) / self.steel_area_tension

    def get_single_layer_steel_area(self, diameter, spacing):
        """Steel area per section width for one bar layer [mm^2]; 0 when the
        layer is absent (diameter 0 or None)."""
        if diameter:  # 0 and None both mean "no layer"
            return (np.pi * diameter ** 2 / 4) * (self.width / spacing)
        return 0

    def __str__(self):
        return f"steel_area_tension = {self.steel_area_tension:0.2f}\n" + \
               f"depth_to_centroid = {self.depth_to_centroid:0.2f}\n"

    def __repr__(self):
        return self.__str__()
class ConcreteProperties:
    """Derived EC2 concrete material properties for a characteristic strength."""

    gamma_c = 1.5           # partial safety factor for concrete
    alpha_cc_flexure = 0.85
    alpha_cc_shear = 1
    alpha_ct = 1
    #TODO values are assumed as context. write a class for creep calculations
    relative_humidity = 0.75
    exposed_perimter = 1000
    age_of_concrete_at_moment_considered = 2557
    age_of_concrete_at_loading = 28
    creep = 1.256

    def __init__(self, f_ck):
        self.f_ck = f_ck
        self.set_up_properties()

    def set_up_properties(self):
        """Populate every derived strength/stiffness property from f_ck."""
        self.f_av = self.get_f_av()
        self.f_cm = self.get_f_cm()
        self.f_ctm = self.get_f_ctm()
        self.E_cm = self.get_E_cm()
        self.f_cd_flexural = self.get_f_cd_flexural()
        self.f_cd_shear = self.get_f_cd_shear()
        self.f_ctd = self.get_f_ctd()

    def get_f_av(self):
        """Average stress factor applied to f_ck."""
        return 0.459 * self.f_ck

    def get_f_cm(self):
        """Mean compressive strength (EC2 Table 3.1: f_cm = f_ck + 8)."""
        return 8 + self.f_ck

    def get_f_ctm(self):
        """Mean tensile strength; the formula switches at f_ck = 50 MPa."""
        if self.f_ck >= 50:
            return 2.12 * np.log(1 + self.f_cm / 10)
        return 0.3 * self.f_ck ** (2 / 3)

    def get_E_cm(self):
        """Secant modulus of elasticity [MPa]."""
        strength_ratio = (self.f_ck + 8) / 10
        return 22 * strength_ratio ** 0.3 * 1000

    def get_f_cd_flexural(self):
        """Design compressive strength for flexure."""
        return self.f_ck * self.alpha_cc_flexure / self.gamma_c

    def get_f_cd_shear(self):
        """Design compressive strength for shear."""
        return self.f_ck * self.alpha_cc_shear / self.gamma_c

    def get_f_ctd(self):
        """Design tensile strength."""
        return 0.7 * self.f_ctm * self.alpha_ct / self.gamma_c

    def to_string(self):
        """Multi-line summary of all derived properties."""
        parts = [
            f"f_ck = {self.f_ck:0.2f}\n",
            f"f_av = {self.f_av:0.2f}\n",
            f"f_cm = {self.f_cm:0.2f}\n",
            f"f_ctm = {self.f_ctm:0.2f}\n",
            f"E_cm = {self.E_cm:0.2f}\n",
            f"f_cd_flexural = {self.f_cd_flexural:0.2f}\n",
            f"f_cd_shear = {self.f_cd_shear:0.2f}\n",
            f"f_ctd = {self.f_ctd:0.2f}\n",
        ]
        return "".join(parts)
class SteelProperties:
    """Reinforcing steel design properties."""

    E_s = 200000    # Young's modulus [MPa]
    gamma_s = 1.15  # partial safety factor for steel

    def __init__(self, f_yk):
        self.f_yk = f_yk
        # Design yield strength.
        self.f_yd = f_yk / self.gamma_s

    def __str__(self):
        return (f"E_s = {self.E_s:0.0f}\n"
                f"gamma_s = {self.gamma_s:0.2f}\n"
                f"f_yk = {self.f_yk:0.0f}\n"
                f"f_yd = {self.f_yd:0.2f}\n")

    def __repr__(self):
        # repr mirrors str exactly.
        return self.__str__()
| [
"numpy.log",
"numpy.sqrt"
] | [((8361, 8419), 'numpy.sqrt', 'np.sqrt', (['((A_s * E_s) ** 2 + 2 * b * A_s * E_s * E_ceff * d)'], {}), '((A_s * E_s) ** 2 + 2 * b * A_s * E_s * E_ceff * d)\n', (8368, 8419), True, 'import numpy as np\n'), ((12865, 12891), 'numpy.log', 'np.log', (['(1 + self.f_cm / 10)'], {}), '(1 + self.f_cm / 10)\n', (12871, 12891), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
import FrEIA.framework as Ff
from .. import InvertibleArchitecture
# Model variant names, one per beta value used during training.
# NOTE(review): __all__ lists 'beta_0' etc. while the module defines
# trustworthy_gc_beta_* functions — these names are (also) used as the
# model_urls keys; confirm the intended public export list.
__all__ = ['beta_0', 'beta_1', 'beta_2', 'beta_4', 'beta_8', 'beta_16', 'beta_32', 'beta_inf']
# Base URL of the share hosting the pretrained (weight-averaged) checkpoints.
model_base_url = 'https://heibox.uni-heidelberg.de/seafhttp/files/6f91503d-7459-4080-b10a-e979f8b3d20f/'
# Map each variant name to its '<name>.avg.pt' checkpoint URL.
model_urls = {b : f'{model_base_url}{b}.avg.pt' for b in __all__}
class InvertibleImagenetClassifier(InvertibleArchitecture):
    """Generative classifier on ImageNet built from an invertible backbone and head.

    The INN (``self.model``) maps an image to two latent parts: ``z_fc`` with
    ``n_loss_dims_1d`` dimensions and a remainder ``z_conv``; each class owns a
    learned Gaussian mean split accordingly into ``mu_fc`` and ``mu_conv`` (the
    latter optionally low-rank factored as ``mu_t @ mu_m``).  Losses are
    negative log-likelihoods of a Gaussian-mixture model in latent space.
    """

    def __init__(self, lr, mu_init, mu_conv_init, mu_low_rank_k, input_dims, n_classes, n_loss_dims_1d, n_total_dims_1d, backbone: InvertibleArchitecture, head: InvertibleArchitecture, finetune_mu=False):
        super().__init__()
        self.model = None
        self.lr = lr
        self.n_classes = n_classes
        self.backbone = backbone
        self.head = head
        # Builds self.model (a FrEIA ReversibleGraphNet) over backbone + head.
        self.construct_inn(Ff.InputNode(input_dims[0], input_dims[1], input_dims[2], name='input'), backbone, head)
        self.n_total_dims_1d = n_total_dims_1d
        self.n_loss_dims_1d = n_loss_dims_1d
        # Scale chosen relative to the number of identity blocks below.
        init_scale = mu_init / np.sqrt(2 * (n_loss_dims_1d // n_classes))
        self.mu_fc = nn.Parameter(torch.zeros(1, n_classes, n_loss_dims_1d))
        # Initialize mu_fc as repeated scaled identity blocks so the class
        # means start mutually orthogonal within each block.
        for k in range(n_loss_dims_1d // n_classes):
            self.mu_fc.data[0, :, n_classes * k: n_classes * (k + 1)] = init_scale * torch.eye(n_classes)
        self.mu_low_rank_k = mu_low_rank_k
        if self.mu_low_rank_k > 0:
            # Low-rank parametrization: mu_conv = mu_t @ mu_m (rank mu_low_rank_k).
            mu_conv_dims = n_total_dims_1d - n_loss_dims_1d
            self.mu_t = nn.Parameter(mu_conv_init * torch.randn(self.n_classes, self.mu_low_rank_k).cuda())
            self.mu_m = nn.Parameter(mu_conv_init * torch.randn(self.mu_low_rank_k, mu_conv_dims).cuda())
        else:
            self.mu_conv = nn.Parameter(mu_conv_init * torch.randn(1, n_classes, n_total_dims_1d - n_loss_dims_1d))
        self.train_mu = True
        # NOTE(review): the train_phi branch below references self.phi, which is
        # never defined on this class — dead code while train_phi stays False.
        self.train_phi = False
        self.train_inn = True
        self.model_parameters = list(filter(lambda p: p.requires_grad, self.model.parameters()))
        self.finetune_mu = finetune_mu
        # When only the class means are fine-tuned, the INN weights get lr 0.
        self.optimizer_params = [{
            'params': self.model_parameters,
            'lr': 0 * self.lr if finetune_mu else 1 * self.lr,
            'weight_decay':0.}
        ]
        if self.train_mu:
            self.optimizer_params.append({
                'params': [self.mu_fc],
                'lr': 1. * self.lr,
                'weight_decay': 0.
            })
            self.optimizer_params.append({
                'params': [self.mu_m, self.mu_t] if self.mu_low_rank_k > 0 else [self.mu_conv],
                'lr': 1. * self.lr,
                'weight_decay': 0.
            })
        if self.train_phi:
            self.optimizer_params.append({
                'params': [self.phi],
                'lr': 1. * self.lr,
                'weight_decay': 0.
            })
        self.optimizer = torch.optim.SGD(self.optimizer_params, self.lr, momentum=0.9, weight_decay=1e-5)

    def construct_inn(self, input, backbone: InvertibleArchitecture, head: InvertibleArchitecture):
        """Chain input -> backbone -> head into one ReversibleGraphNet.

        Stores the assembled network in ``self.model`` and returns the node
        list (split nodes from backbone/head are appended at the end).
        """
        nodes = []
        split_nodes = []
        nodes.append(input)
        backbone_nodes, backbone_split_nodes, skip_connections = backbone.construct_inn(nodes[-1])
        nodes += backbone_nodes
        if skip_connections:
            # The backbone exposes skip connections the head must consume.
            print("HAS SKIP CONNECTION")
            head_nodes, head_split_nodes = head.construct_inn(nodes[-1], skip_connections)
            split_nodes += backbone_split_nodes
        else:
            head_nodes, head_split_nodes = head.construct_inn(nodes[-1])
        # NOTE(review): the OutputNode is appended before head_nodes are added
        # to the list; presumably FrEIA resolves the graph from the node
        # connections rather than list order — confirm.
        nodes.append(Ff.OutputNode(head_nodes[-1], name='out_fc'))
        nodes += head_nodes
        split_nodes += head_split_nodes
        self.model = Ff.ReversibleGraphNet(nodes + split_nodes, verbose=True)
        print(self.model)
        return nodes

    def calc_mu_conv(self):
        # Materialize the low-rank conv means with a broadcastable leading dim.
        self.mu_conv = torch.mm(self.mu_t, self.mu_m).unsqueeze(0)

    def cluster_distances(self, z, mu):
        """Squared Euclidean distances ||z_i - mu_j||^2 via the expansion
        ||z||^2 + ||mu||^2 - 2<z, mu>."""
        z_i_z_i = torch.sum(z**2, dim=1, keepdim=True) # batchsize x 1
        mu_j_mu_j = torch.sum(mu**2, dim=2) # 1 x n_classes
        z_i_mu_j = torch.mm(z, mu.squeeze().t()) # batchsize x n_classes
        return -2 * z_i_mu_j + z_i_z_i + mu_j_mu_j

    def forward(self, x, y=None):
        """Compute training losses for a batch.

        Returns a dict with 'nll_joint_tr' and 'logits_tr'; when one-hot
        labels ``y`` are supplied, additionally 'nll_class_tr', 'cat_ce_tr'
        and 'acc_tr', with the four scalar losses reduced to batch means.
        """
        if self.finetune_mu:
            # Freeze the INN so only the class means receive gradients.
            with torch.no_grad():
                z_fc, z_conv = self.model(x)
                jac = self.model.log_jacobian(run_forward=False)
        else:
            z_fc, z_conv = self.model(x)
            jac = self.model.log_jacobian(run_forward=False)
        if self.mu_low_rank_k > 0:
            self.calc_mu_conv()
        # Total per-class distance = fc part + conv part.
        cluster_distances = self.cluster_distances(z_fc, self.mu_fc)
        cluster_distances += self.cluster_distances(z_conv, self.mu_conv)
        # Joint NLL: -logsumexp over classes minus log|det J|, per dimension.
        losses = {'nll_joint_tr': ((- torch.logsumexp(- 0.5 * cluster_distances, dim=1)) - jac) / self.n_total_dims_1d, 'logits_tr': - 0.5 * cluster_distances}
        if y is not None:
            # Class-conditional NLL uses only the distance to the true class.
            losses['nll_class_tr'] = ((0.5 * torch.sum(cluster_distances * y, dim=1)) - jac) / self.n_total_dims_1d
            losses['cat_ce_tr'] = - torch.sum((torch.log_softmax(- 0.5 * cluster_distances, dim=1)) * y, dim=1)
            losses['acc_tr'] = torch.mean((torch.argmax(y, dim=1) == torch.argmax(-cluster_distances, dim=1)).float())
            for lname in ['nll_joint_tr', 'nll_class_tr', 'cat_ce_tr', 'acc_tr']:
                losses[lname] = torch.mean(losses[lname])
        return losses

    def mu_pairwise_dist(self):
        """Pairwise squared distances between class means, summed over the
        fc and conv parts (off-diagonal entries only)."""
        distances = []
        for mu in [self.mu_fc, self.mu_conv]:
            mu_i_mu_j = mu.squeeze().mm(mu.squeeze().t())
            mu_i_mu_i = torch.sum(mu.squeeze()**2, 1, keepdim=True).expand(self.n_classes, self.n_classes)
            dist = mu_i_mu_i + mu_i_mu_i.t() - 2 * mu_i_mu_j
            # Select off-diagonal entries; clamp guards against negative
            # values from floating-point rounding.
            dist = torch.masked_select(dist, (1 - torch.eye(self.n_classes).cuda()).byte()).clamp(min=0.)
            distances.append(dist)
        return distances[0] + distances[1]

    def validate(self, x, y):
        """Evaluate forward losses without gradients; also reports the mean
        pairwise distance between class means ('delta_mu_val')."""
        with torch.no_grad():
            losses = self.forward(x, y)
            nll_joint, nll_class, cat_ce, acc = (losses['nll_joint_tr'], losses['nll_class_tr'], losses['cat_ce_tr'], losses['acc_tr'])
            mu_dist = torch.mean(torch.sqrt(self.mu_pairwise_dist()))
        return {'nll_joint_val': nll_joint,
                'nll_class_val': nll_class,
                'cat_ce_val': cat_ce,
                'acc_val': acc,
                'delta_mu_val': mu_dist}

    def sample(self, y, temperature=1.):
        """Sample latents around the class means of ``y`` and invert the INN.

        NOTE(review): references ``self.mu`` and ``self.inn``, neither of
        which is defined on this class (presumably ``self.mu_fc`` and
        ``self.model`` were intended) — as written this raises
        AttributeError; confirm before use.
        """
        z = temperature * torch.randn(y.shape[0], self.n_loss_dims_1d).cuda()
        mu = torch.sum(y.view(-1, self.n_classes, 1) * self.mu, dim=1)
        z = z + mu
        return self.inn(z, rev=True)

    def save(self, fname):
        """Serialize INN weights, class means and optimizer state to ``fname``.

        The saved keys depend on whether the conv means are low-rank
        ('mu_t'/'mu_m') or dense ('mu_conv').
        """
        if self.mu_low_rank_k > 0:
            torch.save({'inn': self.model.state_dict(),
                        'mu': self.mu_fc,
                        'mu_t': self.mu_t,
                        'mu_m': self.mu_m,
                        'opt': self.optimizer.state_dict()}, fname)
        else:
            torch.save({'inn': self.model.state_dict(),
                        'mu': self.mu_fc,
                        'mu_conv': self.mu_conv,
                        'opt': self.optimizer.state_dict()}, fname)

    def init_from_data(self, data):
        """Restore state from a checkpoint dict produced by ``save``.

        Optimizer state is loaded best-effort: older checkpoints may not be
        compatible, in which case a message is printed and training state is
        left fresh.
        """
        self.model.load_state_dict(data['inn'], strict=True)
        self.mu_fc.data.copy_(data['mu'].data)
        # hasattr guard keeps compatibility with checkpoints of models that
        # predate the low-rank option.
        if hasattr(self, "mu_low_rank_k") and self.mu_low_rank_k > 0:
            self.mu_t.data.copy_(data['mu_t'].data)
            self.mu_m.data.copy_(data['mu_m'].data)
            self.calc_mu_conv()
        else:
            self.mu_conv.data.copy_(data['mu_conv'].data)
        try:
            self.optimizer.load_state_dict(data['opt'])
        except:
            print('Not loading the optimizer')

    def load(self, fname):
        """Load a checkpoint file from disk and restore state from it."""
        data = torch.load(fname)
        self.init_from_data(data)
from torch.hub import load_state_dict_from_url
from ..backbones.invertible_resnet import InvertibleResNet
from ..heads.invertible_multiclass_classifier import InvertibleMulticlassClassifier
def _trustworthy_gc(beta, layers, pretrained, progress, pretrained_model_path, **kwargs):
    """Assemble a trustworthy generative classifier for the given beta.

    Loading and initializing the models made available under:
    https://heibox.uni-heidelberg.de/d/e7b5ba0d30f24cdca416/
    """
    resnet_backbone = InvertibleResNet(
        64,
        clamp=0.7,
        act_norm=0.7,
        blocks=layers,
        strides=[1, 2, 2, 2],
        dilations=[1, 1, 1, 1],
        permute_soft=False,
    )
    classifier_head = InvertibleMulticlassClassifier(
        1024,
        3072,
        224 * 224 * 3,
        clamp=0.7,
        act_norm=0.7,
        permute_soft=False,
    )
    model = InvertibleImagenetClassifier(
        0.0, 0.0, 0.0, 128, [3, 224, 224], 1000, 3072, 224 * 224 * 3,
        resnet_backbone, classifier_head, finetune_mu=False, **kwargs,
    )
    if not pretrained:
        return model
    # Pretrained weights: local file takes precedence over the hosted checkpoint.
    if pretrained_model_path is not None:
        model.load(pretrained_model_path)
    else:
        checkpoint = load_state_dict_from_url(model_urls[f"beta_{beta}"], progress=progress)
        model.init_from_data(checkpoint)
    return model
def trustworthy_gc_beta_0(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
    """ResNet-[3,4,6,3] trustworthy generative classifier trained with beta = 0."""
    return _trustworthy_gc(
        beta=0, layers=[3, 4, 6, 3], pretrained=pretrained,
        progress=progress, pretrained_model_path=pretrained_model_path, **kwargs,
    )
def trustworthy_gc_beta_1(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
    """ResNet-[3,4,6,3] trustworthy generative classifier trained with beta = 1."""
    return _trustworthy_gc(
        beta=1, layers=[3, 4, 6, 3], pretrained=pretrained,
        progress=progress, pretrained_model_path=pretrained_model_path, **kwargs,
    )
def trustworthy_gc_beta_2(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
    """ResNet-[3,4,6,3] trustworthy generative classifier trained with beta = 2."""
    return _trustworthy_gc(
        beta=2, layers=[3, 4, 6, 3], pretrained=pretrained,
        progress=progress, pretrained_model_path=pretrained_model_path, **kwargs,
    )
def trustworthy_gc_beta_4(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
    """ResNet-[3,4,6,3] trustworthy generative classifier trained with beta = 4."""
    return _trustworthy_gc(
        beta=4, layers=[3, 4, 6, 3], pretrained=pretrained,
        progress=progress, pretrained_model_path=pretrained_model_path, **kwargs,
    )
def trustworthy_gc_beta_8(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
    """ResNet-[3,4,6,3] trustworthy generative classifier trained with beta = 8."""
    return _trustworthy_gc(
        beta=8, layers=[3, 4, 6, 3], pretrained=pretrained,
        progress=progress, pretrained_model_path=pretrained_model_path, **kwargs,
    )
def trustworthy_gc_beta_16(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
    """ResNet-[3,4,6,3] trustworthy generative classifier trained with beta = 16."""
    return _trustworthy_gc(
        beta=16, layers=[3, 4, 6, 3], pretrained=pretrained,
        progress=progress, pretrained_model_path=pretrained_model_path, **kwargs,
    )
def trustworthy_gc_beta_32(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
return _trustworthy_gc(32, [3, 4, 6, 3], pretrained, progress, pretrained_model_path, **kwargs)
def trustworthy_gc_beta_inf(pretrained=False, progress=True, pretrained_model_path=None, **kwargs):
return _trustworthy_gc('inf', [3, 4, 6, 3], pretrained, progress, pretrained_model_path, **kwargs)
| [
"torch.optim.SGD",
"FrEIA.framework.ReversibleGraphNet",
"numpy.sqrt",
"torch.log_softmax",
"torch.mean",
"FrEIA.framework.OutputNode",
"torch.load",
"torch.eye",
"FrEIA.framework.InputNode",
"torch.argmax",
"torch.hub.load_state_dict_from_url",
"torch.mm",
"torch.sum",
"torch.logsumexp",
... | [((2890, 2976), 'torch.optim.SGD', 'torch.optim.SGD', (['self.optimizer_params', 'self.lr'], {'momentum': '(0.9)', 'weight_decay': '(1e-05)'}), '(self.optimizer_params, self.lr, momentum=0.9, weight_decay=\n 1e-05)\n', (2905, 2976), False, 'import torch\n'), ((3735, 3791), 'FrEIA.framework.ReversibleGraphNet', 'Ff.ReversibleGraphNet', (['(nodes + split_nodes)'], {'verbose': '(True)'}), '(nodes + split_nodes, verbose=True)\n', (3756, 3791), True, 'import FrEIA.framework as Ff\n'), ((3995, 4033), 'torch.sum', 'torch.sum', (['(z ** 2)'], {'dim': '(1)', 'keepdim': '(True)'}), '(z ** 2, dim=1, keepdim=True)\n', (4004, 4033), False, 'import torch\n'), ((4069, 4094), 'torch.sum', 'torch.sum', (['(mu ** 2)'], {'dim': '(2)'}), '(mu ** 2, dim=2)\n', (4078, 4094), False, 'import torch\n'), ((7911, 7928), 'torch.load', 'torch.load', (['fname'], {}), '(fname)\n', (7921, 7928), False, 'import torch\n'), ((857, 928), 'FrEIA.framework.InputNode', 'Ff.InputNode', (['input_dims[0]', 'input_dims[1]', 'input_dims[2]'], {'name': '"""input"""'}), "(input_dims[0], input_dims[1], input_dims[2], name='input')\n", (869, 928), True, 'import FrEIA.framework as Ff\n'), ((1071, 1113), 'numpy.sqrt', 'np.sqrt', (['(2 * (n_loss_dims_1d // n_classes))'], {}), '(2 * (n_loss_dims_1d // n_classes))\n', (1078, 1113), True, 'import numpy as np\n'), ((1148, 1189), 'torch.zeros', 'torch.zeros', (['(1)', 'n_classes', 'n_loss_dims_1d'], {}), '(1, n_classes, n_loss_dims_1d)\n', (1159, 1189), False, 'import torch\n'), ((3598, 3642), 'FrEIA.framework.OutputNode', 'Ff.OutputNode', (['head_nodes[-1]'], {'name': '"""out_fc"""'}), "(head_nodes[-1], name='out_fc')\n", (3611, 3642), True, 'import FrEIA.framework as Ff\n'), ((6045, 6060), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6058, 6060), False, 'import torch\n'), ((9027, 9092), 'torch.hub.load_state_dict_from_url', 'load_state_dict_from_url', (['model_urls[beta_str]'], {'progress': 'progress'}), '(model_urls[beta_str], progress=progress)\n', 
(9051, 9092), False, 'from torch.hub import load_state_dict_from_url\n'), ((1329, 1349), 'torch.eye', 'torch.eye', (['n_classes'], {}), '(n_classes)\n', (1338, 1349), False, 'import torch\n'), ((3892, 3922), 'torch.mm', 'torch.mm', (['self.mu_t', 'self.mu_m'], {}), '(self.mu_t, self.mu_m)\n', (3900, 3922), False, 'import torch\n'), ((4334, 4349), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4347, 4349), False, 'import torch\n'), ((5439, 5464), 'torch.mean', 'torch.mean', (['losses[lname]'], {}), '(losses[lname])\n', (5449, 5464), False, 'import torch\n'), ((1774, 1833), 'torch.randn', 'torch.randn', (['(1)', 'n_classes', '(n_total_dims_1d - n_loss_dims_1d)'], {}), '(1, n_classes, n_total_dims_1d - n_loss_dims_1d)\n', (1785, 1833), False, 'import torch\n'), ((6588, 6632), 'torch.randn', 'torch.randn', (['y.shape[0]', 'self.n_loss_dims_1d'], {}), '(y.shape[0], self.n_loss_dims_1d)\n', (6599, 6632), False, 'import torch\n'), ((4828, 4876), 'torch.logsumexp', 'torch.logsumexp', (['(-0.5 * cluster_distances)'], {'dim': '(1)'}), '(-0.5 * cluster_distances, dim=1)\n', (4843, 4876), False, 'import torch\n'), ((5022, 5061), 'torch.sum', 'torch.sum', (['(cluster_distances * y)'], {'dim': '(1)'}), '(cluster_distances * y, dim=1)\n', (5031, 5061), False, 'import torch\n'), ((5140, 5190), 'torch.log_softmax', 'torch.log_softmax', (['(-0.5 * cluster_distances)'], {'dim': '(1)'}), '(-0.5 * cluster_distances, dim=1)\n', (5157, 5190), False, 'import torch\n'), ((1543, 1590), 'torch.randn', 'torch.randn', (['self.n_classes', 'self.mu_low_rank_k'], {}), '(self.n_classes, self.mu_low_rank_k)\n', (1554, 1590), False, 'import torch\n'), ((1651, 1696), 'torch.randn', 'torch.randn', (['self.mu_low_rank_k', 'mu_conv_dims'], {}), '(self.mu_low_rank_k, mu_conv_dims)\n', (1662, 1696), False, 'import torch\n'), ((5248, 5270), 'torch.argmax', 'torch.argmax', (['y'], {'dim': '(1)'}), '(y, dim=1)\n', (5260, 5270), False, 'import torch\n'), ((5274, 5313), 'torch.argmax', 'torch.argmax', 
(['(-cluster_distances)'], {'dim': '(1)'}), '(-cluster_distances, dim=1)\n', (5286, 5313), False, 'import torch\n'), ((5867, 5892), 'torch.eye', 'torch.eye', (['self.n_classes'], {}), '(self.n_classes)\n', (5876, 5892), False, 'import torch\n')] |
import numpy as np
from skimage.morphology import label
from scipy.sparse import csr_matrix
from scipy.spatial import cKDTree as KDTree
import pandas as pd
import itertools
from tqdm import tqdm
def compute_M(data):
    """Build a sparse label-to-pixel incidence matrix.

    Row ``v`` of the result stores (as its explicit data) the flat indices
    of every element of *data* equal to ``v`` — the classic CSR-based
    grouping trick.
    """
    flat_positions = np.arange(data.size)
    return csr_matrix(
        (flat_positions, (data.ravel(), flat_positions)),
        shape=(data.max() + 1, data.size),
    )


def get_indices_sparse(data):
    """Return, for each label value v, the nd-coordinates where data == v.

    Equivalent to ``[np.where(data == v) for v in range(data.max() + 1)]``
    but computed in a single pass via a sparse incidence matrix.
    """
    incidence = compute_M(data)
    return [np.unravel_index(row.data, data.shape) for row in incidence]
def Execute_Correspondences_CreateInputs(candidates,normalized_images,im_th,cycle,channels,nbit):
    """Match candidate detections across the four base channels of one cycle
    and build per-signal rows plus classifier input windows.

    Connected components of the union of the per-channel candidate masks are
    treated as one physical signal.  Compact components produce one row;
    over-large components (likely covering several signals) are split into
    one row per unique voxel position.  For each row, a 5x5 intensity window
    around the brightest channel's position is normalized and stored.

    Args:
        candidates: 5D array indexed [cycle, channel, z, y, x] of candidate
            masks; the four base channels start at channel index 2.
        normalized_images: 5D array (same layout) of normalized intensities.
        im_th: 5D array (same layout) of thresholded intensities used to fill
            the per-channel intensity columns.
        cycle (int): Index of the sequencing cycle to process.
        channels (int): Exclusive upper channel bound for the max-projection.
        nbit: Unused in this function — presumably kept for interface
            symmetry with callers; TODO confirm.

    Returns:
        dict: {'max_df': per-signal intensities/coordinates,
               'inputs_df': rows holding the normalized 5x5 windows}.

    NOTE(review): ``Series``/``DataFrame.append`` was removed in pandas 2.0;
    this function requires pandas < 2 (migrate to ``pd.concat`` to modernize).
    """
    print(str(cycle)+" "+str(channels))
    inputs_df=pd.DataFrame(columns=['cycle','ch','x','y','z','Intensities_window_5x5'])
    max_df=pd.DataFrame(columns=['I_T','I_G','I_C','I_A','x_T','y_T','z_T','x_G','y_G','z_G','x_C','y_C','z_C','x_A','y_A','z_A','cycle'])
    # Label connected components on the max-projection over base channels;
    # connectivity=1 means only face-adjacent voxels are connected.
    cc, n_c = label(np.amax(candidates[cycle,2:channels,:,:,:],axis=0),return_num=True,connectivity=1)
    conn_components = np.zeros((4,candidates.shape[-3],candidates.shape[-2],candidates.shape[-1]))
    # Stamp the component labels back onto each base channel's own mask.
    for ch in range(4):
        conn_components[ch,:,:,:] = np.multiply(cc,candidates[cycle,ch+2,:,:,:]).astype(np.uint16) #presume to not have more than 65535 cc to save memory
    # For every label, collect the (ch, z, y, x) indices of its voxels.
    conn_components = get_indices_sparse(conn_components.astype(np.uint16))
    for i in tqdm(range(1,n_c+1)):
        ch,z,y,x = conn_components[i]
        kdT_tmp = KDTree(np.array([z,x,y]).T)
        # Compactness check: the component is one signal iff every voxel pair
        # lies within L1 distance 2 of each other.
        if len(list(itertools.combinations(np.arange(len(x)),2)))==len(kdT_tmp.query_pairs(2,p=1)): # if connected components is too large (likely cover more signals) then split it
            # One row: columns 0-3 hold intensities I_T..I_A; coordinate
            # triples (x, y, z) start at column 4, one triple per channel.
            df=pd.Series(data={ 'I_T':np.nan,'I_G':np.nan,'I_C':np.nan,'I_A':np.nan,'x_T':np.nan,'y_T':np.nan,'z_T':np.nan,'x_G':np.nan,'y_G':np.nan,'z_G':np.nan,'x_C':np.nan,'y_C':np.nan,'z_C':np.nan,'x_A':np.nan,'y_A':np.nan,'z_A':np.nan,'cycle':cycle})
            df=df[['I_T','I_G','I_C','I_A','x_T','y_T','z_T','x_G','y_G','z_G','x_C','y_C','z_C','x_A','y_A','z_A','cycle']]
            for j in range(len(x)):
                df.iloc[ch[j]] = im_th[cycle,ch[j]+2,z[j],y[j],x[j]]
                df.iloc[ch[j]*3+4]= x[j]
                df.iloc[ch[j]*3+4+1]= y[j]
                df.iloc[ch[j]*3+4+2]= z[j]
            # Pick the channel with the highest intensity and read back the
            # coordinates stored for that channel.
            I=df['I_T':'I_A']
            col=I[I==np.nanmax(I)].index[0] #retrieving the column
            tomove=df.index.get_loc(col) #column index to reach the correct columns coordinates
            x_ch=int(df[tomove*3+4])
            y_ch=int(df[tomove*3+4+1])
            z_ch=int(df[tomove*3+4+2])
            ch_idx=tomove
            cycle=int(df['cycle'])  # NOTE: rebinds the parameter (same value, cast to int)
            # 5x5 in-plane window centred on the brightest channel's voxel.
            rect=normalized_images[cycle,ch_idx+2,z_ch,y_ch-2:y_ch+3,x_ch-2:x_ch+3]
            if not rect.size==0:
                # Min-max normalize, then zero-centre the window.
                rect=(rect-np.amin(rect))/(np.amax(rect)-np.amin(rect))
                rect=rect-np.mean(rect)
                row=pd.Series(data={'cycle':cycle,'ch':ch_idx+2,'x':x_ch,'y':y_ch,'z':z_ch,'Intensities_window_5x5':rect})
                inputs_df=inputs_df.append(row,ignore_index=True)
                max_df=max_df.append(df,ignore_index=True)
        else:
            # Over-large component: emit one row per unique voxel position,
            # grouping together the channels detected at that same position.
            coords = np.vstack((z,x,y))
            coords_unique = np.unique(coords,axis=1)
            for j in range(coords_unique.shape[-1]):
                coords_tmp = coords_unique[:,j][:, np.newaxis]
                coords_idx = np.argwhere(np.all(coords==coords_tmp,axis=0)).reshape((-1,))
                df=pd.Series(data={ 'I_T':np.nan,'I_G':np.nan,'I_C':np.nan,'I_A':np.nan,'x_T':np.nan,'y_T':np.nan,'z_T':np.nan,'x_G':np.nan,'y_G':np.nan,'z_G':np.nan,'x_C':np.nan,'y_C':np.nan,'z_C':np.nan,'x_A':np.nan,'y_A':np.nan,'z_A':np.nan,'cycle':cycle})
                df=df[['I_T','I_G','I_C','I_A','x_T','y_T','z_T','x_G','y_G','z_G','x_C','y_C','z_C','x_A','y_A','z_A','cycle']]
                for k in coords_idx:
                    df.iloc[ch[k]] = im_th[cycle,ch[k]+2,z[k],y[k],x[k]]
                    df.iloc[ch[k]*3+4]= x[k]
                    df.iloc[ch[k]*3+4+1]= y[k]
                    df.iloc[ch[k]*3+4+2]= z[k]
                # Same brightest-channel / window extraction as above.
                I=df['I_T':'I_A']
                col=I[I==np.nanmax(I)].index[0] #retrieving the column
                tomove=df.index.get_loc(col) #column index to reach the correct columns coordinates
                x_ch=int(df[tomove*3+4])
                y_ch=int(df[tomove*3+4+1])
                z_ch=int(df[tomove*3+4+2])
                ch_idx=tomove
                cycle=int(df['cycle'])
                rect=normalized_images[cycle,ch_idx+2,z_ch,y_ch-2:y_ch+3,x_ch-2:x_ch+3]
                if not rect.size==0:
                    rect=(rect-np.amin(rect))/(np.amax(rect)-np.amin(rect))
                    rect=rect-np.mean(rect)
                    row=pd.Series(data={'cycle':cycle,'ch':ch_idx+2,'x':x_ch,'y':y_ch,'z':z_ch,'Intensities_window_5x5':rect})
                    inputs_df=inputs_df.append(row,ignore_index=True)
                    max_df=max_df.append(df,ignore_index=True)
    return {'max_df':max_df, 'inputs_df':inputs_df}
| [
"pandas.Series",
"numpy.mean",
"numpy.multiply",
"numpy.unique",
"numpy.amin",
"numpy.array",
"numpy.zeros",
"numpy.unravel_index",
"numpy.vstack",
"numpy.nanmax",
"pandas.DataFrame",
"numpy.all",
"numpy.amax",
"numpy.arange"
] | [((237, 257), 'numpy.arange', 'np.arange', (['data.size'], {}), '(data.size)\n', (246, 257), True, 'import numpy as np\n'), ((650, 728), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['cycle', 'ch', 'x', 'y', 'z', 'Intensities_window_5x5']"}), "(columns=['cycle', 'ch', 'x', 'y', 'z', 'Intensities_window_5x5'])\n", (662, 728), True, 'import pandas as pd\n'), ((736, 883), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['I_T', 'I_G', 'I_C', 'I_A', 'x_T', 'y_T', 'z_T', 'x_G', 'y_G', 'z_G',\n 'x_C', 'y_C', 'z_C', 'x_A', 'y_A', 'z_A', 'cycle']"}), "(columns=['I_T', 'I_G', 'I_C', 'I_A', 'x_T', 'y_T', 'z_T',\n 'x_G', 'y_G', 'z_G', 'x_C', 'y_C', 'z_C', 'x_A', 'y_A', 'z_A', 'cycle'])\n", (748, 883), True, 'import pandas as pd\n'), ((996, 1075), 'numpy.zeros', 'np.zeros', (['(4, candidates.shape[-3], candidates.shape[-2], candidates.shape[-1])'], {}), '((4, candidates.shape[-3], candidates.shape[-2], candidates.shape[-1]))\n', (1004, 1075), True, 'import numpy as np\n'), ((440, 478), 'numpy.unravel_index', 'np.unravel_index', (['row.data', 'data.shape'], {}), '(row.data, data.shape)\n', (456, 478), True, 'import numpy as np\n'), ((887, 942), 'numpy.amax', 'np.amax', (['candidates[cycle, 2:channels, :, :, :]'], {'axis': '(0)'}), '(candidates[cycle, 2:channels, :, :, :], axis=0)\n', (894, 942), True, 'import numpy as np\n'), ((1664, 1949), 'pandas.Series', 'pd.Series', ([], {'data': "{'I_T': np.nan, 'I_G': np.nan, 'I_C': np.nan, 'I_A': np.nan, 'x_T': np.nan,\n 'y_T': np.nan, 'z_T': np.nan, 'x_G': np.nan, 'y_G': np.nan, 'z_G': np.\n nan, 'x_C': np.nan, 'y_C': np.nan, 'z_C': np.nan, 'x_A': np.nan, 'y_A':\n np.nan, 'z_A': np.nan, 'cycle': cycle}"}), "(data={'I_T': np.nan, 'I_G': np.nan, 'I_C': np.nan, 'I_A': np.nan,\n 'x_T': np.nan, 'y_T': np.nan, 'z_T': np.nan, 'x_G': np.nan, 'y_G': np.\n nan, 'z_G': np.nan, 'x_C': np.nan, 'y_C': np.nan, 'z_C': np.nan, 'x_A':\n np.nan, 'y_A': np.nan, 'z_A': np.nan, 'cycle': cycle})\n", (1673, 1949), True, 'import pandas as 
pd\n'), ((2908, 3027), 'pandas.Series', 'pd.Series', ([], {'data': "{'cycle': cycle, 'ch': ch_idx + 2, 'x': x_ch, 'y': y_ch, 'z': z_ch,\n 'Intensities_window_5x5': rect}"}), "(data={'cycle': cycle, 'ch': ch_idx + 2, 'x': x_ch, 'y': y_ch, 'z':\n z_ch, 'Intensities_window_5x5': rect})\n", (2917, 3027), True, 'import pandas as pd\n'), ((3195, 3215), 'numpy.vstack', 'np.vstack', (['(z, x, y)'], {}), '((z, x, y))\n', (3204, 3215), True, 'import numpy as np\n'), ((3243, 3268), 'numpy.unique', 'np.unique', (['coords'], {'axis': '(1)'}), '(coords, axis=1)\n', (3252, 3268), True, 'import numpy as np\n'), ((1137, 1188), 'numpy.multiply', 'np.multiply', (['cc', 'candidates[cycle, ch + 2, :, :, :]'], {}), '(cc, candidates[cycle, ch + 2, :, :, :])\n', (1148, 1188), True, 'import numpy as np\n'), ((1443, 1462), 'numpy.array', 'np.array', (['[z, x, y]'], {}), '([z, x, y])\n', (1451, 1462), True, 'import numpy as np\n'), ((3498, 3783), 'pandas.Series', 'pd.Series', ([], {'data': "{'I_T': np.nan, 'I_G': np.nan, 'I_C': np.nan, 'I_A': np.nan, 'x_T': np.nan,\n 'y_T': np.nan, 'z_T': np.nan, 'x_G': np.nan, 'y_G': np.nan, 'z_G': np.\n nan, 'x_C': np.nan, 'y_C': np.nan, 'z_C': np.nan, 'x_A': np.nan, 'y_A':\n np.nan, 'z_A': np.nan, 'cycle': cycle}"}), "(data={'I_T': np.nan, 'I_G': np.nan, 'I_C': np.nan, 'I_A': np.nan,\n 'x_T': np.nan, 'y_T': np.nan, 'z_T': np.nan, 'x_G': np.nan, 'y_G': np.\n nan, 'z_G': np.nan, 'x_C': np.nan, 'y_C': np.nan, 'z_C': np.nan, 'x_A':\n np.nan, 'y_A': np.nan, 'z_A': np.nan, 'cycle': cycle})\n", (3507, 3783), True, 'import pandas as pd\n'), ((4805, 4924), 'pandas.Series', 'pd.Series', ([], {'data': "{'cycle': cycle, 'ch': ch_idx + 2, 'x': x_ch, 'y': y_ch, 'z': z_ch,\n 'Intensities_window_5x5': rect}"}), "(data={'cycle': cycle, 'ch': ch_idx + 2, 'x': x_ch, 'y': y_ch, 'z':\n z_ch, 'Intensities_window_5x5': rect})\n", (4814, 4924), True, 'import pandas as pd\n'), ((2877, 2890), 'numpy.mean', 'np.mean', (['rect'], {}), '(rect)\n', (2884, 2890), True, 'import numpy as 
np\n'), ((2805, 2818), 'numpy.amin', 'np.amin', (['rect'], {}), '(rect)\n', (2812, 2818), True, 'import numpy as np\n'), ((2821, 2834), 'numpy.amax', 'np.amax', (['rect'], {}), '(rect)\n', (2828, 2834), True, 'import numpy as np\n'), ((2835, 2848), 'numpy.amin', 'np.amin', (['rect'], {}), '(rect)\n', (2842, 2848), True, 'import numpy as np\n'), ((4770, 4783), 'numpy.mean', 'np.mean', (['rect'], {}), '(rect)\n', (4777, 4783), True, 'import numpy as np\n'), ((2332, 2344), 'numpy.nanmax', 'np.nanmax', (['I'], {}), '(I)\n', (2341, 2344), True, 'import numpy as np\n'), ((3428, 3464), 'numpy.all', 'np.all', (['(coords == coords_tmp)'], {'axis': '(0)'}), '(coords == coords_tmp, axis=0)\n', (3434, 3464), True, 'import numpy as np\n'), ((4694, 4707), 'numpy.amin', 'np.amin', (['rect'], {}), '(rect)\n', (4701, 4707), True, 'import numpy as np\n'), ((4710, 4723), 'numpy.amax', 'np.amax', (['rect'], {}), '(rect)\n', (4717, 4723), True, 'import numpy as np\n'), ((4724, 4737), 'numpy.amin', 'np.amin', (['rect'], {}), '(rect)\n', (4731, 4737), True, 'import numpy as np\n'), ((4185, 4197), 'numpy.nanmax', 'np.nanmax', (['I'], {}), '(I)\n', (4194, 4197), True, 'import numpy as np\n')] |
from unittest import TestCase
import numpy as np
from pyecsca.sca import Trace, welch_ttest, student_ttest, ks_test
class TTestTests(TestCase):
    """Tests for the Welch and Student t-test statistics on traces."""
    @staticmethod
    def _int8_trace(*samples):
        # Wrap the given sample values in an int8-backed Trace.
        return Trace(np.array(list(samples), dtype=np.dtype("i1")))
    def setUp(self):
        self.a = self._int8_trace(20, 80)
        self.b = self._int8_trace(30, 42)
        self.c = self._int8_trace(78, 56)
        self.d = self._int8_trace(98, 36)
    def test_welch_ttest(self):
        self.assertIsNotNone(welch_ttest([self.a, self.b], [self.c, self.d]))
        first = Trace(np.array([19.8, 20.4, 19.6, 17.8, 18.5, 18.9, 18.3, 18.9, 19.5, 22.0]))
        second = Trace(np.array([28.2, 26.6, 20.1, 23.3, 25.2, 22.1, 17.7, 27.6, 20.6, 13.7]))
        third = Trace(np.array([20.2, 21.6, 27.1, 13.3, 24.2, 20.1, 11.7, 25.6, 26.6, 21.4]))
        # Also exercise the degrees-of-freedom and p-value outputs.
        self.assertIsNotNone(welch_ttest([first, second], [second, third], dof=True, p_value=True))
    def test_students_ttest(self):
        self.assertIsNone(student_ttest([], []))
        self.assertIsNotNone(student_ttest([self.a, self.b], [self.c, self.d]))
class KolmogorovSmirnovTests(TestCase):
    """Tests for the two-sample Kolmogorov-Smirnov trace test."""
    def test_ks_test(self):
        # Empty inputs are rejected with None.
        self.assertIsNone(ks_test([], []))
        values = ([20, 80], [30, 42], [78, 56], [98, 36])
        a, b, c, d = (Trace(np.array(v, dtype=np.dtype("i1"))) for v in values)
        self.assertIsNotNone(ks_test([a, b], [c, d]))
| [
"pyecsca.sca.welch_ttest",
"pyecsca.sca.ks_test",
"pyecsca.sca.student_ttest",
"numpy.array",
"numpy.dtype"
] | [((829, 880), 'pyecsca.sca.welch_ttest', 'welch_ttest', (['[a, b]', '[b, c]'], {'dof': '(True)', 'p_value': '(True)'}), '([a, b], [b, c], dof=True, p_value=True)\n', (840, 880), False, 'from pyecsca.sca import Trace, welch_ttest, student_ttest, ks_test\n'), ((492, 539), 'pyecsca.sca.welch_ttest', 'welch_ttest', (['[self.a, self.b]', '[self.c, self.d]'], {}), '([self.a, self.b], [self.c, self.d])\n', (503, 539), False, 'from pyecsca.sca import Trace, welch_ttest, student_ttest, ks_test\n'), ((559, 629), 'numpy.array', 'np.array', (['[19.8, 20.4, 19.6, 17.8, 18.5, 18.9, 18.3, 18.9, 19.5, 22.0]'], {}), '([19.8, 20.4, 19.6, 17.8, 18.5, 18.9, 18.3, 18.9, 19.5, 22.0])\n', (567, 629), True, 'import numpy as np\n'), ((649, 719), 'numpy.array', 'np.array', (['[28.2, 26.6, 20.1, 23.3, 25.2, 22.1, 17.7, 27.6, 20.6, 13.7]'], {}), '([28.2, 26.6, 20.1, 23.3, 25.2, 22.1, 17.7, 27.6, 20.6, 13.7])\n', (657, 719), True, 'import numpy as np\n'), ((739, 809), 'numpy.array', 'np.array', (['[20.2, 21.6, 27.1, 13.3, 24.2, 20.1, 11.7, 25.6, 26.6, 21.4]'], {}), '([20.2, 21.6, 27.1, 13.3, 24.2, 20.1, 11.7, 25.6, 26.6, 21.4])\n', (747, 809), True, 'import numpy as np\n'), ((980, 1001), 'pyecsca.sca.student_ttest', 'student_ttest', (['[]', '[]'], {}), '([], [])\n', (993, 1001), False, 'from pyecsca.sca import Trace, welch_ttest, student_ttest, ks_test\n'), ((1032, 1081), 'pyecsca.sca.student_ttest', 'student_ttest', (['[self.a, self.b]', '[self.c, self.d]'], {}), '([self.a, self.b], [self.c, self.d])\n', (1045, 1081), False, 'from pyecsca.sca import Trace, welch_ttest, student_ttest, ks_test\n'), ((1180, 1195), 'pyecsca.sca.ks_test', 'ks_test', (['[]', '[]'], {}), '([], [])\n', (1187, 1195), False, 'from pyecsca.sca import Trace, welch_ttest, student_ttest, ks_test\n'), ((1467, 1490), 'pyecsca.sca.ks_test', 'ks_test', (['[a, b]', '[c, d]'], {}), '([a, b], [c, d])\n', (1474, 1490), False, 'from pyecsca.sca import Trace, welch_ttest, student_ttest, ks_test\n'), ((218, 232), 'numpy.dtype', 
'np.dtype', (['"""i1"""'], {}), "('i1')\n", (226, 232), True, 'import numpy as np\n'), ((283, 297), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (291, 297), True, 'import numpy as np\n'), ((348, 362), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (356, 362), True, 'import numpy as np\n'), ((413, 427), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (421, 427), True, 'import numpy as np\n'), ((1241, 1255), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (1249, 1255), True, 'import numpy as np\n'), ((1301, 1315), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (1309, 1315), True, 'import numpy as np\n'), ((1361, 1375), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (1369, 1375), True, 'import numpy as np\n'), ((1421, 1435), 'numpy.dtype', 'np.dtype', (['"""i1"""'], {}), "('i1')\n", (1429, 1435), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Data Analysis
# This module will explore the stage of data analysis using the lens of a bias-aware methodology. We will make use of Jupyter notebooks to aid our exploratory data analysis, in order to understand how social, cognitive, and statistical biases interact and affect downstream stages in the research and innovation lifecycle. However, you do not need to have any experience with Python or Jupyter Notebooks to follow along.
#
# Exploratory data analysis is an important stage for hypothesis generation or uncovering possible limitations of the dataset that can arise from missing data, in turn identifying the need for any subsequent augmentation of the dataset to deal with possible class imbalances. However, there are also risks that stem from cognitive biases (e.g. confirmation bias) that can create cascading effects that effect downstream tasks (e.g. model reporting).
#
# We will look at the following stages of data analysis:
#
# 1. Importing Data
# 2. Describing the Data
# 3. Analysing the Data
# 4. Querying the Data
# 5. Visualising the Data
# In[1]:
from matplotlib import rcParams, cycler
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.lines import Line2D

# Interactive mode so figures render as the notebook cells execute.
plt.ion()

# Fix the RNG state so the "random" series are reproducible across runs.
np.random.seed(19680801)

N = 10  # number of series to plot

# Each series is a log-spaced ramp plus Gaussian noise, offset by its index.
series = []
for offset in range(N):
    series.append(np.logspace(0, 1, 100) + np.random.randn(100) + offset)
data = np.array(series).T

# Colour successive lines along the coolwarm gradient.
cmap = plt.cm.coolwarm
rcParams['axes.prop_cycle'] = cycler(color=cmap(np.linspace(0, 1, N)))

# Proxy artists for a three-entry temperature legend.
custom_lines = [
    Line2D([0], [0], color=cmap(0.0), lw=4),
    Line2D([0], [0], color=cmap(0.5), lw=4),
    Line2D([0], [0], color=cmap(1.0), lw=4),
]

fig, ax = plt.subplots(figsize=(10, 5))
lines = ax.plot(data)
ax.legend(custom_lines, ['Cold', 'Medium', 'Hot'])
| [
"numpy.array",
"numpy.linspace",
"numpy.random.seed",
"matplotlib.pyplot.ion",
"numpy.logspace",
"numpy.random.randn",
"matplotlib.pyplot.subplots"
] | [((1216, 1225), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1223, 1225), True, 'import matplotlib.pyplot as plt\n'), ((1281, 1305), 'numpy.random.seed', 'np.random.seed', (['(19680801)'], {}), '(19680801)\n', (1295, 1305), True, 'import numpy as np\n'), ((1728, 1757), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1740, 1757), True, 'import matplotlib.pyplot as plt\n'), ((1400, 1414), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1408, 1414), True, 'import numpy as np\n'), ((1322, 1344), 'numpy.logspace', 'np.logspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (1333, 1344), True, 'import numpy as np\n'), ((1347, 1367), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (1362, 1367), True, 'import numpy as np\n'), ((1488, 1508), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (1499, 1508), True, 'import numpy as np\n')] |
from classes import Stock, format
import numpy as np
import os
from datetime import date, timedelta
from dateutil.parser import parse
import sys
# Alternative single-file inputs kept for quick manual experiments.
# datapath = 'formated/SZ#002673.json'
datapath = 'formated/SH#600007.json'
# datapath = 'formated/SZ#000553.json'
# amount = float(10000)
# Module-level Stock instance shared by test(); each test() call loads a new
# file into it, so this object is stateful across the batch run below.
stock = Stock()
def getFiles(dirname):
    """Recursively collect every ``.json`` file under *dirname*.

    Bug fix: the previous implementation reassigned ``files`` on every
    ``os.walk`` iteration, so only the last-visited directory's files were
    returned (and ``None`` if the walk yielded nothing).  Results are now
    accumulated across all directories, and an empty list is returned when
    nothing is found.

    Args:
        dirname: Root directory to search.

    Returns:
        list[str]: Paths (joined with their directory) of all .json files.
    """
    files = []
    for dirpath, _dirnames, filenames in os.walk(dirname):
        files.extend(os.path.join(dirpath, name)
                     for name in filenames if name.endswith('.json'))
    return files
# Collect every formatted stock-history JSON for the batch run below.
files = getFiles('formated')
def getDates(arr):
    """Render each quote record as '<date> - <close>' for display."""
    rendered = []
    for record in arr:
        rendered.append(' - '.join([record['date'], str(record['close'])]))
    return rendered
def reformatItem(item, index):
    """Annotate *item* in place with its position and a parsed date.

    Adds 'index' (the item's position in the history list) and 'odate'
    (a datetime.date parsed from the 'date' string), then returns the
    same dict for convenience.
    """
    item.update(index=index, odate=parse(item['date']).date())
    return item
# result = []
# amounts = []
def test(filepath, amount=float(10000), rate=0.1, begdate=date(2016, 1, 1), enddate=date(2017, 1, 1)):
    """Back-test a fixed take-profit strategy on one stock's history.

    Buys at the close of each MACD "first turning" day within
    [begdate, enddate) and sells as soon as a later day's high reaches
    close * (1 + rate), compounding the proceeds.  Mutates the
    module-level ``stock`` by loading *filepath* into it.

    Args:
        filepath: Path to a formatted stock-history JSON file.
        amount: Starting capital.
        rate: Take-profit threshold as a fraction of the buy price.
        begdate: First date (inclusive) considered for buying.
        enddate: Last date (exclusive) considered for buying.

    Returns:
        dict: 'times' (completed trades), 'amount' (final capital),
        'avgtime' (mean holding period as a timedelta, or 0 when there
        were no trades), and 'rate' (the threshold used).
    """
    stock.load(filepath)
    nextDate = begdate
    total = len(stock.items)
    # print('rate = %s' % rate)
    # global result
    # global amounts
    durs = []
    # Candidate buy days: MACD first-turning points (with index/odate added).
    turnings = [reformatItem(stock.items[i], i) for i in range(1, total - 1)
                if stock.isFirstTurningByMACD(stock.items[i], stock.items[i - 1])]
    for d in [d for d in turnings if d['odate'] >= begdate and d['odate'] < enddate]:
        # Skip turnings that fall inside a position we are still holding.
        if d['odate'] < nextDate:
            continue
        if d['odate'] > nextDate:
            nextDate = d['odate']
        # Buy: number of whole shares the current capital affords at this close.
        num = int(amount / d['close'])
        for dd in [j for j in stock.items if parse(j['date']).date() > nextDate]:
            diff = dd['high'] - d['close']
            diffp = diff / d['close']
            if diffp >= rate:
                # Sell at exactly the take-profit price (assumes the limit
                # order fills once the day's high reaches it).
                out = d['close'] * (1 + rate)
                diffout = out - d['close']
                dur = parse(dd['date']) - parse(d['date'])
                durs.extend([dur])
                # print('%s - %-5s | %s - %-7s open=%-5s,high=%-5s,close=%-5s | %-6s:%s | %s' % (
                #     d['date'], d['close'], dd['date'], out, dd['open'], dd['high'],
                #     dd['close'], diffout, num, dur))
                amount += diffout * num
                nextDate = parse(dd['date']).date()
                break
    # print('amount = {}'.format(amount))
    # lendurs = len(durs)
    # if not lendurs == 0:
    #     result.extend(
    #         [{'code': stock.code, 'amount': amount, 'durs': durs, 'avgtimes': sum(durs, timedelta()) / len(durs)}])
    # result.extend([{'code': stock.code, 'amount': amount, 'durs': durs, 'avgtimes': timedelta()}])
    # sortedDurs = sorted(durs, reverse=True)
    # if len(sortedDurs) > 0:
    #     d = sortedDurs[0]
    #     return {
    #         'code': stock.code,
    #         'rate': rate,
    #         'amount': amount,
    #         'period': d
    #     }
    # return None
    lendurs = len(durs)
    avg = 0
    if lendurs > 0:
        avg = sum(durs, timedelta()) / lendurs
    return {
        'times': lendurs,
        'amount': amount,
        'avgtime': avg,
        'rate': rate
    }
def displayResult(arr):
    """Print each result's average holding time and final amount, aligned."""
    for entry in arr:
        print('{:<20} | {:<10}'.format(str(entry['avgtimes']), str(entry['amount'])))
# test(datapath)
# for fp in files[:10]:
# test(fp)
# Grid-search the take-profit rate for every stock file, keeping each
# stock's best (most trades, then highest final amount) configuration.
result = []
lenfile = len(files)
i = 1
for fp in files:
    periods = []
    # Sweep rates from ~0.099 down to 0.02 in 0.001 steps.
    for rate in np.arange(0.02, 0.1, 0.001)[::-1]:
        d = test(fp, rate=rate)
        periods.extend([d])
        # Progress indicator: "<file index>/<file count> <current rate>".
        sys.stdout.write('\r%4d/%4d %5f' % (i,lenfile,rate))
        sys.stdout.flush()
    # Best configuration for this stock (most trades, then most capital).
    sp = sorted(periods, key=lambda d: (d['times'], d['amount']), reverse=True)
    if len(sp) > 0:
        first = sp[0]
        first['code'] = stock.code
        result.extend([first])
    i+=1
# Rank all stocks by the same criterion and show the top ten.
sr = sorted(result, key=lambda d: (d['times'], d['amount']), reverse=True)
print (sr[:10])
# sortedamounts = sorted(amounts, key=lambda (a, r): a)
# print (sortedamounts)
# sortedresult = sorted(result, key=lambda d: (d['avgtimes']))
# displayResult(sortedresult)
| [
"dateutil.parser.parse",
"os.walk",
"os.path.join",
"datetime.timedelta",
"datetime.date",
"sys.stdout.flush",
"classes.Stock",
"numpy.arange",
"sys.stdout.write"
] | [((294, 301), 'classes.Stock', 'Stock', ([], {}), '()\n', (299, 301), False, 'from classes import Stock, format\n'), ((384, 400), 'os.walk', 'os.walk', (['dirname'], {}), '(dirname)\n', (391, 400), False, 'import os\n'), ((837, 853), 'datetime.date', 'date', (['(2016)', '(1)', '(1)'], {}), '(2016, 1, 1)\n', (841, 853), False, 'from datetime import date, timedelta\n'), ((863, 879), 'datetime.date', 'date', (['(2017)', '(1)', '(1)'], {}), '(2017, 1, 1)\n', (867, 879), False, 'from datetime import date, timedelta\n'), ((3307, 3334), 'numpy.arange', 'np.arange', (['(0.02)', '(0.1)', '(0.001)'], {}), '(0.02, 0.1, 0.001)\n', (3316, 3334), True, 'import numpy as np\n'), ((3410, 3464), 'sys.stdout.write', 'sys.stdout.write', (["('\\r%4d/%4d %5f' % (i, lenfile, rate))"], {}), "('\\r%4d/%4d %5f' % (i, lenfile, rate))\n", (3426, 3464), False, 'import sys\n'), ((3471, 3489), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3487, 3489), False, 'import sys\n'), ((419, 443), 'os.path.join', 'os.path.join', (['dirpath', 'p'], {}), '(dirpath, p)\n', (431, 443), False, 'import os\n'), ((702, 721), 'dateutil.parser.parse', 'parse', (["item['date']"], {}), "(item['date'])\n", (707, 721), False, 'from dateutil.parser import parse\n'), ((2911, 2922), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (2920, 2922), False, 'from datetime import date, timedelta\n'), ((1794, 1811), 'dateutil.parser.parse', 'parse', (["dd['date']"], {}), "(dd['date'])\n", (1799, 1811), False, 'from dateutil.parser import parse\n'), ((1814, 1830), 'dateutil.parser.parse', 'parse', (["d['date']"], {}), "(d['date'])\n", (1819, 1830), False, 'from dateutil.parser import parse\n'), ((2172, 2189), 'dateutil.parser.parse', 'parse', (["dd['date']"], {}), "(dd['date'])\n", (2177, 2189), False, 'from dateutil.parser import parse\n'), ((1512, 1528), 'dateutil.parser.parse', 'parse', (["j['date']"], {}), "(j['date'])\n", (1517, 1528), False, 'from dateutil.parser import parse\n')] |
"""
.. module:: Residue
:synopsis: This module implements the Residue class.
"""
# Third-party modules
import numpy as np
# Local modules
from src.atom import Atom
class Residue:
    """
    .. class:: Residue

    This class groups information about a single amino-acid residue.

    Attributes:
        name (str): Name of the residue (1-letter code).
        ca_atom (Atom): Alpha carbon ("CA") atom of the residue.
        cb_atom (Atom): Beta carbon ("CB") atom of the residue.
        c_atom (Atom): Backbone carbonyl carbon ("C") atom.
        n_atom (Atom): Backbone nitrogen ("N") atom.
        secondary_struct: Secondary-structure assignment (None until set).
        ss_confidence: Confidence of the secondary-structure assignment
            (None until set).
    """
    def __init__(self, name):
        self.name = name
        self.ca_atom = Atom("CA")
        self.cb_atom = Atom("CB")
        self.c_atom = Atom("C")
        self.n_atom = Atom("N")
        self.secondary_struct = None
        self.ss_confidence = None
    def __str__(self):
        return str(self.name)
    def __repr__(self):
        return str(self.name)
    def calculate_distance(self, residue, carbon):
        r"""
        Calculate the Euclidean distance between this residue and another
        using the Einstein summation convention (*numpy.einsum*).

        .. math::
            distance = \sqrt{(x_a-x_b)^2+(y_a-y_b)^2+(z_a-z_b)^2}

        Args:
            residue (Residue): The residue to measure the distance to.
            carbon (str): Which carbon to use, "CA" or "CB".  Any other
                value leaves ``a_min_b`` unbound and raises
                ``UnboundLocalError`` below.

        Returns:
            float: The calculated distance.
        """
        if carbon == "CA":
            a_min_b = self.ca_atom.coords - residue.ca_atom.coords
        elif carbon == "CB":
            a_min_b = self.cb_atom.coords - residue.cb_atom.coords
        # The Numpy's "einsum" function is the most efficient way to calculate a distance
        # accordingly to this benchmarking: https://stackoverflow.com/a/47775357/6401758
        dist = np.sqrt(np.einsum('i,i->', a_min_b, a_min_b))
        return dist
| [
"numpy.einsum",
"src.atom.Atom"
] | [((495, 505), 'src.atom.Atom', 'Atom', (['"""CA"""'], {}), "('CA')\n", (499, 505), False, 'from src.atom import Atom\n'), ((529, 539), 'src.atom.Atom', 'Atom', (['"""CB"""'], {}), "('CB')\n", (533, 539), False, 'from src.atom import Atom\n'), ((562, 571), 'src.atom.Atom', 'Atom', (['"""C"""'], {}), "('C')\n", (566, 571), False, 'from src.atom import Atom\n'), ((594, 603), 'src.atom.Atom', 'Atom', (['"""N"""'], {}), "('N')\n", (598, 603), False, 'from src.atom import Atom\n'), ((1712, 1748), 'numpy.einsum', 'np.einsum', (['"""i,i->"""', 'a_min_b', 'a_min_b'], {}), "('i,i->', a_min_b, a_min_b)\n", (1721, 1748), True, 'import numpy as np\n')] |
import numpy
import ipdb
import util
import dtw
def score(model, orig_mat, list_of_random_startend):
    """Score a DMP model by total DTW distance to the original trajectory.

    For every (start, end) pair, generalize the model to a new trajectory
    and measure its dynamic-time-warping distance (Euclidean point metric)
    from *orig_mat*.

    Returns:
        tuple: (sum of DTW distances, zip of (generated matrix, distance)
        pairs for debugging).
    """
    gen_mats = [
        util.generalize_via_dmp(pair[0].copy(), pair[1].copy(), model)
        for pair in list_of_random_startend
    ]
    distances = [
        dtw.dtw(orig_mat, gen, dist=lambda x, y: numpy.linalg.norm(x - y, ord=2))[0]
        for gen in gen_mats
    ]
    debug_var = zip(gen_mats, distances)
    return sum(distances), debug_var
| [
"util.generalize_via_dmp",
"numpy.linalg.norm"
] | [((294, 344), 'util.generalize_via_dmp', 'util.generalize_via_dmp', (['now_start', 'now_end', 'model'], {}), '(now_start, now_end, model)\n', (317, 344), False, 'import util\n'), ((515, 546), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(x - y)'], {'ord': '(2)'}), '(x - y, ord=2)\n', (532, 546), False, 'import numpy\n')] |
#pragma pylint: disable=fixme,line-too-long,missing-docstring,invalid-name,no-member,dangerous-default-value,protected-access,unused-import,assignment-from-no-return,redefined-outer-name
import sys
import math
from ctypes import sizeof, c_float, c_void_p, c_uint, string_at
from OpenGL.GL import * #pylint: disable=unused-wildcard-import,wildcard-import
import numpy as np
import imgui
def vec2(x, y=None):
    """Build a float32 2-vector; vec2(a) broadcasts a to both components."""
    if y is None:
        y = x
    return np.array([x, y], dtype=np.float32)
def vec3(x, y=None, z=None):
    """Build a float32 3-vector.

    vec3(a) gives [a, a, a]; vec3(a, b) repeats the LAST given component,
    giving [a, b, b] (note: b, not a); vec3(a, b, c) gives [a, b, c].
    """
    if y is None:
        components = (x, x, x)
    elif z is None:
        components = (x, y, y)
    else:
        components = (x, y, z)
    return np.array(components, dtype=np.float32)
# This is a helper class to provide the ability to use * for matrix/matrix and matrix/vector multiplication.
# It also helps out uploading constants and a few other operations as python does not support overloading functions.
# Note that a vector is just represented as a list on floats, and we rely on numpy to take care of the
class Mat4:
    """Row-major 4x4 matrix with operator overloads for matrix*matrix and
    matrix*vector products, plus helpers for uploading to OpenGL.

    Vectors are represented as plain sequences of floats; numpy performs
    the arithmetic.
    """
    matData = None

    def __init__(self, p=None):
        """Construct from a nested list/array, from a Mat3 (embedded in the
        upper-left 3x3 of an identity), or -- with no argument -- as the
        4x4 identity.

        The mutable default-list argument of the original was replaced by a
        None sentinel (same resulting matrix, avoids the shared-mutable-
        default pitfall).
        """
        if p is None:
            self.matData = np.matrix(np.identity(4))
        elif isinstance(p, Mat3):
            self.matData = np.matrix(np.identity(4))
            self.matData[:3, :3] = p.matData
        else:
            self.matData = np.matrix(p)

    # overload the multiplication operator to enable sane looking transformation expressions!
    def __mul__(self, other):
        """Multiply by another Mat4 (returns a new Mat4) or by a 4-element
        list (returns a flat float32 ndarray)."""
        # A list is interpreted as a vector; numpy converts the data.
        if isinstance(other, list):
            return np.array(self.matData.dot(other).flat, dtype=np.float32)
        # Otherwise assume a Mat4 (or compatible) and return the product.
        return Mat4(self.matData.dot(other.matData))

    def getData(self):
        """Return the matrix as a contiguous float32 array for OpenGL upload."""
        return np.ascontiguousarray(self.matData, dtype=np.float32)

    def _inverse(self):
        """Return an inverted copy (use the module-level inverse() for clarity)."""
        return Mat4(np.linalg.inv(self.matData))

    def _transpose(self):
        """Return a transposed copy (use the module-level transpose() for clarity)."""
        return Mat4(self.matData.T)

    def _set_open_gl_uniform(self, loc):
        # GL_TRUE transposes on upload since our storage is row-major.
        glUniformMatrix4fv(loc, 1, GL_TRUE, self.getData())
class Mat3:
    """Row-major 3x3 matrix; see Mat4 for the general contract. Can be
    constructed from a Mat4 by taking its upper-left 3x3 part."""
    matData = None

    def __init__(self, p=None):
        """Construct from a nested list/array, from a Mat4 (upper-left 3x3),
        or -- with no argument -- as the 3x3 identity.

        The mutable default-list argument of the original was replaced by a
        None sentinel (same resulting matrix, avoids the shared-mutable-
        default pitfall).
        """
        if p is None:
            self.matData = np.matrix(np.identity(3))
        elif isinstance(p, Mat4):
            self.matData = p.matData[:3, :3]
        else:
            self.matData = np.matrix(p)

    # overload the multiplication operator to enable sane looking transformation expressions!
    def __mul__(self, other):
        """Multiply by another Mat3 (returns a new Mat3) or by a 3-element
        list/ndarray (returns a flat float32 ndarray)."""
        # A list/array is interpreted as a vector; numpy converts the data.
        if isinstance(other, list) or isinstance(other, np.ndarray):
            return np.array(self.matData.dot(other).flat, dtype=np.float32)
        # Otherwise assume a Mat3 (or compatible) and return the product.
        return Mat3(self.matData.dot(other.matData))

    def getData(self):
        """Return the matrix as a contiguous float32 array for OpenGL upload."""
        return np.ascontiguousarray(self.matData, dtype=np.float32)

    def _inverse(self):
        """Return an inverted copy (use the module-level inverse() for clarity)."""
        return Mat3(np.linalg.inv(self.matData))

    def _transpose(self):
        """Return a transposed copy (use the module-level transpose() for clarity)."""
        return Mat3(self.matData.T)

    def _set_open_gl_uniform(self, loc):
        # GL_TRUE transposes on upload since our storage is row-major.
        glUniformMatrix3fv(loc, 1, GL_TRUE, self.getData())
#
# matrix consruction functions
#
def make_translation(x, y, z):
    """Build a 4x4 translation matrix (row-major; translation in the last column)."""
    rows = [[1, 0, 0, x],
            [0, 1, 0, y],
            [0, 0, 1, z],
            [0, 0, 0, 1]]
    return Mat4(rows)
def make_scale(x, y, z):
    """Build a 4x4 non-uniform scaling matrix with factors x, y, z."""
    rows = [[x, 0, 0, 0],
            [0, y, 0, 0],
            [0, 0, z, 0],
            [0, 0, 0, 1]]
    return Mat4(rows)
def make_rotation_y(angle):
    """Build a 4x4 rotation about the y axis by `angle` radians."""
    c = math.cos(angle)
    s = math.sin(angle)
    return Mat4([[c, 0, s, 0],
                 [0, 1, 0, 0],
                 [-s, 0, c, 0],
                 [0, 0, 0, 1]])
def make_rotation_x(angle):
    """Build a 4x4 rotation about the x axis by `angle` radians."""
    c = math.cos(angle)
    s = math.sin(angle)
    return Mat4([[1, 0, 0, 0],
                 [0, c, -s, 0],
                 [0, s, c, 0],
                 [0, 0, 0, 1]])
def make_rotation_z(angle):
    """Build a 4x4 rotation about the z axis by `angle` radians."""
    c = math.cos(angle)
    s = math.sin(angle)
    return Mat4([[c, -s, 0, 0],
                 [s, c, 0, 0],
                 [0, 0, 1, 0],
                 [0, 0, 0, 1]])
#
# This function creates a rotation and translation matrix that aligns the z axis of the matrix with the given direction vector (zAzis)
# very useful for placing objects that have been modelled with the local space z-axis as 'forwards' in the world.
# To form the orthogonal basis, the given yAxis (typically representing 'up' if the z-axis is 'forwards') is used to align the resulting y-axis
# as far as possible, while ensuring it is perpendicular to the zAxis.
# The 'translation' becomes the translation of the resulting matrix.
# (For some reason glm seems to lack this highly useful functionality)
def make_mat4_from_zAxis(translation, zAxis, yAxis):
    """Build a rigid transform whose local z axis points along `zAxis`.

    `yAxis` is a rough 'up' used to complete the orthonormal basis (it is
    made perpendicular to zAxis via cross products); `translation` becomes
    the translation column of the resulting Mat4.
    """
    # z axis: the requested direction, normalized.
    z = normalize(zAxis)
    # x axis: perpendicular to both the rough up and z.
    x = normalize(cross(yAxis, z))
    # y axis: perpendicular to z and x by construction.
    y = cross(z, x)
    rows = [
        [x[0], y[0], z[0], translation[0]],
        [x[1], y[1], z[1], translation[1]],
        [x[2], y[2], z[2], translation[2]],
        [0, 0, 0, 1],
    ]
    return Mat4(rows)
#
# Matrix operations
#
# note: returns an inverted copy, does not change the object (for clarity use the global function instead)
def inverse(mat):
    """Return an inverted copy of `mat` (Mat3 or Mat4); `mat` is not modified."""
    return mat._inverse()
def transpose(mat):
    """Return a transposed copy of `mat` (Mat3 or Mat4); `mat` is not modified."""
    return mat._transpose()
#
# vector operations
#
def normalize(v):
    """Return `v` scaled to unit length (Euclidean norm)."""
    return v / np.linalg.norm(v)
def length(v):
    """Return the Euclidean length of vector `v`."""
    return np.linalg.norm(v)
def cross(a, b):
    """Return the 3D cross product a x b."""
    return np.cross(a, b)
# Linearly interpolate from v0 to v1, t in [0,1] named to match GLSL
def mix(v0, v1, t):
    """Linearly interpolate from v0 (t=0) to v1 (t=1); named after GLSL mix()."""
    w0 = 1.0 - t
    return w0 * v0 + t * v1
def dot(a, b):
    """Return the dot product of vectors a and b."""
    return np.dot(a, b)
# The reason we need a 'look from', and don't just use lookAt(pos, pos+dir, up) is because if pos is large (i.e., far from the origin) and 'dir' is a unit vector (common case)
# then the precision loss in the addition followed by subtraction in lookAt to get the direction back is _significant_, and leads to jerky camera movements.
def make_lookFrom(eye, direction, up):
    """Build a world-to-view transform from a camera position and a *view
    direction* (instead of a target point, which would lose precision when
    `eye` is far from the origin -- see the comment above)."""
    forward = normalize(direction)
    side = normalize(np.cross(forward, np.array(up[:3])))
    upOrtho = np.cross(side, forward)
    rot = np.matrix(np.identity(4))
    # Rows of the rotation are the view-space basis; view 'forwards' is -z.
    rot[:3, :3] = np.vstack([side, upOrtho, -forward])
    return Mat4(rot) * make_translation(-eye[0], -eye[1], -eye[2])
# make_lookAt defines a view transform, i.e., from world to view space, using intuitive parameters. location of camera, point to aim, and rough up direction.
# this is basically the same as what we saw in Lecture #2 for placing the car in the world, except the inverse! (and also view-space 'forwards' is the negative z-axis)
def make_lookAt(eye, target, up):
    """View transform placing the camera at `eye`, aimed at `target`, with
    `up` as the rough up direction (delegates to make_lookFrom)."""
    direction = np.array(target[:3]) - np.array(eye[:3])
    return make_lookFrom(eye, direction, up)
def make_perspective(yFovDeg, aspect, n, f):
    """OpenGL-style perspective projection matrix.

    yFovDeg: vertical field of view in degrees; aspect: width/height;
    n, f: near and far clip plane distances.
    """
    tanHalfFovY = math.tan(math.radians(yFovDeg) / 2.0)
    sx = 1.0 / (tanHalfFovY * aspect)
    sy = 1.0 / tanHalfFovY
    zz = -(f + n) / (f - n)
    zw = -(2.0 * f * n) / (f - n)
    return Mat4([[sx, 0, 0, 0],
                 [0, sy, 0, 0],
                 [0, 0, zz, zw],
                 [0, 0, -1, 0]])
# Turns a multidimensional array (up to 3d?) into a 1D array
def flatten(*lll):
    """Flatten the innermost elements of the given lists-of-lists into one
    flat list, preserving order. Each positional argument is itself a list
    of sequences."""
    out = []
    for outer in lll:
        for inner in outer:
            out.extend(inner)
    return out
def uploadFloatData(bufferObject, floatData):
    """Flatten `floatData` and upload it to `bufferObject` as the contents
    of GL_ARRAY_BUFFER.

    The upload is anonymous binary data; the type/layout information is
    supplied later via glVertexAttribPointer.
    """
    values = flatten(floatData)
    raw = (c_float * len(values))(*values)
    glBindBuffer(GL_ARRAY_BUFFER, bufferObject)
    glBufferData(GL_ARRAY_BUFFER, raw, GL_STATIC_DRAW)
def createVertexArrayObject():
    """Create and return a single new OpenGL vertex array object id."""
    return glGenVertexArrays(1)
def createAndAddVertexArrayData(vertexArrayObject, data, attributeIndex):
    """Create a buffer, upload `data` (a list of equally-sized float tuples)
    and bind it to `attributeIndex` of `vertexArrayObject` as a float
    attribute stream. Returns the new buffer id."""
    glBindVertexArray(vertexArrayObject)
    buffer = glGenBuffers(1)
    uploadFloatData(buffer, data)
    glBindBuffer(GL_ARRAY_BUFFER, buffer)
    # Component count per vertex is taken from the first element of `data`.
    glVertexAttribPointer(attributeIndex, len(data[0]), GL_FLOAT, GL_FALSE, 0,
                          None)
    glEnableVertexAttribArray(attributeIndex)
    # Unbind the buffers again to avoid unintentional GL state corruption (this is something that can be rather inconvenient to debug)
    glBindBuffer(GL_ARRAY_BUFFER, 0)
    glBindVertexArray(0)
    return buffer
def createAndAddIndexArray(vertexArrayObject, indexData):
    """Create a buffer, upload `indexData` (unsigned ints) and attach it as
    the element (index) array of `vertexArrayObject`. Returns the buffer id."""
    glBindVertexArray(vertexArrayObject)
    indexBuffer = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, indexBuffer)
    data_buffer = (c_uint * len(indexData))(*indexData)
    glBufferData(GL_ARRAY_BUFFER, data_buffer, GL_STATIC_DRAW)
    # Bind the index buffer as the element array buffer of the VAO - this causes it to stay bound to this VAO - fairly unobvious.
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuffer)
    # Unbind the buffers again to avoid unintentional GL state corruption (this is something that can be rather inconvenient to debug)
    glBindBuffer(GL_ARRAY_BUFFER, 0)
    glBindVertexArray(0)
    return indexBuffer
def getShaderInfoLog(obj):
    """Return the info (compile) log of shader object `obj` as a string,
    or "" when the log is empty."""
    if glGetShaderiv(obj, GL_INFO_LOG_LENGTH) > 0:
        return glGetShaderInfoLog(obj).decode()
    return ""
#
# This function performs the steps needed to compile the source code for a
# shader stage (e.g., vertex / fragment) and attach it to a shader program object.
#
def compileAndAttachShader(shaderProgram, shaderType, sources):
    """Compile `sources` as a shader stage of `shaderType` and attach it to
    `shaderProgram`.

    Returns True on success; on a compile error the log is printed and
    False is returned (nothing is attached).
    """
    # Create the opengl shader object
    shader = glCreateShader(shaderType)
    # upload the source code for the shader
    # Note the function takes an array of source strings and lengths.
    glShaderSource(shader, sources)
    glCompileShader(shader)
    # If there is a syntax or other compiler error during shader compilation,
    # we'd like to know
    compileOk = glGetShaderiv(shader, GL_COMPILE_STATUS)
    if not compileOk:
        err = getShaderInfoLog(shader)
        print("SHADER COMPILE ERROR: '%s'" % err)
        return False
    glAttachShader(shaderProgram, shader)
    # Flag the shader object for deletion; it is freed once the program
    # no longer references it.
    glDeleteShader(shader)
    return True
# creates a shader with a vertex and fragment shader that binds a map of attribute streams
# to the shader and the also any number of output shader variables
# The fragDataLocs can be left out for programs that don't use multiple render targets as
# the default for any output variable is zero.
def buildShader(vertexShaderSources,
                fragmentShaderSources,
                attribLocs,
                fragDataLocs=None):
    """Create, compile and link a GLSL program.

    Args:
        vertexShaderSources / fragmentShaderSources: lists of source strings.
        attribLocs: dict mapping vertex attribute names to stream indices.
        fragDataLocs: optional dict mapping fragment output names to render
            target indices. Can be left out for programs with a single
            render target, as the default output binding is zero.

    Returns:
        The linked program id, or (implicitly) None if a shader stage
        failed to compile. Exits the process on a link error.
    """
    # None sentinel instead of a mutable default dict (shared-default pitfall).
    if fragDataLocs is None:
        fragDataLocs = {}
    shader = glCreateProgram()
    if compileAndAttachShader(shader, GL_VERTEX_SHADER,
                              vertexShaderSources) and compileAndAttachShader(
                                  shader, GL_FRAGMENT_SHADER,
                                  fragmentShaderSources):
        # Link the attribute names we used in the vertex shader to the integer index
        for name, loc in attribLocs.items():
            glBindAttribLocation(shader, loc, name)
        # If we have multiple images bound as render targets, we need to specify which
        # 'out' variable in the fragment shader goes where; with a single target this
        # is redundant since the default binding is always zero.
        for name, loc in fragDataLocs.items():
            glBindFragDataLocation(shader, loc, name)
        # Linking can fail, e.g. when the vertex and fragment shaders don't have
        # compatible out/in variables.
        glLinkProgram(shader)
        linkStatus = glGetProgramiv(shader, GL_LINK_STATUS)
        if not linkStatus:
            err = glGetProgramInfoLog(shader)
            print("SHADER LINKER ERROR: '%s'" % err)
            sys.exit(1)
        return shader
# Helper for debugging, if uniforms appear to not be set properly, you can set a breakpoint here,
# or uncomment the printing code. If the 'loc' returned is -1, then the variable is either not
# declared at all in the shader or it is not used and therefore removed by the optimizing shader compiler.
def getUniformLocationDebug(shaderProgram, name):
    """Look up a uniform location; kept as a wrapper so a breakpoint/logging
    can be added. A return of -1 means the uniform is undeclared or was
    optimized away by the shader compiler."""
    loc = glGetUniformLocation(shaderProgram, name)
    # Useful point for debugging, replace with silencable logging
    # TODO: should perhaps replace this with the standard python logging facilities
    #if loc == -1:
    #    print("Uniforn '%s' was not found"%name)
    return loc
# Helper to set uniforms of different types, looks the way it does since Python does not have support for
# function overloading (as C++ has for example). This function covers the types used in the code here, but
# makes no claim of completeness. The last case is for Mat3/Mat4 (above), and if you get an exception
# on that line, it is likely because the function was called with a value of an unsupported type.
def setUniform(shaderProgram, uniformName, value):
    """Set a uniform on `shaderProgram`, dispatching on the Python type of
    `value` (Python has no function overloading).

    Supported types: float, int, 2/3/4-element list or ndarray, Mat3/Mat4.

    Raises:
        TypeError: for unsupported value types. (Previously this was a bare
        `assert False`, which is silently stripped under `python -O`.)
    """
    loc = getUniformLocationDebug(shaderProgram, uniformName)
    if isinstance(value, float):
        glUniform1f(loc, value)
    elif isinstance(value, int):
        glUniform1i(loc, value)
    elif isinstance(value, (np.ndarray, list)):
        if len(value) == 2:
            glUniform2fv(loc, 1, value)
        if len(value) == 3:
            glUniform3fv(loc, 1, value)
        if len(value) == 4:
            glUniform4fv(loc, 1, value)
    elif isinstance(value, (Mat3, Mat4)):
        value._set_open_gl_uniform(loc)
    else:
        # Unsupported type: either add a new case above or fix the caller.
        raise TypeError(
            "setUniform: unsupported type %s for uniform '%s'"
            % (type(value).__name__, uniformName))
# Helper function to extend a 3D point to homogeneous, transform it and back again.
# (For practically all cases except projection, the W component is still 1 after,
# but this covers the correct implementation).
# Note that it does not work for vectors! For vectors we're usually better off just using the 3x3 part of the matrix.
def transformPoint(mat4x4, point):
    """Transform a 3D point by a Mat4: extend to homogeneous coordinates,
    multiply, and divide by the resulting w (w stays 1 for everything but
    projections). Not valid for direction vectors -- use the 3x3 part."""
    px, py, pz, w = mat4x4 * [point[0], point[1], point[2], 1.0]
    return vec3(px, py, pz) / w
# just a wrapper to convert the returned tuple to a list...
def imguiX_color_edit3_list(label, v):
    """Wrapper around imgui.color_edit3 that returns the colour as a list
    instead of a tuple. Returns (changed_flag, [r, g, b])."""
    a, b = imgui.color_edit3(
        label,
        *v)  #, imgui.GuiColorEditFlags_Float);// | ImGuiColorEditFlags_HSV);
    return a, list(b)
# Recursively subdivide a triangle with its vertices on the surface of the unit sphere such that the new vertices also are on part of the unit sphere.
def subDivide(dest, v0, v1, v2, level):
    """Recursively subdivide the triangle (v0, v1, v2), whose vertices lie on
    the unit sphere, appending the final vertices to `dest`.

    Each level splits the triangle into four; edge midpoints are normalized
    so the new vertices are pushed back onto the unit sphere.
    """
    if not level:
        # Terminating level: emit the triangle's vertices.
        dest.extend((v0, v1, v2))
        return
    # Midpoints of the three edges, re-projected onto the sphere.
    m01 = normalize(v0 + v1)
    m12 = normalize(v1 + v2)
    m20 = normalize(v2 + v0)
    # Recurse into the four equal sub-triangles (same order as before).
    subDivide(dest, v0, m01, m20, level - 1)
    subDivide(dest, m01, m12, m20, level - 1)
    subDivide(dest, m01, v1, m12, level - 1)
    subDivide(dest, m20, m12, v2, level - 1)
def createSphere(numSubDivisionLevels):
    """Tessellate the unit sphere: start from an octahedron (8 triangles in
    a diamond shape) and subdivide each face `numSubDivisionLevels` times.
    Returns a flat list of vertex positions (3 per triangle)."""
    top = vec3(0, 1, 0)
    bottom = vec3(0, -1, 0)
    px = vec3(1, 0, 0)
    nx = vec3(-1, 0, 0)
    pz = vec3(0, 0, 1)
    nz = vec3(0, 0, -1)
    # Faces listed in the same order as the original explicit calls.
    octahedron = [
        (top, pz, px), (top, px, nz), (top, nz, nx), (top, nx, pz),
        (bottom, px, pz), (bottom, pz, nx), (bottom, nx, nz), (bottom, nz, px),
    ]
    sphereVerts = []
    for a, b, c in octahedron:
        subDivide(sphereVerts, a, b, c, numSubDivisionLevels)
    return sphereVerts
# Module-level cache for the debug sphere: the VAO, shader and vertex count
# are built lazily on the first drawSphere() call and reused afterwards.
g_sphereVertexArrayObject = None
g_sphereShader = None
g_numSphereVerts = 0
def drawSphere(position, radius, sphereColour, view):
    """Draw a sphere of `radius` at `position`, flat-lit in `sphereColour`
    (RGBA sequence).

    `view` must expose viewToClipTransform and worldToViewTransform (Mat4).
    The sphere mesh and its shader are created lazily on the first call and
    cached in the module-level globals above.
    """
    global g_sphereVertexArrayObject
    global g_sphereShader
    global g_numSphereVerts
    # Model transform: uniform scale, then translation.
    modelToWorldTransform = make_translation(position[0], position[1],
                                             position[2]) * make_scale(
                                                 radius, radius, radius)
    # Lazy one-time setup of the mesh and shader.
    if not g_sphereVertexArrayObject:
        sphereVerts = createSphere(3)
        g_numSphereVerts = len(sphereVerts)
        g_sphereVertexArrayObject = createVertexArrayObject()
        createAndAddVertexArrayData(g_sphereVertexArrayObject, sphereVerts, 0)
        # redundantly add as normals... (on the unit sphere position == normal)
        createAndAddVertexArrayData(g_sphereVertexArrayObject, sphereVerts, 1)
        vertexShader = """
    #version 330
    in vec3 positionIn;
    in vec3 normalIn;
    uniform mat4 modelToClipTransform;
    uniform mat4 modelToViewTransform;
    uniform mat3 modelToViewNormalTransform;
    // 'out' variables declared in a vertex shader can be accessed in the subsequent stages.
    // For a fragment shader the variable is interpolated (the type of interpolation can be modified, try placing 'flat' in front here and in the fragment shader!).
    out VertexData
    {
        vec3 v2f_viewSpacePosition;
        vec3 v2f_viewSpaceNormal;
    };
    void main()
    {
        v2f_viewSpacePosition = (modelToViewTransform * vec4(positionIn, 1.0)).xyz;
        v2f_viewSpaceNormal = normalize(modelToViewNormalTransform * normalIn);
        // gl_Position is a buit-in 'out'-variable that gets passed on to the clipping and rasterization stages (hardware fixed function).
        // it must be written by the vertex shader in order to produce any drawn geometry.
        // We transform the position using one matrix multiply from model to clip space. Note the added 1 at the end of the position to make the 3D
        // coordinate homogeneous.
        gl_Position = modelToClipTransform * vec4(positionIn, 1.0);
    }
    """
        fragmentShader = """
    #version 330
    // Input from the vertex shader, will contain the interpolated (i.e., area weighted average) vaule out put for each of the three vertex shaders that
    // produced the vertex data for the triangle this fragmet is part of.
    in VertexData
    {
        vec3 v2f_viewSpacePosition;
        vec3 v2f_viewSpaceNormal;
    };
    uniform vec4 sphereColour;
    out vec4 fragmentColor;
    void main()
    {
        float shading = max(0.0, dot(normalize(-v2f_viewSpacePosition), v2f_viewSpaceNormal));
        fragmentColor = vec4(sphereColour.xyz * shading, sphereColour.w);
    }
    """
        g_sphereShader = buildShader([vertexShader], [fragmentShader], {
            "positionIn": 0,
            "normalIn": 1
        })
    glUseProgram(g_sphereShader)
    setUniform(g_sphereShader, "sphereColour", sphereColour)
    # Precompute the transform chain and upload the uniforms.
    modelToClipTransform = view.viewToClipTransform * view.worldToViewTransform * modelToWorldTransform
    modelToViewTransform = view.worldToViewTransform * modelToWorldTransform
    # Normals transform with the inverse-transpose of the 3x3 view transform.
    modelToViewNormalTransform = inverse(transpose(Mat3(modelToViewTransform)))
    setUniform(g_sphereShader, "modelToClipTransform", modelToClipTransform)
    setUniform(g_sphereShader, "modelToViewTransform", modelToViewTransform)
    setUniform(g_sphereShader, "modelToViewNormalTransform",
               modelToViewNormalTransform)
    glBindVertexArray(g_sphereVertexArrayObject)
    glDrawArrays(GL_TRIANGLES, 0, g_numSphereVerts)
def bindTexture(texUnit, textureId, defaultTexture=None):
    """Bind `textureId` to texture unit `texUnit`; when textureId is -1
    (meaning 'no texture'), bind `defaultTexture` instead."""
    glActiveTexture(GL_TEXTURE0 + texUnit)
    glBindTexture(GL_TEXTURE_2D, textureId
                  if textureId != -1 else defaultTexture)
| [
"numpy.identity",
"sys.exit",
"numpy.cross",
"math.tan",
"imgui.color_edit3",
"math.radians",
"numpy.ascontiguousarray",
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"math.cos",
"numpy.vstack",
"numpy.linalg.norm",
"numpy.matrix",
"math.sin"
] | [((501, 535), 'numpy.array', 'np.array', (['[x, y]'], {'dtype': 'np.float32'}), '([x, y], dtype=np.float32)\n', (509, 535), True, 'import numpy as np\n'), ((728, 765), 'numpy.array', 'np.array', (['[x, y, z]'], {'dtype': 'np.float32'}), '([x, y, z], dtype=np.float32)\n', (736, 765), True, 'import numpy as np\n'), ((6813, 6830), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (6827, 6830), True, 'import numpy as np\n'), ((6884, 6901), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (6898, 6901), True, 'import numpy as np\n'), ((6936, 6950), 'numpy.cross', 'np.cross', (['a', 'b'], {}), '(a, b)\n', (6944, 6950), True, 'import numpy as np\n'), ((7114, 7126), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (7120, 7126), True, 'import numpy as np\n'), ((7545, 7561), 'numpy.array', 'np.array', (['up[:3]'], {}), '(up[:3])\n', (7553, 7561), True, 'import numpy as np\n'), ((7606, 7620), 'numpy.cross', 'np.cross', (['s', 'f'], {}), '(s, f)\n', (7614, 7620), True, 'import numpy as np\n'), ((7673, 7694), 'numpy.vstack', 'np.vstack', (['[s, u, -f]'], {}), '([s, u, -f])\n', (7682, 7694), True, 'import numpy as np\n'), ((8282, 8303), 'math.radians', 'math.radians', (['yFovDeg'], {}), '(yFovDeg)\n', (8294, 8303), False, 'import math\n'), ((8323, 8346), 'math.tan', 'math.tan', (['(radFovY / 2.0)'], {}), '(radFovY / 2.0)\n', (8331, 8346), False, 'import math\n'), ((15913, 15941), 'imgui.color_edit3', 'imgui.color_edit3', (['label', '*v'], {}), '(label, *v)\n', (15930, 15941), False, 'import imgui\n'), ((454, 488), 'numpy.array', 'np.array', (['[x, x]'], {'dtype': 'np.float32'}), '([x, x], dtype=np.float32)\n', (462, 488), True, 'import numpy as np\n'), ((605, 642), 'numpy.array', 'np.array', (['[x, x, x]'], {'dtype': 'np.float32'}), '([x, x, x], dtype=np.float32)\n', (613, 642), True, 'import numpy as np\n'), ((678, 715), 'numpy.array', 'np.array', (['[x, y, y]'], {'dtype': 'np.float32'}), '([x, y, y], dtype=np.float32)\n', (686, 715), True, 'import 
numpy as np\n'), ((2239, 2291), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['self.matData'], {'dtype': 'np.float32'}), '(self.matData, dtype=np.float32)\n', (2259, 2291), True, 'import numpy as np\n'), ((3841, 3893), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['self.matData'], {'dtype': 'np.float32'}), '(self.matData, dtype=np.float32)\n', (3861, 3893), True, 'import numpy as np\n'), ((7581, 7595), 'numpy.cross', 'np.cross', (['f', 'U'], {}), '(f, U)\n', (7589, 7595), True, 'import numpy as np\n'), ((7640, 7654), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (7651, 7654), True, 'import numpy as np\n'), ((1460, 1472), 'numpy.matrix', 'np.matrix', (['p'], {}), '(p)\n', (1469, 1472), True, 'import numpy as np\n'), ((2562, 2589), 'numpy.linalg.inv', 'np.linalg.inv', (['self.matData'], {}), '(self.matData)\n', (2575, 2589), True, 'import numpy as np\n'), ((3029, 3041), 'numpy.matrix', 'np.matrix', (['p'], {}), '(p)\n', (3038, 3041), True, 'import numpy as np\n'), ((4164, 4191), 'numpy.linalg.inv', 'np.linalg.inv', (['self.matData'], {}), '(self.matData)\n', (4177, 4191), True, 'import numpy as np\n'), ((8171, 8191), 'numpy.array', 'np.array', (['target[:3]'], {}), '(target[:3])\n', (8179, 8191), True, 'import numpy as np\n'), ((8194, 8211), 'numpy.array', 'np.array', (['eye[:3]'], {}), '(eye[:3])\n', (8202, 8211), True, 'import numpy as np\n'), ((13511, 13522), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (13519, 13522), False, 'import sys\n'), ((1355, 1369), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (1366, 1369), True, 'import numpy as np\n'), ((4674, 4689), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (4682, 4689), False, 'import math\n'), ((4694, 4709), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (4702, 4709), False, 'import math\n'), ((4769, 4784), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (4777, 4784), False, 'import math\n'), ((4874, 4889), 'math.cos', 'math.cos', (['angle'], {}), 
'(angle)\n', (4882, 4889), False, 'import math\n'), ((4935, 4950), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (4943, 4950), False, 'import math\n'), ((4952, 4967), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (4960, 4967), False, 'import math\n'), ((5040, 5055), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (5048, 5055), False, 'import math\n'), ((5102, 5117), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (5110, 5117), False, 'import math\n'), ((5146, 5161), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (5154, 5161), False, 'import math\n'), ((4749, 4764), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (4757, 4764), False, 'import math\n'), ((4892, 4907), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (4900, 4907), False, 'import math\n'), ((5058, 5073), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (5066, 5073), False, 'import math\n')] |
#!/usr/bin/env python3
# Developed by <NAME> and <NAME>
# This file is covered by the LICENSE file in the root of this project.
# Brief: A custom keras padding layer.
# pad([1 2 3 4], 2) -> [3, 4, 1, 2, 3, 4, 1]
import numpy as np
from keras.layers import Layer
from keras.models import Sequential
import keras.backend as K
class RangePadding2D(Layer):
    """ A keras layer which does horizontal wrap-around padding. The input
    tensor is padded in the width direction (axis 2, assuming the
    channels_last layout [batch, height, width, channels]).
    """
    def __init__(self, padding, **kwargs):
        """ Initialization of the layer.
        Args:
            padding: defines how much will be padded "before" and "after" the input.
               The input is padded in width direction like this:
               [ padding:end, original, beginning:padding-1]
               Usually one uses half of the width for this argument to have a symmetric padding
        """
        self.padding = padding
        super(RangePadding2D, self).__init__(**kwargs)
    def build(self, input_shape):
        # No trainable weights; just defer to the base implementation.
        super(RangePadding2D, self).build(input_shape)
    def call(self, inputs):
        # Wrap-pad along axis 2: [inputs[pad:], inputs, inputs[:pad-1]],
        # so the output width is always 2*W - 1 regardless of `padding`.
        # Only the TensorFlow backend is supported.
        if K.backend() == "tensorflow":
            # only do range padding in width dimension
            out = K.concatenate([inputs[:, :, self.padding:, :], inputs[:, :, :, :], inputs[:, :, :self.padding-1, :]],
                                axis=2)
        else:
            raise Exception("Backend " + K.backend() + "not implemented")
        return out
    def compute_output_shape(self, input_shape):
        # Width is W + W - 1 = 2*W - 1 (see call()); other dims unchanged.
        return (input_shape[0], input_shape[1], input_shape[2] + input_shape[2] - 1 , input_shape[3])
if __name__ == "__main__":
    # Smoke tests: run the padding layer on tiny 1- and 2-channel inputs and
    # print the padded tensors plus the shape predicted by
    # compute_output_shape (the two should agree).
    one_channel_test = True
    two_channel_test = True
    if two_channel_test:
        # set test data
        image_raw = [[1, 6], [2, 5], [3, 4], [4, 3], [5, 2], [6, 1]]
        # expand to channels_last format [batch, height, width, channels] = [1, 1, 6, 2]
        image = np.expand_dims(np.expand_dims(np.array(image_raw), 0), 0)
        print("input image shape: ", image.shape)
        print("Input:")
        print(image)
        # build Keras model
        model = Sequential()
        rlayer = RangePadding2D(padding=3, input_shape=(1, 6, 2))
        model.add(rlayer)
        model.build()
        # simply apply existing filter, we use predict with no training
        out = model.predict(image)
        print("Output shape: ",out.shape)
        print("result of compute_output_shape (should be the same):", rlayer.compute_output_shape(rlayer.input_shape))
        print("Output:")
        print(out)
    if one_channel_test:
        # one channel test
        image_raw = [[1, 2, 3, 4, 5, 6]]
        # pad to channels_last format
        # which is [batch, width, height, channels]=[1,1,6,1]
        image = np.expand_dims(np.expand_dims(np.array(image_raw), 2), 0)
        print("input image shape: ", image.shape)
        print("Input:")
        print(image)
        # build Keras model
        model = Sequential()
        rlayer = RangePadding2D(padding=3, input_shape=(1, 6, 1))
        model.add(rlayer)
        model.build()
        # simply apply existing filter, we use predict with no training
        out = model.predict(image)
        print("Output shape: ",out.shape)
        print("result of compute_output_shape (should be the same):", rlayer.compute_output_shape(rlayer.input_shape))
        print("Output:")
        print(out)
| [
"numpy.array",
"keras.backend.backend",
"keras.backend.concatenate",
"keras.models.Sequential"
] | [((1975, 1987), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1985, 1987), False, 'from keras.models import Sequential\n'), ((2760, 2772), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2770, 2772), False, 'from keras.models import Sequential\n'), ((1117, 1128), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (1126, 1128), True, 'import keras.backend as K\n'), ((1207, 1322), 'keras.backend.concatenate', 'K.concatenate', (['[inputs[:, :, self.padding:, :], inputs[:, :, :, :], inputs[:, :, :self.\n padding - 1, :]]'], {'axis': '(2)'}), '([inputs[:, :, self.padding:, :], inputs[:, :, :, :], inputs[:,\n :, :self.padding - 1, :]], axis=2)\n', (1220, 1322), True, 'import keras.backend as K\n'), ((1823, 1842), 'numpy.array', 'np.array', (['image_raw'], {}), '(image_raw)\n', (1831, 1842), True, 'import numpy as np\n'), ((2608, 2627), 'numpy.array', 'np.array', (['image_raw'], {}), '(image_raw)\n', (2616, 2627), True, 'import numpy as np\n'), ((1388, 1399), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (1397, 1399), True, 'import keras.backend as K\n')] |
import os.path
from algs.DQN.dqn_agent import DQN
from algs.agent import Agent
from configs.config_phaser import *
import torch
import numpy as np
import random
class METADQNAgent(Agent):
    """DQN agent for a meta-learning setup.

    The online network is a per-intersection DQN; the *target* ("bar")
    Q-values come from a list of pre-trained networks, one per traffic file,
    loaded from MODEL_DQN checkpoints (rather than from a periodically
    synced copy of the online network as in vanilla DQN).

    NOTE(review): attributes use name mangling (self.__conf_*), so they are
    not reachable from subclasses.
    """

    def __init__(self, conf_path, round_number, inter_name,
                 list_traffic_name=None):
        """Load configuration and build/restore the networks.

        Args:
            conf_path: config/path object; also provides load_conf_file().
            round_number: current round. Round 0 builds a fresh online net;
                later rounds restore the previous round's checkpoint.
            inter_name: intersection name, used in checkpoint file names.
            list_traffic_name: traffic-file names whose pre-trained networks
                serve as fixed targets (may be None).
        """
        super().__init__(conf_path, round_number, inter_name)
        self.__conf_path = conf_path
        self.__round_number = round_number
        self.__inter_name = inter_name
        self.__list_traffic_name = list_traffic_name
        # a patch: generator-produced sample dirs keep their config one level up
        if 'generator' in self.__conf_path.WORK_SAMPLE:
            config_dir = os.path.join(self.__conf_path.WORK_SAMPLE, '..')
        else:
            config_dir = os.path.join(self.__conf_path.WORK_SAMPLE)
        self.__conf_exp, self.__conf_agent, self.__conf_traffic = \
            conf_path.load_conf_file(
                config_dir=config_dir, inter_name=inter_name)
        # Anneal the exploration rate with the round number (from Agent base).
        self.__conf_agent = self.decay_epsilon(
            self.__conf_agent, self.__round_number)
        if self.__round_number == 0:
            self.build_network()
            self.load_network_bar(self.__list_traffic_name)
        else:
            self.load_network(self.__round_number - 1)
            self.load_network_bar(self.__list_traffic_name)

    def build_network(self):
        """Create a fresh online DQN plus its MSE loss and Adam optimizer."""
        self.model = DQN(self.__conf_traffic)
        self.lossfunc = torch.nn.MSELoss()
        self.optimizer = torch.optim.Adam(self.model.parameters())

    def build_network_bar(self):
        # Target networks are always loaded from pre-trained checkpoints
        # (see load_network_bar); nothing to build here.
        print('use pretrained.')
        pass

    def load_network(self, round_number):
        """ used for meta
        Restore the online network/optimizer/loss from this intersection's
        checkpoint of the given round.
        """
        file_name = self.__inter_name + "_round_%d" % round_number + '.pt'
        file_path = os.path.join(self.__conf_path.MODEL, file_name)
        ckpt = torch.load(file_path)
        self.build_network()
        self.model.load_state_dict(ckpt['state_dict'])
        self.optimizer.load_state_dict(ckpt['optimizer'])
        self.lossfunc.load_state_dict(ckpt['lossfunc'])

    def load_network_bar(self, traffic_files):
        """Load one pre-trained target ("bar") DQN per traffic file from the
        sibling MODEL_DQN directory; no-op when traffic_files is None."""
        if traffic_files is None:
            return
        self.list_model_bar = []
        for traffic_file in traffic_files:
            file_path = os.path.join(
                self.__conf_path.MODEL, '..', '..', 'MODEL_DQN', traffic_file)
            # Each traffic dir is assumed to hold a single checkpoint file.
            file_path = os.path.join(file_path, os.listdir(file_path)[0])
            ckpt = torch.load(file_path)
            model_target = DQN(self.__conf_traffic)
            model_target.load_state_dict((ckpt['state_dict']))
            self.list_model_bar.append(model_target)

    def choose_action(self, state, choice_random=True):
        """Epsilon-greedy action selection: a random action with probability
        EPSILON (when choice_random), else argmax of the online Q-values."""
        input = self.convert_state_to_input(self.__conf_traffic, state)
        input = torch.Tensor(input).flatten(0).unsqueeze(0)
        q_values = self.model.forward(input)
        if random.random() <= self.__conf_agent["EPSILON"] and choice_random:
            actions = random.randrange(len(q_values[0]))
        else:
            actions = np.argmax(q_values[0].detach().numpy())
        return actions

    def prepare_Xs_Y(self, sample_set, traffic_file_idx=None):
        """Build training pairs from transitions (state, action, next_state,
        reward, ...), using the bar network of `traffic_file_idx` for the
        bootstrap target, and accumulate them in self.Xs / self.Y.
        """
        if traffic_file_idx is None:
            raise ValueError('traffic_file should be input')
        state = []
        action = []
        next_state = []
        reward_avg = []
        # Features: phase index concatenated with per-lane vehicle counts
        # (presumably lists; verify against convert_state_to_input).
        for each in sample_set:
            state.append(each[0]['cur_phase_index'] + each[0]['lane_vehicle_cnt'])
            action.append(each[1])
            next_state.append(
                each[2]['cur_phase_index'] + each[2]['lane_vehicle_cnt'])
            reward_avg.append(each[3])
        q_values = self.model.forward(torch.Tensor(state)).detach().numpy()
        q_values_bar = self.list_model_bar[traffic_file_idx].forward(
            torch.Tensor(next_state)).detach().numpy()
        reward_avg = np.array(reward_avg) / self.__conf_traffic.NORMAL_FACTOR
        gamma = self.__conf_agent['GAMMA']
        range_idx = list(range(len(q_values)))
        # Bellman target written only at the taken actions.
        q_values[range_idx, action] = \
            reward_avg + gamma * np.max(q_values_bar, axis=-1)
        if 'Xs' not in self.__dict__.keys():
            self.Xs = np.array(state)
            self.Y = q_values
        else:
            self.Xs = np.vstack((self.Xs, np.array(state)))
            self.Y = np.vstack((self.Y, q_values))

    def train_network(self):
        """used for each task
        Run EPOCHS full-batch gradient steps on the accumulated (Xs, Y).
        """
        epochs = self.__conf_agent["EPOCHS"]
        for i in range(epochs):
            sample_x = self.Xs
            sample_y = self.Y
            yp = self.model.forward(torch.Tensor(sample_x))
            loss = self.lossfunc(yp, torch.Tensor(sample_y))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # print('%d memory, updating... %d, loss: %.4f'
            #       % (len(self.Y), i, loss.item()))

    def save_network(self, round_number):
        """Save model/optimizer/loss state dicts as
        <inter_name>_round_<round_number>.pt under the MODEL directory.
        """
        file_path = os.path.join(
            self.__conf_path.MODEL,
            self.__inter_name + '_round_%d' % round_number + '.pt')
        ckpt = {'state_dict': self.model.state_dict(),
                'optimizer': self.optimizer.state_dict(),
                'lossfunc': self.lossfunc.state_dict()}
        torch.save(ckpt, file_path)
| [
"torch.load",
"torch.Tensor",
"numpy.max",
"torch.nn.MSELoss",
"numpy.array",
"numpy.vstack",
"torch.save",
"algs.DQN.dqn_agent.DQN",
"random.random"
] | [((1336, 1360), 'algs.DQN.dqn_agent.DQN', 'DQN', (['self.__conf_traffic'], {}), '(self.__conf_traffic)\n', (1339, 1360), False, 'from algs.DQN.dqn_agent import DQN\n'), ((1385, 1403), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (1401, 1403), False, 'import torch\n'), ((1790, 1811), 'torch.load', 'torch.load', (['file_path'], {}), '(file_path)\n', (1800, 1811), False, 'import torch\n'), ((5238, 5265), 'torch.save', 'torch.save', (['ckpt', 'file_path'], {}), '(ckpt, file_path)\n', (5248, 5265), False, 'import torch\n'), ((2397, 2418), 'torch.load', 'torch.load', (['file_path'], {}), '(file_path)\n', (2407, 2418), False, 'import torch\n'), ((2446, 2470), 'algs.DQN.dqn_agent.DQN', 'DQN', (['self.__conf_traffic'], {}), '(self.__conf_traffic)\n', (2449, 2470), False, 'from algs.DQN.dqn_agent import DQN\n'), ((3822, 3842), 'numpy.array', 'np.array', (['reward_avg'], {}), '(reward_avg)\n', (3830, 3842), True, 'import numpy as np\n'), ((4139, 4154), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (4147, 4154), True, 'import numpy as np\n'), ((4280, 4309), 'numpy.vstack', 'np.vstack', (['(self.Y, q_values)'], {}), '((self.Y, q_values))\n', (4289, 4309), True, 'import numpy as np\n'), ((2833, 2848), 'random.random', 'random.random', ([], {}), '()\n', (2846, 2848), False, 'import random\n'), ((4042, 4071), 'numpy.max', 'np.max', (['q_values_bar'], {'axis': '(-1)'}), '(q_values_bar, axis=-1)\n', (4048, 4071), True, 'import numpy as np\n'), ((4557, 4579), 'torch.Tensor', 'torch.Tensor', (['sample_x'], {}), '(sample_x)\n', (4569, 4579), False, 'import torch\n'), ((4618, 4640), 'torch.Tensor', 'torch.Tensor', (['sample_y'], {}), '(sample_y)\n', (4630, 4640), False, 'import torch\n'), ((4241, 4256), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (4249, 4256), True, 'import numpy as np\n'), ((2732, 2751), 'torch.Tensor', 'torch.Tensor', (['input'], {}), '(input)\n', (2744, 2751), False, 'import torch\n'), ((3638, 3657), 'torch.Tensor', 
'torch.Tensor', (['state'], {}), '(state)\n', (3650, 3657), False, 'import torch\n'), ((3758, 3782), 'torch.Tensor', 'torch.Tensor', (['next_state'], {}), '(next_state)\n', (3770, 3782), False, 'import torch\n')] |
import matplotlib.pyplot as plt
import numpy as np
import os,glob,sys,importlib,pickle#,scipy,coolbox,pybedtools,
# from tqdm import tqdm
from scipy.stats import rankdata
import pandas as pd
import networkx as nx
import seaborn as sns
from joblib import delayed, wrap_non_picklable_objects
from pathlib import Path
import plotly
from numba import jit
from joblib import Parallel
import sklearn.utils as sku
import plotly.graph_objects as go
import plotly.express as px
# j=sys.argv[1]
from urllib import request
import xml.etree.ElementTree as ET
import urllib
sys.path.insert(1, './nestedness_analysis/')
import nestedness_metrics_other_functions
from nestedness_metrics_other_functions import from_edges_to_matrix
# importlib.reload(sys.modules['EO_functions_bipartite'])
import extremal_bi
@delayed
@wrap_non_picklable_objects
def bip(cc,net,ff,C,patt):
    """Build the spec/gene bipartite graphs for one sample column *net*.

    Re-reads the cached edge table ``data/gcn/cc_<patt>.txt`` (columns:
    spec, gene, plus one weight column per sample) instead of using *cc*,
    drops zero-weight rows, and returns ``(xx, B)``:

    * ``xx`` -- weighted edge-list graph with the sample column as edge data
    * ``B``  -- explicitly bipartite graph (spec = side 0, gene = side 1)

    Nodes of degree < 5 are pruned from both graphs.

    NOTE(review): parameters *cc*, *ff* and *C* are accepted but unused --
    presumably kept so the joblib-delayed signature matches older callers;
    confirm before removing.
    """
    # print(net)
    # dd=cc[['spec','gene',net]]
    dd=pd.read_csv('data/gcn/cc_'+patt+'.txt',index_col=False,sep='\t',usecols=['spec','gene',net])
    # try:
    # Keep only edges present in this sample (non-zero weight).
    dd=dd[dd[net]!=0]
    # except:
    #     pass
    # ee=nx.from_pandas_edgelist(dd,source='spec',target='gene')
    # remove = [node for node,degree in dict(ee.degree()).items() if degree <5]
    # ee.remove_nodes_from(remove)
    # ff.append(ee)
    B = nx.Graph()
    B.add_nodes_from(dd['spec'], bipartite=0)
    B.add_nodes_from(dd['gene'], bipartite=1)
    B.add_weighted_edges_from(tuple(dd[['spec','gene',net]].itertuples(index=False, name=None)))
    # Prune low-degree nodes (hard-coded threshold of 5).
    remove = [node for node,degree in dict(B.degree()).items() if degree <5]
    B.remove_nodes_from(remove)
    # C.append(B)
    xx=nx.from_pandas_edgelist(dd,source='spec',target='gene',edge_attr=net)
    remove = [node for node,degree in dict(xx.degree()).items() if degree <5]
    xx.remove_nodes_from(remove)
    # with open('data/gcn/NX_'+str(patt)+'_hypert.pkl', 'ab+') as f:
    #     pickle.dump(ff, f)
    # with open('data/gcn/BX_'+str(patt)+'_hypert.pkl', 'ab+') as f:
    #     pickle.dump(C, f)
    return xx,B
def load_list_of_dicts(filename, create_using=nx.Graph):
    """Unpickle a list of graph-like dicts and wrap each one in
    *create_using* (a networkx graph class by default).

    NOTE(review): ``pickle.load`` on untrusted files is unsafe; only feed
    this files produced by this pipeline.
    """
    with open(filename, 'rb') as handle:
        payload = pickle.load(handle)
    return [create_using(entry) for entry in payload]
# @delayed
# @wrap_non_picklable_objects
def meas(measur,uni_bact,relgene,graphs,patt):
    """Compare one networkx metric between HT and non-HT samples.

    *measur* is the metric's dotted name as a string (e.g. ``'nx.degree'``)
    and is resolved with ``eval`` -- only ever pass trusted, hard-coded
    names.  Samples are split on the ``HT`` flag of *uni_bact*, the metric
    is computed per graph, normalised, saved to
    ``data/gcn/<patt>_<measur>.txt`` and drawn as a split violin plot.

    Returns the combined long-form dataframe.
    """
    # Restrict the metadata to samples that actually have a graph column.
    HTXX=uni_bact[uni_bact.index.isin(relgene.columns[1:-2].str.split('-').str[0])]
    HTXX['index']=np.arange(len(HTXX))
    # measur=eval(measur)
    # SECURITY: eval() on the metric name -- safe only for trusted input.
    S = [eval(measur)(graphs[i]) for i in HTXX[HTXX['HT']==0]['index'].values]
    T = [eval(measur)(graphs[i]) for i in HTXX[HTXX['HT']!=0]['index'].values]
    if measur!='nx.degree':
        non=pd.DataFrame(S).melt()
        yes=pd.DataFrame(T).melt()
    elif measur=='nx.degree':
        # nx.degree yields (node, degree) pairs; note only the LAST
        # graph's result survives the .pop() here.
        non=pd.DataFrame(S.pop())
        non=non.rename(columns={0:'variable',1:'value'})
        yes=pd.DataFrame(T.pop())
        yes=yes.rename(columns={0:'variable',1:'value'})
    non['type']='NoHT'
    non.dropna(inplace=True)
    non=non[non.value!=0]
    non=non[~non['variable'].str.contains('UniRef90')]
    # Normalise each group to sum to 1 so the two are comparable.
    non.value=non.value/np.sum(non.value)
    yes['type']='HT'
    yes.dropna(inplace=True)
    yes=yes[yes.value!=0]
    yes=yes[~yes['variable'].str.contains('UniRef90')]
    yes.value=yes.value/np.sum(yes.value)
    # NOTE(review): DataFrame.append was removed in pandas 2.0; on modern
    # pandas this must become pd.concat([non, yes]).
    df=non.append(yes)
    # df=df.dropna()
    df['gen']=df.variable.str.split('_').str[2]
    df.to_csv("data/gcn/"+patt+"_"+str(measur)+".txt",sep='\t')
    plt.figure(figsize=(10,30))
    sns.set_theme(style="whitegrid")
    sns.violinplot(data=df, y="gen", x="value",hue="type",
                   split=True, inner="quart", linewidth=1,
                   orient="h")
    sns.despine(left=True)
    plt.savefig("data/gcn/"+patt+"_"+str(measur)+"_violin.png",dpi=300,bbox_inches = "tight")
    return df
def time_bar(data,XX,rank='rank',species='all'):
    """Plot, per species, a stacked histogram of the top-*XX* ranked targets
    over time and save each figure under ``data/gcn/img/<species>/``.

    *rank* selects the weighting column: ``'rank'`` ranks ``value``;
    ``'rank_diff'``/``'diff'`` rank the difference of ``value_x``/``value_y``.
    *species* limits the plot to one species, or ``'all'``.
    """
    if rank=='rank':
        data['rank']=rankdata(data.value,method='min')
    elif rank=='rank_diff' or rank=='diff':
        data['vx']=rankdata(data.value_x,method='min')
        data['vy']=rankdata(data.value_y,method='min')
        data['rank_diff']=data['vx'].astype('int')-data['vy'].astype('int')
        data['diff']=data['value_x']-data['value_y']
    # elif rank=='value':
    #     rank=data.value
    if species!='all':
        data=data[data['species']==species]
    # clust = ll.groupby(['species','target','time'], as_index=False)['diff'].sum()
    df = data[['species','target','time',rank]]#.sort_values(['time'], ascending=[True]).groupby(['species','time']).max(5)
    # Top XX entries per (species, time); 'level_2' is the original row id
    # exposed by reset_index of the nested groupby result.
    jeff=pd.DataFrame(df.groupby(['species','time'])[rank].nlargest(XX))
    jeff.reset_index(inplace=True)
    for cc in np.unique(jeff.species):
        jeff2=jeff[jeff['species']==cc]
        if species!='all':
            jeff2=df.loc[jeff2['level_2']]
        else:
            jeff2=df.iloc[jeff2['level_2']]
        plt.figure(figsize=(15,5))
        ax = sns.histplot(jeff2, x='time', hue='target', weights=rank,
                          multiple='stack', palette='icefire', shrink=0.6,bins=len(pd.unique(jeff2.time))+5)
        ax.set_ylabel(str(rank)+'_HT')
        ax.set_title(cc)
        # Fix the legend so it's not on top of the bars.
        # legend = ax.get_legend()
        plt.legend([],[], frameon=False)
        Path("data/gcn/img/"+cc).mkdir(parents=True, exist_ok=True)
        # NOTE(review): str(data) stringifies the whole DataFrame into the
        # file name -- this looks like a bug (a label was probably meant).
        plt.savefig("data/gcn/img/"+cc+"/"+str(data)+"_"+cc+"_"+str(rank)+".png",dpi=300,bbox_inches = "tight")
def proc_dat(noHT):
    """Collapse replicate columns by their time prefix and melt to long form.

    Columns named ``<time>_<rep>`` are averaged per time point (the column
    rename mutates the frame passed in); the result is melted to
    (source, target, time, value) rows, a duplicate ``t`` column is kept,
    and ``species`` is taken from the third underscore token of ``source``.
    Rows containing any NaN are dropped.
    """
    # Strip the replicate suffix so identical time points group together.
    noHT.columns = noHT.columns.str.split('_').str[0]
    averaged = noHT.groupby(by=noHT.columns, axis=1).mean().dropna(how='any')
    averaged.reset_index(inplace=True)
    long_form = averaged.melt(['source', 'target']).rename(columns={'variable': 'time'})
    long_form['t'] = long_form['time']
    long_form['species'] = long_form['source'].str.split('_').str[2]
    return long_form.dropna(how='any')
# @delayed
# @wrap_non_picklable_objects
def rev_tbar(jj,XX,gg,species='all'):
    """Summarise the top-*XX* targets per (species, time) as a heat-map of
    factorised target labels; save table to ``data/gcn/<gg>.csv`` and the
    figure to ``data/gcn/img/full_<gg>_10.png``.
    """
    data=jj[['species','target','time','t','value']]
    # df=data.copy()
    # data.reset_index(inplace=True)
    # Total value per (species, time, target).
    data['sum']=pd.DataFrame(data.groupby(['species','t','target'])['value'].transform('sum'))
    # jeff.reset_index(inplace=True)
    del data['value']
    data.drop_duplicates(inplace=True)
    data.reset_index(inplace=True)
    del data['index'],data['time']
    # Top XX per (species, t); 'level_2' indexes back into `data`.
    jeff=pd.DataFrame(data.groupby(['species','t'])['sum'].nlargest(XX))
    jeff.reset_index(inplace=True)
    jeffA=data.iloc[jeff['level_2']]
    tim_len=len(np.unique(jeffA['t']))
    if species!='all':
        jeff=jeff[jeff['species']==species]
    JJ=pd.DataFrame()
    rr=[]
    for q,ee in enumerate((np.unique(jeff.species))):
        jeff2=jeffA[jeffA['species']==ee]#.explode('target')
        # Reshape targets to (XX, tim_len); keep only complete species.
        dd=pd.DataFrame(jeff2['target'].to_numpy().reshape(int(len(jeff2)/tim_len),tim_len,order='F'))
        if len(dd.melt())==(tim_len*XX):
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # needs pd.concat on modern pandas.
            JJ=JJ.append(dd)
            rr=np.append(rr, ee)
    jeffA=jeffA.sort_values(['species', 't'], ascending=[True, True])
    # Factorise the (shuffled) target labels into integer codes.
    labels,levels=pd.factorize(sku.shuffle(JJ.melt()['value']))
    cc=pd.DataFrame(np.array(labels).reshape((XX)*len(rr),tim_len,order='F'))
    # Sort each XX-row band column-wise and insert a zero separator row.
    for i in np.arange(0,len(cc),XX+1):
        for col in cc:
            cc.iloc[i:i+XX,col] = cc.iloc[i:i+XX,col].sort_values(ignore_index=True)
        cc.loc[i+XX]=0
    plt.figure(figsize=(10,30))
    ax=sns.heatmap(cc,cmap='rocket_r',annot=True, fmt="d",cbar=False,xticklabels=False,
                   yticklabels=False).set(ylabel=' - '.join(rr))
    # plt.show()
    data.to_csv('data/gcn/'+str(gg)+'.csv',sep='\t')
    # Path("data/gcn/img/"+cc).mkdir(parents=True, exist_ok=True)
    plt.savefig("data/gcn/img/full_"+str(gg)+"_10.png",dpi=300,bbox_inches = "tight")
def group_time_plot(noHT,steps,XX,spec_spec):
    """Bin sample columns into *steps* time groups, rank the melted values
    and plot a stacked histogram of the top-ranked targets over time.

    NOTE(review): the *XX* parameter is immediately overwritten with 50,
    and the ``spec_spec=='1'`` branch groups on a ``rank_diff`` column this
    function never creates (only ``rank`` is computed) -- that branch will
    raise KeyError as written.
    """
    noHT.columns=noHT.columns.str.split('_').str[0]
    # Quantile-bin the integer time labels into `steps` groups.
    noHT.columns=pd.qcut((noHT.columns).astype('int'), steps, labels=False)
    noHT=noHT.groupby(by=noHT.columns, axis=1).mean()
    noHT=noHT.dropna(how='all')
    noHT.reset_index(inplace=True)
    jj=noHT.melt(['source','target'])
    jj.rename(columns={'variable':'time'},inplace=True)
    jj['t']=jj['time']
    # jj['time']=jj['time'].astype('int')+2000
    # jj['time'] = pd.to_datetime(jj['time'], format='%Y')
    # jj=jj[jj['value']>5]
    jj['species']=jj['source'].str.split('_').str[2]
    jj=jj.dropna(how='any')
    jj['rank']=rankdata(jj.value,method='min')
    XX=50 #10
    # df = noHT[['species','target','time','rank']]
    del jj['value'], jj['t'], jj['source']
    if spec_spec=='1':
        jeff=pd.DataFrame(jj.groupby(['species','time'])['rank_diff'].nlargest(XX))
        jeff=jeff.dropna(how='any')
        jeff.reset_index(inplace=True)
        jeff2=jj.loc[jeff['level_2']]
    else:
        jeff=pd.DataFrame(jj.groupby(['time'])['rank'].nlargest(XX))
        jeff=jeff.dropna(how='any')
        jeff.reset_index(inplace=True)
        jeff2=jj.loc[jeff['level_1']]
    plt.figure(figsize=(15,5))
    ax = sns.histplot(jeff2, x='time', hue='target', weights='rank',
                      multiple='stack', palette='icefire', shrink=0.6,bins=len(pd.unique(jeff2['time']))+5)
    ax.set_ylabel('rank_noHT')
    # ax.set_title(cc)
    # Fix the legend so it's not on top of the bars.
    # legend = ax.get_legend()
    plt.legend([],[], frameon=False)
def time_order_net(control,case,thresh=10**-6,group='source',groups=6,rounder=1,math='mean'):
    """Contrast control vs case networks over quantile-binned time groups.

    Columns of both frames are binned into *groups* time buckets, averaged,
    and the per-*group* mean (or median) difference is computed.  Rows whose
    rounded differences increase monotonically across buckets ("grow") or
    decrease monotonically ("die") are extracted and written to
    ``data/gcn/comp_net/``.

    Returns (grow table or 0, die table or 0, control means, case means).
    """
    def preproc(data):
        # Bin columns by integer time label and compute per-bin mean/var.
        data.columns=data.columns.str.split('_').str[0]
        data.columns=pd.qcut((data.columns).astype('int'), groups, labels=False)
        noHTm=data.groupby(by=data.columns, axis=1).mean()
        noHTm=noHTm.dropna(how='all')
        noHTm.reset_index(inplace=True)
        noHTv=data.groupby(by=data.columns, axis=1).var()
        noHTv=noHTv.dropna(how='all')
        noHTv.reset_index(inplace=True)
        return noHTm,noHTv
    noHTm,noHTv=preproc(control)
    HTm,HTv=preproc(case)
    if math=='mean':
        BB=noHTm[noHTm[0]>thresh].dropna().groupby(group).mean()-HTm[HTm[0]>thresh].dropna().groupby(group).mean()
    elif math=='median':
        BB=noHTm[noHTm[0]>thresh].dropna().groupby(group).median()-HTm[HTm[0]>thresh].dropna().groupby(group).median()
    BB=np.round(BB,rounder)
    # Build monotonicity predicates as strings, then eval them:
    # `aa` chains >= (non-increasing), `bb` chains <= (non-decreasing).
    # SECURITY: eval of locally-built strings only -- no external input.
    aa='(BB[0]>='
    bb='(BB[0]<='
    for i in np.arange(groups)[1:]:
        cc='BB['+str(i)+'])&(BB['+str(i)+']>='
        aa=aa+str(cc)
        dd='BB['+str(i)+'])&(BB['+str(i)+']<='
        bb=bb+str(dd)
    grow=BB[eval(bb[:-9])]
    die=BB[eval(aa[:-9])]
    def proc_run(BBgrow,grow):
        # Keep rows with a net first-to-last change, sort, and persist.
        if len(BBgrow)>0:
            BBgrow[groups]=BBgrow[0]-BBgrow[groups-1]
            BBgrow=BBgrow[BBgrow[groups]!=0]
            BBgrow.sort_values(by=groups,inplace=True)
            del BBgrow[groups]
            BBgrow.to_csv('data/gcn/comp_net/'+str(group)+'_'+str(thresh)+'_'+str(math)+'_'+str(groups)+'_'+grow+'.txt',sep='\t')
        else:
            BBgrow=0
        return BBgrow
    BBgrow=proc_run(grow,'grow')
    BBdie=proc_run(die,'die')
    return BBgrow,BBdie,noHTm,HTm
def build_gcn(i,net,cc,min_deg=5):
    """Build one gene co-occurrence network from the long table *cc*.

    *cc* holds columns ``spec``, ``gene`` and one weight column per sample;
    *net* names the sample column to use.  Zero-weight rows are dropped and
    two graphs are returned:

    * an edge-list graph carrying the sample column as edge data, and
    * an explicitly bipartite graph (spec nodes = side 0, gene nodes = side 1).

    Nodes with degree below *min_deg* are pruned from both graphs.
    (*i* is unused; kept for caller compatibility.)
    """
    edges = cc[['spec', 'gene', net]]
    edges = edges[edges[net] != 0]
    weighted = nx.from_pandas_edgelist(edges, source='spec', target='gene', edge_attr=net)
    low_degree = [n for n, d in dict(weighted.degree()).items() if d < min_deg]
    weighted.remove_nodes_from(low_degree)
    bipartite = nx.Graph()
    bipartite.add_nodes_from(edges['spec'], bipartite=0)
    bipartite.add_nodes_from(edges['gene'], bipartite=1)
    bipartite.add_edges_from(tuple(edges[['spec', 'gene']].itertuples(index=False, name=None)))
    low_degree = [n for n, d in dict(bipartite.degree()).items() if d < min_deg]
    bipartite.remove_nodes_from(low_degree)
    return weighted, bipartite
# with open('data/gcn/NX_Emore_'+name+'.pkl', 'wb') as f:
# pickle.dump(ff, f)
# with open('data/gcn/BX_Emore_'+name+'.pkl', 'wb') as f:
# pickle.dump(C, f)
def buildSYNCSA(dd):
    """Export SYNCSA-style community and trait tables for every sample.

    Sample names are rebuilt from the second and third underscore tokens of
    the column names of *dd*; for each sample a ``comm_<i>.csv``
    (species x sample sums) and a ``trait_<i>.csv`` (source x target pivot)
    are written to the working directory.
    """
    names=pd.unique(dd.columns.str.split('_').str[1]+'_'+dd.columns.str.split('_').str[2])[1:]
    for i in names:
        # ff.columns = ff.columns.str.strip('_x')
        # ff.columns = ff.columns.str.strip('_y')
        # i=i.split('_')[1]+'_'+i.split('_')[2]
        # Columns belonging to this sample, plus the edge endpoints.
        ff=dd.loc[:,dd.columns.str.contains(i)]
        ff[['source','target']]=dd[['source','target']]
        # Keep only species -> UniRef gene-family edges.
        ff=ff[ff['source'].str.contains('s__')]
        ff=ff[ff['target'].str.contains('UniRef')]
        ff.groupby('source').sum().transpose().to_csv('comm_'+i+'.csv')
        ff.reset_index(inplace=True)
        ff.set_index(['source', 'target'], inplace=True)
        del ff['index']
        ff.columns=(ff.columns.str.split('_').str[1]+'_'+ff.columns.str.split('_').str[2])
        gg=ff.groupby(by=ff.columns, axis=1).sum()
        # NOTE(review): positional DataFrame.pivot args were removed in
        # pandas 2.0 -- needs keyword form (index=, columns=, values=) there.
        traits=gg[[i]].reset_index().pivot('source','target',i).dropna(how='all',axis=1).replace(np.nan,0)
        traits.to_csv('trait_'+i+'.csv')
def buildNestedNess():
    """Collect nestedness (N), modularity (Q) and in-block nestedness (I)
    scores from every ``*.npz`` result file in the working directory.

    The sample type label is rebuilt from the underscore-separated pieces
    of the file name (``<x>_<a>_<b>_<c>.npz`` -> ``a_b_c``).

    Returns:
        DataFrame with columns ['N', 'Q', 'I', 'type'], one row per file.
        (The original version built this table and discarded it; returning
        it is backward-compatible, and the unused ``D`` list is dropped.)
    """
    scores = pd.DataFrame(columns=['N', 'Q', 'I', 'type'])
    for row, fname in enumerate(glob.glob('*.npz')):
        result = np.load(fname)
        pieces = fname.split('_')
        label = pieces[1] + '_' + pieces[2] + '_' + pieces[3].split('.')[0]
        scores.loc[row] = [float(result['N']), float(result['Q']), float(result['I']), label]
    return scores
def structural_analysis(ii,i,graphs,ARG_meta,rand,deg_rand):
    """Compute nestedness (N), modularity (Q) and in-block nestedness (I)
    of graph ``graphs[ii]`` via the extremal_bi optimiser and append the
    result line to ``randB_ARG_nest.txt``.

    *i* is a ``"<sample>-<suffix>"`` label resolved against *ARG_meta*;
    with ``rand=True`` a fraction *deg_rand* of edges is shuffled first.
    Returns (N, Q, I, group label, original label).
    """
    # aa= rrr[['from','to','value']].values
    ccc=nx.convert_matrix.to_pandas_edgelist(graphs[ii])
    ee=nx.convert_matrix.to_pandas_edgelist(graphs[ii])
    # cc['weight']=np.random.randn(len(cc))
    pww=i
    j=(i.split('-')[1])
    i=(i.split('-')[0])
    rrr=str(ARG_meta[ARG_meta['id']==i].index.item())+'_'+str(ARG_meta[ARG_meta['id']==i]['group'].item())+'_'+str(j)
    ccc.rename(columns={ccc.columns[2]:rrr},inplace=True)
    # Integer-code the endpoints to build a dense incidence matrix.
    a,b=pd.factorize(ccc['source'])
    c,d=pd.factorize(ccc['target'])
    rrr=pd.DataFrame()
    rrr['from']=a
    rrr['to']=c
    rrr['value']=1
    sss=str(ARG_meta[ARG_meta['id']==i]['group'].item())+'_'+str(j)
    Path('nest/'+sss).mkdir(parents=True, exist_ok=True)
    # rrr[['from','to','value']].to_csv('~/nest/'+sss+'/'+str(ccc.columns[2])+'.csv',sep=' ',index=False,header=False)
    aa= rrr[['from','to','value']].values
    if rand==True: ## to randomize
        aa=pd.DataFrame(aa)
        # NOTE(review): np.float was removed in NumPy 1.24 -- use float().
        ddd=aa.sample(frac=np.float(deg_rand), replace=False, random_state=1) ##degree randomized
        rrr=aa[~aa.isin(ddd)].dropna(how='all')
        ddd.reset_index(inplace=True)
        del ddd['index']
        # NOTE(review): `shuffle` is not defined in this module --
        # presumably shuffle_net (or sklearn.utils.shuffle) was intended.
        sss=shuffle(ddd)
        aa=pd.concat([rrr,sss])
    aa=np.array(aa).astype(int)
    nodes_cols = int(max(aa[j,1] for j in range(aa.shape[0]))+1)
    nodes_rows= int(max(aa[j,0] for j in range(aa.shape[0]))+1)
    # Binary incidence matrix (rows x cols) from the edge list.
    matrix=np.zeros((nodes_rows,nodes_cols),dtype='int')
    for j in range(aa.shape[0]):
        matrix[aa[j,0],aa[j,1]] = 1
    M=matrix
    cols_degr=M.sum(axis=0)
    row_degr=M.sum(axis=1)
    R,C=M.shape #rows and cols
    #Nestednes
    # In-block nestedness with B=1
    Cn_=[np.repeat(1, R),np.repeat(1, C)]
    max_blockN=max(max(Cn_[0]),max(Cn_[1]))+1
    lambdasN=extremal_bi.call_lambda_i(M,cols_degr,row_degr,Cn_[1],Cn_[0],max_blockN,True)
    N=extremal_bi.calculate_Fitness(M,cols_degr,row_degr,lambdasN[0],lambdasN[1],True)
    #Modularity Extremal
    C_=extremal_bi.recursive_step(M,cols_degr,row_degr,.7,3,False)
    max_blockQ=max(max(C_[0]),max(C_[1]))+1
    lambdasQ=extremal_bi.call_lambda_i(M,cols_degr,row_degr,C_[1],C_[0],max_blockQ,False)
    Q=extremal_bi.calculate_Fitness(M,cols_degr,row_degr,lambdasQ[0],lambdasQ[1],False)
    # Inblock nestedness extremal
    Ci_=extremal_bi.recursive_step(M,cols_degr,row_degr,.7,3,True)
    max_blockI=max(max(Ci_[0]),max(Ci_[1]))+1
    lambdasI=extremal_bi.call_lambda_i(M,cols_degr,row_degr,Ci_[1],Ci_[0],max_blockI,True)
    I=extremal_bi.calculate_Fitness(M,cols_degr,row_degr,lambdasI[0],lambdasI[1],True)
    zzz=[str(N),str(Q),str(I),sss]
    print(zzz)
    # np.savetxt('ARG_nest_test.txt', zzz, delimiter = '\t', fmt='%s')
    # Append-mode binary handle: multiple workers can add result rows.
    with open("randB_ARG_nest.txt", "ab") as f:
        np.savetxt(f,np.column_stack([str(N),str(Q),str(I),sss,pww]),delimiter = '\t', fmt='%s')
    return N,Q,I,sss,pww
def shuffle_net(df, n=1, axis=0):
    """Return a copy of *df* whose values are shuffled *n* times.

    With ``axis=0`` each column is permuted independently on the copy
    (row alignment across columns is destroyed); the input frame itself
    is left untouched.
    """
    shuffled = df.copy()
    reps = 0
    while reps < n:
        # np.random.shuffle permutes each column/row in place on the copy.
        shuffled.apply(np.random.shuffle, axis=axis)
        reps += 1
    return shuffled
#https://stackoverflow.com/questions/15772009/shuffling-permutating-a-dataframe-in-pandas
def create_data(path,rand):
    """Load a nestedness-results CSV and pivot it to a sample x type frame.

    *rand* selects rows by the ``R`` flag after "False" has been normalised
    to "0".  The ``type`` label is rebuilt from the underscore-separated
    ``name`` field with a few historical renames applied; duplicate
    (type, sample) pairs keep the last record.  Returns a float frame
    indexed by sample with (metric, type) columns for N, I and Q.
    """
    table = pd.read_csv(path, header=0)#,'type','init'],sep='\t')
    #### for randomized samples
    # Drop header rows repeated by concatenated result files.
    table = table[table['name'] != 'name']
    table['R'] = table['R'].str.replace("False", "0")
    # pd.unique(C['name'])
    table = table[table['R'] == rand]
    del table['R']
    # ####
    parts = table['name'].str.split('_')
    table['type'] = parts.str[1] + '_' + parts.str[2]
    # Historical label clean-up: 00 -> 00ST, fold 03ST into 02ST,
    # then undo the double suffix the first replace can create.
    for old, new in (("_00", "_00ST"), ("_03ST", "_02ST"), ("_00STST", "_00ST")):
        table['type'] = table['type'].str.replace(old, new)
    table['sample'] = parts.str[0]
    table = table[table['N'] != '0.0']
    table = table[~table[['type', 'sample']].duplicated(keep='last')]
    del table['name']
    table.reset_index(inplace=True)
    del table['index']
    wide = table.pivot(index='sample', columns='type', values=['N', 'I', 'Q'])
    return wide.astype('float')
def form_tests(data,var,level):
    """Extract one metric (*var*: 'N', 'I' or 'Q') for treatment *level*
    from the pivoted sample x type frame, keeping the 00ST/01ST/02ST time
    points and adding a ``var`` tag column."""
    metric = data[var].reset_index()
    wanted = ['sample'] + [level + suffix for suffix in ('_00ST', '_01ST', '_02ST')]
    metric = metric[wanted]
    metric['var'] = var
    return metric
def merge_form(data,level):
    """Stack the N, I and Q extracts for treatment *level* into one long frame.

    Fix: the original used ``DataFrame.append``, which was deprecated in
    pandas 1.4 and removed in 2.0; ``pd.concat`` reproduces the same
    row-wise stacking (indices preserved, not reset).
    """
    n_rows = form_tests(data, 'N', level)
    i_rows = form_tests(data, 'I', level)
    q_rows = form_tests(data, 'Q', level)
    return pd.concat([n_rows, i_rows, q_rows])
def output_data(D):
    """Assemble the per-treatment long frames (CLA, LEVO, OTHER) into one
    wide table joined on sample and metric tag."""
    cla = merge_form(D, 'CLA')
    levo = merge_form(D, 'LEVO')
    other = merge_form(D, 'OTHER')
    # H.set_index(['var','sample'],inplace=True)
    # del H['var_x'],H['var_y']#,H0['type']
    combined = cla.merge(levo, on=['sample', 'var'])
    return combined.merge(other, on=['sample', 'var'])
def makeSYNCSAnet(relgene,graphs,JEFF,META,deg_rand):
    """Merge every sample graph's edge list into one wide edge table.

    For each sample column of *relgene*, the matching graph is converted to
    an edge list, UniRef-source edges are flipped so species are always the
    source, the column is renamed from *META*, optionally degree-randomised
    by fraction *deg_rand*, and outer-merged on (source, target).

    NOTE(review): *JEFF* is accepted but unused; if no sample at all matches
    *META*, ``dd`` is never bound and the final return raises NameError.
    """
    # for i,net in tqdm.tqdm(enumerate(BX_graphs)):
    # for ii,i in tqdm():
    for ii,i in (enumerate(relgene.columns[1:])):
        ccc=nx.convert_matrix.to_pandas_edgelist(graphs[ii])
        ee=nx.convert_matrix.to_pandas_edgelist(graphs[ii])
        # cc['weight']=np.random.randn(len(cc))
        pww=i
        j=(i.split('-')[1])
        i=(i.split('-')[0])
        try:
            rrr=str(META[META['id']==i].index.item())+'_'+str(META[META['id']==i]['group'].item())+'_'+str(j)
            ccc.rename(columns={ccc.columns[2]:rrr},inplace=True)
            # Flip edges whose source is a UniRef gene family so that
            # species are always on the source side.
            ddd=ccc[ccc['source'].str.contains('UniRef')]
            ddd[['source','target']] = ddd[['target','source']]
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # needs pd.concat on modern pandas.
            ccc=ccc[~ccc['source'].str.contains('UniRef')].append(ddd)
            if deg_rand!=0:
                aa=pd.DataFrame(ccc)
                # NOTE(review): np.float was removed in NumPy 1.24 -- use float().
                pcc=aa.sample(frac=np.float(deg_rand), replace=False, random_state=1) ##degree randomized
                pol=aa[~aa.isin(pcc)].dropna(how='all')
                pcc.reset_index(inplace=True)
                del pcc['index']
                lll=shuffle_net(pcc)
                ccc=pd.concat([pol,lll])
                del aa,pol,pcc,lll
            # a,b=pd.factorize(ccc['source'])
            # c,d=pd.factorize(ccc['target'])
            # rrr=pd.DataFrame()
            # rrr['from']=a
            # rrr['to']=c
            # rrr['value']=1
            # sss=str(META[META['id']==i]['group'].item())+'_'+str(j)
            # Path('~/nest/'+sss).mkdir(parents=True, exist_ok=True)
            # rrr[['from','to','value']].to_csv('~/nest/'+sss+'/'+str(ccc.columns[2])+'.csv',sep=' ',index=False,header=False)
            # ee.rename(columns={ee.columns[2]:sss},inplace=True)
            print(ii)
            if ii==0:
                dd=ccc
            # ff=ee
            else:
                dd=dd.merge(ccc,on=['source','target'],how='outer')
            # ff=ff.merge(ee,on=['source','target'],how='outer')
            del ddd,rrr,ee,ccc
        except:
            # Broad catch: any lookup/merge failure just skips the sample.
            print('no match for '+str(i))
    return dd
# names=pd.unique(dd.columns.str.split('_').str[1]+'_'+dd.columns.str.split('_').str[2])[1:]
# for i in tqdm(names):
def group4SYNCSA(i,dd,DR):
    """Write SYNCSA community and trait CSVs for a single sample label *i*.

    Filters the wide edge table *dd* to species -> UniRef edges of sample
    *i*, writes the community matrix to
    ``~/SYNCSA_eval/<DR>_rand_comm_<i>.csv`` and the averaged
    source x target trait pivot to ``~/SYNCSA_eval/<DR>_rand_trait_<i>.csv``.
    """
    # ff.columns = ff.columns.str.strip('_x')
    # ff.columns = ff.columns.str.strip('_y')
    # i=i.split('_')[1]+'_'+i.split('_')[2]
    ff=dd.loc[:,dd.columns.str.contains(i)]
    ff[['source','target']]=dd[['source','target']]
    # Keep only species -> UniRef gene-family edges.
    ff=ff[ff['source'].str.contains('s__')]
    ff=ff[ff['target'].str.contains('UniRef')]
    comm=ff.groupby('source').sum().transpose()
    comm.to_csv('~/SYNCSA_eval/'+str(DR)+'_rand_comm_'+i+'.csv')
    ff.reset_index(inplace=True)
    ff.set_index(['source', 'target'], inplace=True)
    del ff['index']
    ff.columns=(ff.columns.str.split('_').str[1]+'_'+ff.columns.str.split('_').str[2])
    gg=ff.groupby(by=ff.columns, axis=1).sum()
    # traits=gg[[i]].reset_index().pivot('source','target',i).dropna(how='all',axis=1).replace(np.nan,0)
    # NOTE(review): positional DataFrame.pivot args were removed in
    # pandas 2.0 -- needs keyword form (index=, columns=, values=) there.
    traits=gg[[i]].reset_index().groupby(['source','target']).mean().reset_index().pivot('source','target',i).dropna(how='all',axis=1).replace(np.nan,0)
    traits.to_csv('~/SYNCSA_eval/'+str(DR)+'_rand_trait_'+i+'.csv')
def research_orthologs(uniID, species):
    """Look up orthologs of a UniProt ID in the inparanoid web service.

    Fetches the XML ortholog-cluster report for *uniID* and scans every
    cluster for proteins whose long species name occurs in *species*.

    Returns:
        ``(prot_id, speclong)`` of the LAST matching protein (the original
        scan order and last-hit-wins behaviour are preserved), or
        ``(None, None)`` when nothing matches -- the previous version fell
        through to an UnboundLocalError in that case.
    """
    url = "http://inparanoid.sbc.su.se/cgi-bin/gene_search.cgi?idtype=all;all_or_selection=all;scorelimit=0.05;rettype=xml;id=" + uniID
    response = request.urlopen(url)
    root = ET.fromstring(response.read())
    prot_id, spec_name = None, None
    for cluster in root.iter('cluster'):
        for protein in cluster.iter('protein'):
            spec = protein.attrib['speclong']
            if spec in species:
                # Keep overwriting: the last hit wins, as before.
                prot_id = protein.attrib['prot_id']
                spec_name = spec
    return prot_id, spec_name
def dgtz(A):
    """Digitise the sequence *A* into roughly sqrt(len(A)) bins.

    Bin edges run from min(A) towards max(A) in steps of
    ``|round(min(A)) - max(A)/bins|``; returns (bin indices, bin count).
    """
    n_bins = np.round(np.sqrt(len(A))).astype(int)
    lo, hi = np.min(A), np.max(A)
    step = np.abs(np.round(lo) - hi / n_bins)
    edges = np.arange(lo, hi, step)
    return np.digitize(A, bins=edges), n_bins
def shan_entropy(c):
    """Shannon entropy in bits of the histogram *c*, ignoring empty cells."""
    probs = c / np.sum(c)
    nonzero = probs[np.nonzero(probs)]
    entropy = -sum(nonzero * np.log2(nonzero))
    return entropy
def get_red(c_X,c_Y,c_Z,c_XZ,c_YZ):
    """Redundancy term of a PID-style decomposition from histograms.

    SI0/SI1 are specific-information vectors (log10-based) of X and Y
    about Z, with inf/nan contributions zeroed; the redundancy is c_Z
    weighted by the single global minimum over both vectors.
    Returns (red, SI0, SI1).
    """
    ratio_x = np.divide(c_XZ, c_Z)
    term_x = np.nan_to_num(ratio_x * np.log10(np.divide(c_XZ, np.matmul(c_X, c_Z))), posinf=0, neginf=0)
    SI0 = np.sum(term_x, axis=0)
    ratio_y = np.divide(c_YZ, c_Z)
    term_y = np.nan_to_num(ratio_y * np.log10(np.divide(c_YZ, np.matmul(c_Y, c_Z))), posinf=0, neginf=0)
    SI1 = np.sum(term_y, axis=0)
    # Scalar minimum over both vectors (deliberately global, not pairwise).
    minSI = np.min([SI0, SI1])
    red = np.sum(c_Z * minSI)
    return red, SI0, SI1
def calc_MI(H_X,H_Y,H_XY):
    """Mutual information I(X;Y) = H(X) + H(Y) - H(X,Y)."""
    mutual = (H_X + H_Y) - H_XY
    return mutual
def calc_CMI(H_XZ,H_YZ,H_XYZ,H_Z):
    """Conditional-MI style combination of joint entropies.

    NOTE(review): the textbook identity is H_XZ + H_YZ - H_XYZ - H_Z; this
    code ADDS H_XYZ.  Kept as-is to preserve downstream numbers -- worth
    confirming against the original derivation.
    """
    return (H_XZ + H_YZ + H_XYZ) - H_Z
def calc_II(CMI_XY,MI_XY):
    """Interaction information: conditional MI minus unconditional MI."""
    interaction = CMI_XY - MI_XY
    return interaction
# cdf=[]
# pucN=[]
# data=pd.read_csv('data/Pipeline_consolidate_220301/UP_gene_sum_Lx.txt',sep='\t',header=None,index_col=0)
# data=data[1:12]
# for i in np.arange(1,len(data)):
# @jit(nopython=True)
def puc_cal(data,i,genes,cdf,puc_genes):
    """Partial-information (PUC) scan of gene row *i* against all (j, k)
    row pairs of *data*, appending the unique-information fraction
    ``u_xy / MIxy`` and the gene pair to *cdf* / *puc_genes*, then
    pickling both lists under ``data/Pipeline_consolidate_220301/gcn/``.

    On entry, non-empty *cdf* / *puc_genes* are stringified into the
    ``tiss`` / ``omic`` output-file tags and then cleared.

    NOTE(review): if either list arrives empty, ``tiss``/``omic`` are
    never bound and the final ``open`` raises NameError -- confirm callers
    always pass non-empty seed lists.
    """
    if cdf!=[]:
        tiss=str(cdf)
        cdf=[]
    if puc_genes!=[]:
        omic=str(puc_genes)
        puc_genes=[]
    for j in (np.arange(1,len(data))):#,position=1, desc="j", leave=False, colour='magenta'):
        pucN=[];
        for k in (np.arange(1,len(data))):#,position=2, desc="k", leave=False, colour='cyan'):
            # try:
                # time.sleep(0.5)
            puc=0
            if (i!=j)&(i!=k)&(j!=k):
                # print(i,j,k)
                # Rows i/j/k are the X/Y/Z variables of the PID triple.
                A=np.array(data.iloc[i]).tolist()
                B=np.array(data.iloc[j]).tolist()
                C=np.array(data.iloc[k]).tolist()
                # print(A)
                # A=np.array(data[1]).tolist()
                # B=np.array(data[2]).tolist()
                # C=np.array(data[3]).tolist()
                # Digitise, then normalise the bin indices to sum to 1.
                tmpA,ba=dgtz(A);
                tmpB,bb=dgtz(B);
                tmpC,cc=dgtz(C)
                A=tmpA/np.sum(tmpA);B=tmpB/np.sum(tmpB);C=tmpC/np.sum(tmpC);
                b=np.round(np.sqrt(len(A))).astype(int)
                # Joint and marginal histograms for every entropy needed.
                c_XY = np.histogramdd([A,B],bins=(b,b))[0]
                c_XZ = np.histogramdd([A,C],bins=(b,b))[0]
                c_YZ = np.histogramdd([B,C],bins=(b,b))[0]
                c_XYZ = np.histogramdd([A,B,C],bins=(b,b,b))[0]
                c_Z = np.histogramdd([C],bins=(b))[0]
                c_Y = np.histogramdd([B],bins=(b))[0]
                c_X = np.histogramdd([A],bins=(b))[0]
                # H=[]
                # for ii,i in enumerate([c_X,c_Y,c_Z,c_XY,c_XZ,c_YZ,c_XYZ]):
                #     i.split('_')[1]
                H_X= shan_entropy(c_X)
                H_Y = shan_entropy(c_Y)
                H_Z = shan_entropy(c_Z)
                H_XY = shan_entropy(c_XY)
                H_XZ = shan_entropy(c_XZ)
                H_YZ = shan_entropy(c_YZ)
                H_XYZ = shan_entropy(c_XYZ)
                MIxy=calc_MI(H_X,H_Y,H_XY)
                MIxz=calc_MI(H_X,H_Z,H_XZ)
                MIyz=calc_MI(H_Z,H_Y,H_YZ)
                CMIx=calc_CMI(H_XZ,H_XY,H_XYZ,H_X)
                CMIy=calc_CMI(H_XY,H_YZ,H_XYZ,H_Y)
                CMIz=calc_CMI(H_XZ,H_YZ,H_XYZ,H_Z)
                # NOTE(review): this list of interaction terms is built
                # and immediately discarded (kept for reference?).
                [calc_II(CMIz,MIxy),
                calc_II(CMIy,MIxz),
                calc_II(CMIx,MIyz)]
                rX=get_red(c_X,c_Z,c_Y,c_XY,c_XZ)
                rY=get_red(c_X,c_Y,c_Z,c_XZ,c_YZ)
                rZ=get_red(c_Y,c_X,c_Z,c_YZ,c_XY)
                # # [rX[0],rY[0],rZ[0]]
                # Szxy=calc_II(CMIz,MIxy)+rX[0]
                # Unique information = MI minus the redundancy share.
                u_xy=MIxy-rZ[0]
                u_xz=MIxz-rY[0]
                u_yz=MIyz-rX[0]
                # PID=Szxy + u_xz + u_yz + rZ[0]
                # PID
            # if j!=k:
                puc+=(u_xy/MIxy)
            # print(puc)
            # else:
            #     puc.append(1)
            # except:
            #     print(i,j,k)
        # print(puc)
        # pucN.append(puc)
        # if i!=j:
            # One record per (j, k) pair; puc stays 0 for skipped pairs.
            cdf.append(puc)
            puc_genes.append([genes[i],genes[j]])
    # if (i!=j)&(i!=k)&(j!=k):
    #     cdf.append(pucN)
    with open('data/Pipeline_consolidate_220301/gcn/'+tiss+'_'+omic+'_cdf.pkl', 'wb') as f:
        pickle.dump(cdf, f)
    with open('data/Pipeline_consolidate_220301/gcn/'+tiss+'_'+omic+'_puc_genes.pkl', 'wb') as f:
        pickle.dump(puc_genes, f)
    # p = Path('data/Pipeline_consolidate_220301/gcn/'+tiss+'_'+omic+'_cdf.npy')
    # with p.open('ab') as f:
    #     np.save(f, cdf)
    # p = Path('data/Pipeline_consolidate_220301/gcn/'+tiss+'_'+omic+'_puc_genes.npy')
    # with p.open('ab') as f:
    #     np.save(f, puc_genes)
# return cdf, puc_genes | [
"sys.path.insert",
"pandas.read_csv",
"numpy.array",
"seaborn.violinplot",
"networkx.convert_matrix.to_pandas_edgelist",
"numpy.arange",
"numpy.divide",
"pandas.unique",
"numpy.repeat",
"numpy.histogramdd",
"seaborn.despine",
"pathlib.Path",
"extremal_bi.calculate_Fitness",
"networkx.from_... | [((561, 605), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""./nestedness_analysis/"""'], {}), "(1, './nestedness_analysis/')\n", (576, 605), False, 'import os, glob, sys, importlib, pickle\n'), ((917, 1022), 'pandas.read_csv', 'pd.read_csv', (["('data/gcn/cc_' + patt + '.txt')"], {'index_col': '(False)', 'sep': '"""\t"""', 'usecols': "['spec', 'gene', net]"}), "('data/gcn/cc_' + patt + '.txt', index_col=False, sep='\\t',\n usecols=['spec', 'gene', net])\n", (928, 1022), True, 'import pandas as pd\n'), ((1285, 1295), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1293, 1295), True, 'import networkx as nx\n'), ((1624, 1696), 'networkx.from_pandas_edgelist', 'nx.from_pandas_edgelist', (['dd'], {'source': '"""spec"""', 'target': '"""gene"""', 'edge_attr': 'net'}), "(dd, source='spec', target='gene', edge_attr=net)\n", (1647, 1696), True, 'import networkx as nx\n'), ((3454, 3482), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 30)'}), '(figsize=(10, 30))\n', (3464, 3482), True, 'import matplotlib.pyplot as plt\n'), ((3486, 3518), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""whitegrid"""'}), "(style='whitegrid')\n", (3499, 3518), True, 'import seaborn as sns\n'), ((3524, 3636), 'seaborn.violinplot', 'sns.violinplot', ([], {'data': 'df', 'y': '"""gen"""', 'x': '"""value"""', 'hue': '"""type"""', 'split': '(True)', 'inner': '"""quart"""', 'linewidth': '(1)', 'orient': '"""h"""'}), "(data=df, y='gen', x='value', hue='type', split=True, inner=\n 'quart', linewidth=1, orient='h')\n", (3538, 3636), True, 'import seaborn as sns\n'), ((3673, 3695), 'seaborn.despine', 'sns.despine', ([], {'left': '(True)'}), '(left=True)\n', (3684, 3695), True, 'import seaborn as sns\n'), ((4680, 4703), 'numpy.unique', 'np.unique', (['jeff.species'], {}), '(jeff.species)\n', (4689, 4703), True, 'import numpy as np\n'), ((6778, 6792), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6790, 6792), True, 'import pandas as pd\n'), ((7524, 
7552), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 30)'}), '(figsize=(10, 30))\n', (7534, 7552), True, 'import matplotlib.pyplot as plt\n'), ((8599, 8631), 'scipy.stats.rankdata', 'rankdata', (['jj.value'], {'method': '"""min"""'}), "(jj.value, method='min')\n", (8607, 8631), False, 'from scipy.stats import rankdata\n'), ((9169, 9196), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (9179, 9196), True, 'import matplotlib.pyplot as plt\n'), ((9510, 9543), 'matplotlib.pyplot.legend', 'plt.legend', (['[]', '[]'], {'frameon': '(False)'}), '([], [], frameon=False)\n', (9520, 9543), True, 'import matplotlib.pyplot as plt\n'), ((10449, 10470), 'numpy.round', 'np.round', (['BB', 'rounder'], {}), '(BB, rounder)\n', (10457, 10470), True, 'import numpy as np\n'), ((12466, 12538), 'networkx.from_pandas_edgelist', 'nx.from_pandas_edgelist', (['dd'], {'source': '"""spec"""', 'target': '"""gene"""', 'edge_attr': 'net'}), "(dd, source='spec', target='gene', edge_attr=net)\n", (12489, 12538), True, 'import networkx as nx\n'), ((12682, 12692), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (12690, 12692), True, 'import networkx as nx\n'), ((14209, 14254), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['N', 'Q', 'I', 'type']"}), "(columns=['N', 'Q', 'I', 'type'])\n", (14221, 14254), True, 'import pandas as pd\n'), ((14271, 14289), 'glob.glob', 'glob.glob', (['"""*.npz"""'], {}), "('*.npz')\n", (14280, 14289), False, 'import os, glob, sys, importlib, pickle\n'), ((14597, 14645), 'networkx.convert_matrix.to_pandas_edgelist', 'nx.convert_matrix.to_pandas_edgelist', (['graphs[ii]'], {}), '(graphs[ii])\n', (14633, 14645), True, 'import networkx as nx\n'), ((14653, 14701), 'networkx.convert_matrix.to_pandas_edgelist', 'nx.convert_matrix.to_pandas_edgelist', (['graphs[ii]'], {}), '(graphs[ii])\n', (14689, 14701), True, 'import networkx as nx\n'), ((14993, 15020), 'pandas.factorize', 'pd.factorize', 
(["ccc['source']"], {}), "(ccc['source'])\n", (15005, 15020), True, 'import pandas as pd\n'), ((15030, 15057), 'pandas.factorize', 'pd.factorize', (["ccc['target']"], {}), "(ccc['target'])\n", (15042, 15057), True, 'import pandas as pd\n'), ((15066, 15080), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15078, 15080), True, 'import pandas as pd\n'), ((15935, 15982), 'numpy.zeros', 'np.zeros', (['(nodes_rows, nodes_cols)'], {'dtype': '"""int"""'}), "((nodes_rows, nodes_cols), dtype='int')\n", (15943, 15982), True, 'import numpy as np\n'), ((16301, 16388), 'extremal_bi.call_lambda_i', 'extremal_bi.call_lambda_i', (['M', 'cols_degr', 'row_degr', 'Cn_[1]', 'Cn_[0]', 'max_blockN', '(True)'], {}), '(M, cols_degr, row_degr, Cn_[1], Cn_[0],\n max_blockN, True)\n', (16326, 16388), False, 'import extremal_bi\n'), ((16385, 16475), 'extremal_bi.calculate_Fitness', 'extremal_bi.calculate_Fitness', (['M', 'cols_degr', 'row_degr', 'lambdasN[0]', 'lambdasN[1]', '(True)'], {}), '(M, cols_degr, row_degr, lambdasN[0], lambdasN\n [1], True)\n', (16414, 16475), False, 'import extremal_bi\n'), ((16499, 16564), 'extremal_bi.recursive_step', 'extremal_bi.recursive_step', (['M', 'cols_degr', 'row_degr', '(0.7)', '(3)', '(False)'], {}), '(M, cols_degr, row_degr, 0.7, 3, False)\n', (16525, 16564), False, 'import extremal_bi\n'), ((16616, 16702), 'extremal_bi.call_lambda_i', 'extremal_bi.call_lambda_i', (['M', 'cols_degr', 'row_degr', 'C_[1]', 'C_[0]', 'max_blockQ', '(False)'], {}), '(M, cols_degr, row_degr, C_[1], C_[0], max_blockQ,\n False)\n', (16641, 16702), False, 'import extremal_bi\n'), ((16699, 16790), 'extremal_bi.calculate_Fitness', 'extremal_bi.calculate_Fitness', (['M', 'cols_degr', 'row_degr', 'lambdasQ[0]', 'lambdasQ[1]', '(False)'], {}), '(M, cols_degr, row_degr, lambdasQ[0], lambdasQ\n [1], False)\n', (16728, 16790), False, 'import extremal_bi\n'), ((16824, 16888), 'extremal_bi.recursive_step', 'extremal_bi.recursive_step', (['M', 'cols_degr', 'row_degr', '(0.7)', 
'(3)', '(True)'], {}), '(M, cols_degr, row_degr, 0.7, 3, True)\n', (16850, 16888), False, 'import extremal_bi\n'), ((16942, 17029), 'extremal_bi.call_lambda_i', 'extremal_bi.call_lambda_i', (['M', 'cols_degr', 'row_degr', 'Ci_[1]', 'Ci_[0]', 'max_blockI', '(True)'], {}), '(M, cols_degr, row_degr, Ci_[1], Ci_[0],\n max_blockI, True)\n', (16967, 17029), False, 'import extremal_bi\n'), ((17026, 17116), 'extremal_bi.calculate_Fitness', 'extremal_bi.calculate_Fitness', (['M', 'cols_degr', 'row_degr', 'lambdasI[0]', 'lambdasI[1]', '(True)'], {}), '(M, cols_degr, row_degr, lambdasI[0], lambdasI\n [1], True)\n', (17055, 17116), False, 'import extremal_bi\n'), ((17671, 17698), 'pandas.read_csv', 'pd.read_csv', (['path'], {'header': '(0)'}), '(path, header=0)\n', (17682, 17698), True, 'import pandas as pd\n'), ((22844, 22864), 'urllib.request.urlopen', 'request.urlopen', (['url'], {}), '(url)\n', (22859, 22864), False, 'from urllib import request\n'), ((22903, 22922), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['txml'], {}), '(txml)\n', (22916, 22922), True, 'import xml.etree.ElementTree as ET\n'), ((23573, 23598), 'numpy.digitize', 'np.digitize', (['A'], {'bins': 'binA'}), '(A, bins=binA)\n', (23584, 23598), True, 'import numpy as np\n'), ((24120, 24138), 'numpy.min', 'np.min', (['[SI0, SI1]'], {}), '([SI0, SI1])\n', (24126, 24138), True, 'import numpy as np\n'), ((24146, 24165), 'numpy.sum', 'np.sum', (['(c_Z * minSI)'], {}), '(c_Z * minSI)\n', (24152, 24165), True, 'import numpy as np\n'), ((2135, 2149), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2146, 2149), False, 'import os, glob, sys, importlib, pickle\n'), ((3102, 3119), 'numpy.sum', 'np.sum', (['non.value'], {}), '(non.value)\n', (3108, 3119), True, 'import numpy as np\n'), ((3275, 3292), 'numpy.sum', 'np.sum', (['yes.value'], {}), '(yes.value)\n', (3281, 3292), True, 'import numpy as np\n'), ((3898, 3932), 'scipy.stats.rankdata', 'rankdata', (['data.value'], {'method': '"""min"""'}), 
"(data.value, method='min')\n", (3906, 3932), False, 'from scipy.stats import rankdata\n'), ((4881, 4908), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (4891, 4908), True, 'import matplotlib.pyplot as plt\n'), ((5247, 5280), 'matplotlib.pyplot.legend', 'plt.legend', (['[]', '[]'], {'frameon': '(False)'}), '([], [], frameon=False)\n', (5257, 5280), True, 'import matplotlib.pyplot as plt\n'), ((6681, 6702), 'numpy.unique', 'np.unique', (["jeffA['t']"], {}), "(jeffA['t'])\n", (6690, 6702), True, 'import numpy as np\n'), ((6830, 6853), 'numpy.unique', 'np.unique', (['jeff.species'], {}), '(jeff.species)\n', (6839, 6853), True, 'import numpy as np\n'), ((10519, 10536), 'numpy.arange', 'np.arange', (['groups'], {}), '(groups)\n', (10528, 10536), True, 'import numpy as np\n'), ((14333, 14343), 'numpy.load', 'np.load', (['j'], {}), '(j)\n', (14340, 14343), True, 'import numpy as np\n'), ((15471, 15487), 'pandas.DataFrame', 'pd.DataFrame', (['aa'], {}), '(aa)\n', (15483, 15487), True, 'import pandas as pd\n'), ((15733, 15754), 'pandas.concat', 'pd.concat', (['[rrr, sss]'], {}), '([rrr, sss])\n', (15742, 15754), True, 'import pandas as pd\n'), ((16209, 16224), 'numpy.repeat', 'np.repeat', (['(1)', 'R'], {}), '(1, R)\n', (16218, 16224), True, 'import numpy as np\n'), ((16225, 16240), 'numpy.repeat', 'np.repeat', (['(1)', 'C'], {}), '(1, C)\n', (16234, 16240), True, 'import numpy as np\n'), ((19347, 19395), 'networkx.convert_matrix.to_pandas_edgelist', 'nx.convert_matrix.to_pandas_edgelist', (['graphs[ii]'], {}), '(graphs[ii])\n', (19383, 19395), True, 'import networkx as nx\n'), ((19407, 19455), 'networkx.convert_matrix.to_pandas_edgelist', 'nx.convert_matrix.to_pandas_edgelist', (['graphs[ii]'], {}), '(graphs[ii])\n', (19443, 19455), True, 'import networkx as nx\n'), ((23535, 23544), 'numpy.min', 'np.min', (['A'], {}), '(A)\n', (23541, 23544), True, 'import numpy as np\n'), ((23547, 23556), 'numpy.max', 'np.max', (['A'], {}), 
'(A)\n', (23553, 23556), True, 'import numpy as np\n'), ((23664, 23673), 'numpy.sum', 'np.sum', (['c'], {}), '(c)\n', (23670, 23673), True, 'import numpy as np\n'), ((23707, 23731), 'numpy.nonzero', 'np.nonzero', (['c_normalized'], {}), '(c_normalized)\n', (23717, 23731), True, 'import numpy as np\n'), ((3995, 4031), 'scipy.stats.rankdata', 'rankdata', (['data.value_x'], {'method': '"""min"""'}), "(data.value_x, method='min')\n", (4003, 4031), False, 'from scipy.stats import rankdata\n'), ((4050, 4086), 'scipy.stats.rankdata', 'rankdata', (['data.value_y'], {'method': '"""min"""'}), "(data.value_y, method='min')\n", (4058, 4086), False, 'from scipy.stats import rankdata\n'), ((7107, 7124), 'numpy.append', 'np.append', (['rr', 'ee'], {}), '(rr, ee)\n', (7116, 7124), True, 'import numpy as np\n'), ((7559, 7666), 'seaborn.heatmap', 'sns.heatmap', (['cc'], {'cmap': '"""rocket_r"""', 'annot': '(True)', 'fmt': '"""d"""', 'cbar': '(False)', 'xticklabels': '(False)', 'yticklabels': '(False)'}), "(cc, cmap='rocket_r', annot=True, fmt='d', cbar=False,\n xticklabels=False, yticklabels=False)\n", (7570, 7666), True, 'import seaborn as sns\n'), ((15206, 15225), 'pathlib.Path', 'Path', (["('nest/' + sss)"], {}), "('nest/' + sss)\n", (15210, 15225), False, 'from pathlib import Path\n'), ((27860, 27879), 'pickle.dump', 'pickle.dump', (['cdf', 'f'], {}), '(cdf, f)\n', (27871, 27879), False, 'import os, glob, sys, importlib, pickle\n'), ((27994, 28019), 'pickle.dump', 'pickle.dump', (['puc_genes', 'f'], {}), '(puc_genes, f)\n', (28005, 28019), False, 'import os, glob, sys, importlib, pickle\n'), ((2675, 2690), 'pandas.DataFrame', 'pd.DataFrame', (['S'], {}), '(S)\n', (2687, 2690), True, 'import pandas as pd\n'), ((2710, 2725), 'pandas.DataFrame', 'pd.DataFrame', (['T'], {}), '(T)\n', (2722, 2725), True, 'import pandas as pd\n'), ((5288, 5314), 'pathlib.Path', 'Path', (["('data/gcn/img/' + cc)"], {}), "('data/gcn/img/' + cc)\n", (5292, 5314), False, 'from pathlib import Path\n'), 
((7285, 7301), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (7293, 7301), True, 'import numpy as np\n'), ((15515, 15533), 'numpy.float', 'np.float', (['deg_rand'], {}), '(deg_rand)\n', (15523, 15533), True, 'import numpy as np\n'), ((15765, 15777), 'numpy.array', 'np.array', (['aa'], {}), '(aa)\n', (15773, 15777), True, 'import numpy as np\n'), ((20003, 20020), 'pandas.DataFrame', 'pd.DataFrame', (['ccc'], {}), '(ccc)\n', (20015, 20020), True, 'import pandas as pd\n'), ((20319, 20340), 'pandas.concat', 'pd.concat', (['[pol, lll]'], {}), '([pol, lll])\n', (20328, 20340), True, 'import pandas as pd\n'), ((23487, 23496), 'numpy.min', 'np.min', (['A'], {}), '(A)\n', (23493, 23496), True, 'import numpy as np\n'), ((23499, 23508), 'numpy.max', 'np.max', (['A'], {}), '(A)\n', (23505, 23508), True, 'import numpy as np\n'), ((23760, 23781), 'numpy.log2', 'np.log2', (['c_normalized'], {}), '(c_normalized)\n', (23767, 23781), True, 'import numpy as np\n'), ((23864, 23884), 'numpy.divide', 'np.divide', (['c_XZ', 'c_Z'], {}), '(c_XZ, c_Z)\n', (23873, 23884), True, 'import numpy as np\n'), ((23991, 24011), 'numpy.divide', 'np.divide', (['c_YZ', 'c_Z'], {}), '(c_YZ, c_Z)\n', (24000, 24011), True, 'import numpy as np\n'), ((9339, 9363), 'pandas.unique', 'pd.unique', (["jeff2['time']"], {}), "(jeff2['time'])\n", (9348, 9363), True, 'import pandas as pd\n'), ((25508, 25520), 'numpy.sum', 'np.sum', (['tmpA'], {}), '(tmpA)\n', (25514, 25520), True, 'import numpy as np\n'), ((25528, 25540), 'numpy.sum', 'np.sum', (['tmpB'], {}), '(tmpB)\n', (25534, 25540), True, 'import numpy as np\n'), ((25548, 25560), 'numpy.sum', 'np.sum', (['tmpC'], {}), '(tmpC)\n', (25554, 25560), True, 'import numpy as np\n'), ((25642, 25677), 'numpy.histogramdd', 'np.histogramdd', (['[A, B]'], {'bins': '(b, b)'}), '([A, B], bins=(b, b))\n', (25656, 25677), True, 'import numpy as np\n'), ((25701, 25736), 'numpy.histogramdd', 'np.histogramdd', (['[A, C]'], {'bins': '(b, b)'}), '([A, C], bins=(b, 
b))\n', (25715, 25736), True, 'import numpy as np\n'), ((25760, 25795), 'numpy.histogramdd', 'np.histogramdd', (['[B, C]'], {'bins': '(b, b)'}), '([B, C], bins=(b, b))\n', (25774, 25795), True, 'import numpy as np\n'), ((25820, 25861), 'numpy.histogramdd', 'np.histogramdd', (['[A, B, C]'], {'bins': '(b, b, b)'}), '([A, B, C], bins=(b, b, b))\n', (25834, 25861), True, 'import numpy as np\n'), ((25882, 25909), 'numpy.histogramdd', 'np.histogramdd', (['[C]'], {'bins': 'b'}), '([C], bins=b)\n', (25896, 25909), True, 'import numpy as np\n'), ((25936, 25963), 'numpy.histogramdd', 'np.histogramdd', (['[B]'], {'bins': 'b'}), '([B], bins=b)\n', (25950, 25963), True, 'import numpy as np\n'), ((25990, 26017), 'numpy.histogramdd', 'np.histogramdd', (['[A]'], {'bins': 'b'}), '([A], bins=b)\n', (26004, 26017), True, 'import numpy as np\n'), ((5057, 5078), 'pandas.unique', 'pd.unique', (['jeff2.time'], {}), '(jeff2.time)\n', (5066, 5078), True, 'import pandas as pd\n'), ((20056, 20074), 'numpy.float', 'np.float', (['deg_rand'], {}), '(deg_rand)\n', (20064, 20074), True, 'import numpy as np\n'), ((23910, 23929), 'numpy.matmul', 'np.matmul', (['c_X', 'c_Z'], {}), '(c_X, c_Z)\n', (23919, 23929), True, 'import numpy as np\n'), ((24037, 24056), 'numpy.matmul', 'np.matmul', (['c_Y', 'c_Z'], {}), '(c_Y, c_Z)\n', (24046, 24056), True, 'import numpy as np\n'), ((25110, 25132), 'numpy.array', 'np.array', (['data.iloc[i]'], {}), '(data.iloc[i])\n', (25118, 25132), True, 'import numpy as np\n'), ((25160, 25182), 'numpy.array', 'np.array', (['data.iloc[j]'], {}), '(data.iloc[j])\n', (25168, 25182), True, 'import numpy as np\n'), ((25210, 25232), 'numpy.array', 'np.array', (['data.iloc[k]'], {}), '(data.iloc[k])\n', (25218, 25232), True, 'import numpy as np\n')] |
import numpy as np
def dtw(series_1, series_2, norm_func=np.linalg.norm):
    """Dynamic time warping between two sequences of vectors.

    Args:
        series_1, series_2: sequences of elements that support subtraction.
        norm_func: distance between two elements (default: Euclidean norm).

    Returns:
        (matches, total_cost, mappings_series_1, mappings_series_2, acc):
        the warping path as (i, j) pairs, the alignment cost, the per-index
        mappings in each direction, and the accumulated-cost matrix.
    """
    n, m = len(series_1), len(series_2)
    # Accumulated-cost matrix with an infinite border so the first
    # row/column can only be reached through the (0, 0) corner.
    acc = np.full((n + 1, m + 1), np.inf)
    acc[0, 0] = 0.0
    for i in range(n):
        for j in range(m):
            step = norm_func(series_1[i] - series_2[j])
            acc[i + 1, j + 1] = step + min(acc[i, j + 1], acc[i + 1, j], acc[i, j])
    # Drop the border; indices now line up with the input series.
    acc = acc[1:, 1:]
    # Trace the optimal path back from the bottom-right corner.
    mappings_series_1 = [[] for _ in range(acc.shape[0])]
    mappings_series_2 = [[] for _ in range(acc.shape[1])]
    matches = []
    i, j = acc.shape[0] - 1, acc.shape[1] - 1
    while i > 0 or j > 0:
        matches.append((i, j))
        mappings_series_1[i].append(j)
        mappings_series_2[j].append(i)
        candidates = [
            acc[i - 1, j - 1] if (i > 0 and j > 0) else np.inf,  # diagonal
            acc[i - 1, j] if i > 0 else np.inf,                  # up
            acc[i, j - 1] if j > 0 else np.inf,                  # left
        ]
        best = np.argmin(candidates)
        if best == 0:
            i, j = i - 1, j - 1
        elif best == 1:
            i -= 1
        else:
            j -= 1
    matches.append((0, 0))
    mappings_series_1[0].append(0)
    mappings_series_2[0].append(0)
    matches.reverse()
    for mp in mappings_series_1:
        mp.reverse()
    for mp in mappings_series_2:
        mp.reverse()
    return matches, acc[-1, -1], mappings_series_1, mappings_series_2, acc
| [
"numpy.argmin"
] | [((901, 949), 'numpy.argmin', 'np.argmin', (['[option_diag, option_up, option_left]'], {}), '([option_diag, option_up, option_left])\n', (910, 949), True, 'import numpy as np\n')] |
"""Bayesian polynomial mixture model."""
# pylint: disable=invalid-name
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
class BayesianPolynomialMixture: # pylint: disable=too-few-public-methods
    """Handles creation of a polynomial mixture model."""

    def __init__(self, num_components=5, polynomial_degree=3):
        """Creates polynomial mixture with given mixture components of given degree.

        Args:
            num_components: Number of mixture components.
            polynomial_degree: Degree of each component's polynomial.
        """
        self.num_components = num_components
        self.polynomial_degree = polynomial_degree
        # Prior precision grows by a factor of 10 per coefficient order, so
        # higher-degree terms are shrunk more strongly toward zero.
        self.coefficient_precisions = [10.0 ** x for x in range(self.polynomial_degree + 1)]
        # Sparse Dirichlet prior over mixture weights (concentration < 1
        # favors solutions that use few components).
        self.concentration = np.array([0.1 for _ in range(self.num_components)])
        # Degrees of freedom for the Wishart prior; exceeds the matrix
        # dimension (polynomial_degree + 1) so the prior is proper.
        self.wishart_df = self.polynomial_degree + 2.0
        # Heavy-tailed Student-t observation likelihood.
        self.student_df = 2

    def create_model(self, X):
        """Defines the joint distribution of the mixture model.

        Args:
            X: Design matrix fed to tf.linalg.matmul against the sampled
               coefficients below; its trailing dimension must match the
               number of polynomial coefficients (degree + 1).

        Returns:
            A tfd.JointDistributionNamed over precision, coefficients,
            noise scale, mixture probabilities, and the observed mixture.
        """
        # One row of per-coefficient precisions for each mixture component.
        precision_scale = np.repeat(np.expand_dims(self.coefficient_precisions, 0), self.num_components, axis=0)
        # NOTE: the lambda parameter names below match the dict keys; that is
        # how JointDistributionNamed wires the dependency structure.
        joint_distribution = tfd.JointDistributionNamed(
            dict(
                # Wishart prior over per-component precision matrices,
                # sampled/consumed as Cholesky factors
                # (input_output_cholesky=True).
                precision=tfd.Independent(
                    tfd.WishartLinearOperator(
                        df=self.wishart_df,
                        scale=tf.linalg.LinearOperatorDiag(precision_scale),
                        input_output_cholesky=True,
                        name="precision",
                    ),
                    reinterpreted_batch_ndims=1,
                ),
                # Zero-mean Gaussian coefficients; the scale factor is the
                # inverse of the sampled precision Cholesky factor.
                coefficients=lambda precision: tfd.Independent(
                    tfd.MultivariateNormalTriL(
                        loc=0, scale_tril=tfb.MatrixInverseTriL()(precision), name="coefficients"
                    ),
                    reinterpreted_batch_ndims=1,
                ),
                # Half-Cauchy prior on the observation noise scale.
                scale=tfd.HalfCauchy(loc=np.float64(0.0), scale=np.float64(1.0), name="noise_scale"),
                mixture_probs=tfd.Dirichlet(concentration=self.concentration, name="mixture_probs"),
                # Student-t mixture likelihood with component means given by
                # X @ coefficients^T.
                mixture=lambda mixture_probs, coefficients, scale: tfd.Sample(
                    tfd.MixtureSameFamily(
                        mixture_distribution=tfd.Categorical(probs=mixture_probs, name="mixture_distribution"),
                        components_distribution=tfd.StudentT(
                            df=self.student_df,
                            loc=tf.linalg.matmul(X, coefficients, transpose_b=True),
                            scale=scale,
                            name="sample_likelihood",
                        ),
                        name="mixture_components",
                    ),
                    sample_shape=1,
                ),
            ),
            name="joint_distribution",
        )
        return joint_distribution
| [
"numpy.float64",
"tensorflow.linalg.LinearOperatorDiag",
"numpy.expand_dims",
"tensorflow.linalg.matmul"
] | [((973, 1019), 'numpy.expand_dims', 'np.expand_dims', (['self.coefficient_precisions', '(0)'], {}), '(self.coefficient_precisions, 0)\n', (987, 1019), True, 'import numpy as np\n'), ((1863, 1878), 'numpy.float64', 'np.float64', (['(0.0)'], {}), '(0.0)\n', (1873, 1878), True, 'import numpy as np\n'), ((1886, 1901), 'numpy.float64', 'np.float64', (['(1.0)'], {}), '(1.0)\n', (1896, 1901), True, 'import numpy as np\n'), ((1289, 1334), 'tensorflow.linalg.LinearOperatorDiag', 'tf.linalg.LinearOperatorDiag', (['precision_scale'], {}), '(precision_scale)\n', (1317, 1334), True, 'import tensorflow as tf\n'), ((2401, 2452), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['X', 'coefficients'], {'transpose_b': '(True)'}), '(X, coefficients, transpose_b=True)\n', (2417, 2452), True, 'import tensorflow as tf\n')] |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the formulation of the SSAM model.
Summary:
This code calculates the local angle of attack and sideslip on the kite
aerodynamic surfaces assuming rigid body mechanics about the c.g. of the
kite.
"""
from makani.analysis.aero.hover_model import hover_model
import numpy as np
class SSAMModel(object):
  """Class used to determine local angles of attack on kite sections."""

  def __init__(self, wing_model, wing_serial):
    """Initializes the SSAM model.

    Args:
      wing_model: Wing model (e.g. 'm600').
      wing_serial: String giving the desired wing serial number (e.g. '01').
    """
    # Use the hover model to obtain the kite main wing panels.
    self._kite_params = hover_model.GetParams(wing_model, wing_serial,
                                              use_wake_model=False)
    # Rigid body mechanics require understanding of the c.g. location as the
    # wing is defined to rotate about the body c.g. and the freestream
    # velocity is assumed uniform everywhere.
    self._cg_loc_b = self._kite_params['center_of_mass_pos']

  def GetMainWingAlphas(self, angular_rate_b, apparent_wind_b):
    """Computes the local alpha values on the main wing.

    Args:
      angular_rate_b: Array of shape (n, 3) containing kite body rates.
      apparent_wind_b: Array of shape (n, 3) containing apparent wind
          velocity components.

    Returns:
      main_wing_alphas_deg: Array of shape (n, x) containing the kite main
          wing local alpha values, where x is the number of main wing
          panels (an empty array if there are none).
    """
    assert len(np.shape(angular_rate_b)) == 2
    # Body rates must have exactly three components; the previous check only
    # tested that the second dimension was nonzero.
    assert np.shape(angular_rate_b)[1] == 3
    assert np.shape(angular_rate_b) == np.shape(apparent_wind_b)

    # Work on a copy so the caller's array is not mutated. Pitch rate is
    # ignored as it does not participate in the heaving motion of any of the
    # wing airfoils.
    angular_rate_b = np.array(angular_rate_b, copy=True)
    angular_rate_b[:, 1] = 0.0

    # Compute alpha values for each hover-model panel located on the main
    # wing. It is assumed that the kite rotates about its c.g.
    n_samples = np.shape(angular_rate_b)[0]
    panel_alphas = []
    for panel in self._kite_params['panels']:
      if not panel['name'].startswith('Wing'):
        continue
      r_panel = panel['pos_b'] - self._cg_loc_b
      # Tile the fixed panel position to match the batch of body rates so
      # the cross product in the helper operates row-wise.
      r_panel = np.repeat(np.expand_dims(r_panel, axis=1).transpose(),
                          n_samples, axis=0)
      panel_alpha_deg, _ = _ComputeRelativeAlphaBeta(angular_rate_b, r_panel,
                                                    apparent_wind_b)
      # Account for washout (relative incidence) if necessary.
      panel_alpha_deg += np.rad2deg(panel['relative_incidence'])
      panel_alphas.append(panel_alpha_deg)

    if not panel_alphas:
      return np.array([])
    # Stack per-panel columns into an (n, num_panels) array.
    return np.stack(panel_alphas, axis=1)
def _ComputeRelativeAlphaBeta(omega_b, position_b, apparent_wind_b):
  """Computes the local alpha and beta values, in degrees, from kinematics.

  The section velocity is the apparent wind minus the rigid-body rotational
  velocity (omega x r), and the aerodynamic angles follow the standard
  definitions:
      alpha = atan2(-v_z, -v_x)
      beta  = asin(-v_y / |v|)
  where _x, _y, _z are the body-frame components.

  Args:
    omega_b: Array of size (n, 3). Body rates of the kite [rad/s].
    position_b: Array of size (1, 3). Position of the surface at which to
        compute local alpha/beta [m].
    apparent_wind_b: Array of size (n, 3). Apparent wind vector from the
        state estimator [m/s].

  Returns:
    local_alpha_deg, local_beta_deg: The values of local alpha and beta.
  """
  assert np.shape(omega_b) == np.shape(apparent_wind_b)
  # The rigid-body motion (omega x r) opposes the flow seen by the section,
  # hence the subtraction from the apparent wind.
  rotational_vel = np.cross(omega_b, position_b, axisa=1, axisb=1)
  section_vel = apparent_wind_b - rotational_vel
  speed = np.linalg.norm(section_vel, axis=1)
  alpha_deg = np.rad2deg(np.arctan2(-1.0 * section_vel[:, 2],
                                -1.0 * section_vel[:, 0]))
  beta_deg = np.rad2deg(np.arcsin(-1.0 * section_vel[:, 1] / speed))
  return alpha_deg, beta_deg
| [
"numpy.cross",
"numpy.arcsin",
"numpy.array",
"makani.analysis.aero.hover_model.hover_model.GetParams",
"numpy.arctan2",
"numpy.expand_dims",
"numpy.linalg.norm",
"numpy.concatenate",
"numpy.shape",
"numpy.rad2deg"
] | [((5190, 5223), 'numpy.linalg.norm', 'np.linalg.norm', (['local_vel'], {'axis': '(1)'}), '(local_vel, axis=1)\n', (5204, 5223), True, 'import numpy as np\n'), ((1303, 1371), 'makani.analysis.aero.hover_model.hover_model.GetParams', 'hover_model.GetParams', (['wing_model', 'wing_serial'], {'use_wake_model': '(False)'}), '(wing_model, wing_serial, use_wake_model=False)\n', (1324, 1371), False, 'from makani.analysis.aero.hover_model import hover_model\n'), ((2661, 2673), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2669, 2673), True, 'import numpy as np\n'), ((4814, 4831), 'numpy.shape', 'np.shape', (['omega_b'], {}), '(omega_b)\n', (4822, 4831), True, 'import numpy as np\n'), ((4835, 4860), 'numpy.shape', 'np.shape', (['apparent_wind_b'], {}), '(apparent_wind_b)\n', (4843, 4860), True, 'import numpy as np\n'), ((5083, 5130), 'numpy.cross', 'np.cross', (['omega_b', 'position_b'], {'axisa': '(1)', 'axisb': '(1)'}), '(omega_b, position_b, axisa=1, axisb=1)\n', (5091, 5130), True, 'import numpy as np\n'), ((5255, 5313), 'numpy.arctan2', 'np.arctan2', (['(-1.0 * local_vel[:, 2])', '(-1.0 * local_vel[:, 0])'], {}), '(-1.0 * local_vel[:, 2], -1.0 * local_vel[:, 0])\n', (5265, 5313), True, 'import numpy as np\n'), ((5387, 5436), 'numpy.arcsin', 'np.arcsin', (['(-1.0 * local_vel[:, 1] / local_vel_mag)'], {}), '(-1.0 * local_vel[:, 1] / local_vel_mag)\n', (5396, 5436), True, 'import numpy as np\n'), ((2274, 2298), 'numpy.shape', 'np.shape', (['angular_rate_b'], {}), '(angular_rate_b)\n', (2282, 2298), True, 'import numpy as np\n'), ((2313, 2337), 'numpy.shape', 'np.shape', (['angular_rate_b'], {}), '(angular_rate_b)\n', (2321, 2337), True, 'import numpy as np\n'), ((2341, 2366), 'numpy.shape', 'np.shape', (['apparent_wind_b'], {}), '(apparent_wind_b)\n', (2349, 2366), True, 'import numpy as np\n'), ((2232, 2256), 'numpy.shape', 'np.shape', (['angular_rate_b'], {}), '(angular_rate_b)\n', (2240, 2256), True, 'import numpy as np\n'), ((2842, 2881), 'numpy.rad2deg', 
'np.rad2deg', (["panel['relative_incidence']"], {}), "(panel['relative_incidence'])\n", (2852, 2881), True, 'import numpy as np\n'), ((3538, 3577), 'numpy.expand_dims', 'np.expand_dims', (['panel_alpha_deg'], {'axis': '(1)'}), '(panel_alpha_deg, axis=1)\n', (3552, 3577), True, 'import numpy as np\n'), ((3752, 3815), 'numpy.concatenate', 'np.concatenate', (['(main_wing_alphas_deg, panel_alpha_deg)'], {'axis': '(1)'}), '((main_wing_alphas_deg, panel_alpha_deg), axis=1)\n', (3766, 3815), True, 'import numpy as np\n'), ((3221, 3245), 'numpy.shape', 'np.shape', (['angular_rate_b'], {}), '(angular_rate_b)\n', (3229, 3245), True, 'import numpy as np\n'), ((3589, 3619), 'numpy.shape', 'np.shape', (['main_wing_alphas_deg'], {}), '(main_wing_alphas_deg)\n', (3597, 3619), True, 'import numpy as np\n'), ((3626, 3651), 'numpy.shape', 'np.shape', (['panel_alpha_deg'], {}), '(panel_alpha_deg)\n', (3634, 3651), True, 'import numpy as np\n'), ((3148, 3179), 'numpy.expand_dims', 'np.expand_dims', (['r_panel'], {'axis': '(1)'}), '(r_panel, axis=1)\n', (3162, 3179), True, 'import numpy as np\n')] |
import argparse
import os
import random
import time
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score
from sklearn.utils import shuffle
from analysis import rocstories as rocstories_analysis
from analysis import pw as pw_analysis
from analysis import pw_retrieved as pw_retrieved_analysis
from datasets import pw, rocstories
from model_pytorch import DoubleHeadModel, load_openai_pretrained_model, freeze_transformer_params
from opt import OpenAIAdam
from text_utils import TextEncoder, TextHideWordsEncoder, TextSelectIndexEncoder
from utils import (encode_dataset, iter_data,
ResultLogger, make_path)
from loss import MultipleChoiceLossCompute, ClassificationLossCompute
def transform_roc(X1, X2, X3):
    """Packs ROCStories (context, ending1, ending2) into model input arrays.

    For every example, builds one row per candidate ending of the form
    [start] context [delimiter] ending [clf_token], plus a mask over the
    real tokens. Channel 1 of the last axis holds position ids. Relies on
    the module globals n_ctx, max_len, n_vocab, n_special, encoder and
    clf_token.
    """
    n_batch = len(X1)
    xmb = np.zeros((n_batch, 2, n_ctx, 2), dtype=np.int32)
    mmb = np.zeros((n_batch, 2, n_ctx), dtype=np.float32)
    start = encoder['_start_']
    delimiter = encoder['_delimiter_']
    for i, (ctx, end1, end2) in enumerate(zip(X1, X2, X3)):
        for k, ending in enumerate((end1, end2)):
            seq = [start] + ctx[:max_len] + [delimiter] + ending[:max_len] + [clf_token]
            xmb[i, k, :len(seq), 0] = seq
            mmb[i, k, :len(seq)] = 1
    # Position ids occupy the embedding slots after vocab + special tokens.
    xmb[:, :, :, 1] = np.arange(n_vocab + n_special, n_vocab + n_special + n_ctx)
    return xmb, mmb
def combine_locs(x1, x2, loc):
    """Merges per-sentence word locations into indices over the joined input.

    Offsets compensate for the special tokens in the concatenated sequence:
    +1 for the start token before sentence 1, and +len(x1)+2 for the start
    token, sentence 1, and the delimiter before sentence 2. Entry 0 of the
    location lists is the "whole" (possibly a multi-word span, possibly
    present in only one sentence) and contributes four indices; every later
    entry (part, jj) contributes two, giving a fixed-size location vector.
    """
    off1 = 1
    off2 = len(x1) + 2

    def span(l, off):
        # A tuple marks a multi-word span; a scalar becomes a one-word span.
        if type(l) is tuple:
            return [l[0] + off, l[1] + off]
        return [l + off, l + off]

    new_loc = []
    for ix, (l1, l2) in enumerate(zip(*loc)):
        if ix == 0:
            # The whole: when one sentence lacks it (-1), fall back to the
            # other sentence's location for that slot.
            new_loc.extend(span(l1, off1) if l1 != -1 else span(l2, off2))
            new_loc.extend(span(l2, off2) if l2 != -1 else span(l1, off1))
        elif l1 == -1:
            new_loc.extend([l2 + off2, l2 + off2])
        elif l2 == -1:
            new_loc.extend([l1 + off1, l1 + off1])
        else:
            new_loc.extend([l1 + off1, l2 + off2])
    return new_loc
def transform_pw(X1, X2, locs=None, hide_words=False):
    """Glues sentence pairs together with special tokens for the model.

    Builds, per kept example, a (2, n_ctx) array whose first row is the
    padded token sequence [start] x1 [delimiter] x2 [clf_token] and whose
    second row is the position ids, plus a 0/1 mask of the same width.
    Examples that are too long (or, with locs, have empty/missing
    locations) are skipped; the returned idxs list records which input
    indices survived so the caller can filter labels to match.

    Relies on module globals encoder, n_vocab, n_ctx, max_len and clf_token.
    NOTE(review): hide_words is accepted but never used in this body.

    Args:
        X1, X2: lists of token-id lists for the two sentences.
        locs: optional pair of per-sentence location lists (see combine_locs).
        hide_words: unused here — TODO confirm whether it should alter
            encoding.

    Returns:
        (xmb, mmb, lmb, idxs): token/position array, mask array, location
        array (empty when locs is None), and the kept input indices.
    """
    xmb = []#np.zeros((n_batch, n_ctx, 2), dtype=np.int32)
    mmb = []#np.zeros((n_batch, n_ctx), dtype=np.float32)
    #size is 8 - see combine_locs
    lmb = []#np.zeros((n_batch, 8), dtype=np.int32)
    start = encoder['_start_']
    delimiter = encoder['_delimiter_']
    fields = (X1, X2, *locs) if locs else (X1, X2)
    #position tokens
    # NOTE(review): this rebinds 'start', so the start-of-sequence token
    # used below is n_vocab (the first position-token id) rather than
    # encoder['_start_'] — confirm this shadowing is intended.
    start = n_vocab
    end = n_vocab + n_ctx
    pos_toks = np.arange(start, end)
    idxs = []
    for idx,data in enumerate(zip(*fields)):
        if locs:
            x1, x2, *loc = data
            if len(x1) > max_len or len(x2) > max_len or len(x1) + len(x2) > n_ctx:
                #too long, don't deal
                continue
            loc = combine_locs(x1, x2, loc)
            if not any(loc) or not any(x1) or not any(x2):
                continue
            lmb.append(loc)
        else:
            x1, x2 = data
        #concatenate
        x = [start] + x1[:max_len]+[delimiter]+x2[:max_len]+[clf_token]
        l = len(x)
        #set np array
        #xmb[i,:l,0] = x
        # First row: zero-padded tokens; second row: position ids.
        xmb.append(np.vstack((x + [0] * (n_ctx - l), pos_toks)))
        #mask
        #mmb[i,:l] = 1
        mmb.append([1] * l + [0] * (n_ctx - l))
        idxs.append(idx)
    return np.array(xmb), np.array(mmb), np.array(lmb), idxs
def iter_apply(Xs, Ms, Ys, Ls=None):
    """Evaluates the model on a labeled split, returning logits and total cost.

    Batches the inputs, runs the (globally bound) dh_model without
    gradients, and accumulates classifier logits and loss. Each batch's
    contribution is scaled by its size n so the caller can divide by the
    number of examples to get a mean (batches may be ragged with
    truncate=False). Ls, when given, is the hard-selection index tensor.

    NOTE(review): clf_logits is scaled by n *before* the loss is computed,
    so the loss sees scaled logits; presumably only the accumulated loss
    was meant to be weighted — confirm. (Argmax over logits is unaffected
    by the positive scaling.)

    Returns:
        (logits, cost): concatenated scaled logits and summed scaled loss.
    """
    # fns = [lambda x: np.concatenate(x, 0), lambda x: float(np.sum(x))]
    fields = (Xs, Ms, Ys, Ls) if Ls is not None else (Xs, Ms, Ys)
    logits = []
    cost = 0
    with torch.no_grad():
        dh_model.eval()
        for field in iter_data(*fields, n_batch=n_batch_train, truncate=False, verbose=True):
            if len(fields) == 3:
                xmb, mmb, ymb = field
            else:
                xmb, mmb, ymb, lmb = field
            n = len(xmb)
            XMB = torch.tensor(xmb, dtype=torch.long).to(device)
            YMB = torch.tensor(ymb, dtype=torch.long).to(device)
            MMB = torch.tensor(mmb).to(device)
            if len(fields) > 3:
                # Hard-select variant: pass the location indices as well.
                LMB = torch.tensor(lmb, dtype=torch.long).to(device)
                _, clf_logits = dh_model(XMB, LMB)
            else:
                _, clf_logits = dh_model(XMB)
            clf_logits *= n
            clf_losses = compute_loss_fct(XMB, YMB, MMB, clf_logits, only_return_losses=True)
            clf_losses *= n
            logits.append(clf_logits.to("cpu").numpy())
            cost += clf_losses.sum().item()
    logits = np.concatenate(logits, 0)
    return logits, cost
def iter_predict(Xs, Ms, Ls=None):
    """Runs the model over a split and returns the stacked classifier logits.

    Ls, when given, carries the hard-selection index tensor. Uses the
    module-level dh_model, device, and n_batch_train bindings.
    """
    batches = (Xs, Ms) if Ls is None else (Xs, Ms, Ls)
    all_logits = []
    with torch.no_grad():
        dh_model.eval()
        for batch in iter_data(*batches, n_batch=n_batch_train, truncate=False, verbose=True):
            lmb = None
            if Ls is None:
                xmb, mmb = batch
            else:
                xmb, mmb, lmb = batch
            XMB = torch.tensor(xmb, dtype=torch.long).to(device)
            # Mask tensor is built for parity with iter_apply; the forward
            # call here does not consume it.
            MMB = torch.tensor(mmb).to(device)
            if lmb is not None:
                LMB = torch.tensor(lmb, dtype=torch.long).to(device)
                _, clf_logits = dh_model(XMB, LMB)
            else:
                _, clf_logits = dh_model(XMB)
            all_logits.append(clf_logits.to("cpu").numpy())
    return np.concatenate(all_logits, 0)
def log(save_dir, desc):
    """Logs train/validation cost and accuracy; checkpoints on a new best.

    Evaluates the first n_valid training examples and the full validation
    split (reading the module-level trX/trM/trY/vaX/vaM/vaY arrays, plus
    trL/vaL when hard_select is set), writes a line to the global logger,
    and — when submit is set — saves the model state dict whenever
    validation accuracy improves on the global best_score.
    """
    global best_score
    print("Logging")
    tr_inps = trX[:n_valid], trM[:n_valid], trY[:n_valid]
    va_inps = vaX, vaM, vaY
    if hard_select:
        # Hard-select variant also needs the location tensors.
        tr_inps = (*tr_inps, trL[:n_valid])
        va_inps = (*va_inps, vaL)
    tr_logits, tr_cost = iter_apply(*tr_inps)
    va_logits, va_cost = iter_apply(*va_inps)
    # iter_apply returns size-weighted sums; divide to get per-example cost.
    tr_cost = tr_cost / len(trY[:n_valid])
    va_cost = va_cost / n_valid
    tr_acc = accuracy_score(trY[:n_valid], np.argmax(tr_logits, 1)) * 100.
    va_acc = accuracy_score(vaY, np.argmax(va_logits, 1)) * 100.
    logger.log(n_epochs=n_epochs, n_updates=n_updates, tr_cost=tr_cost, va_cost=va_cost, tr_acc=tr_acc, va_acc=va_acc)
    print('%d %d %.3f %.3f %.2f %.2f' % (n_epochs, n_updates, tr_cost, va_cost, tr_acc, va_acc))
    if submit:
        score = va_acc
        if score > best_score:
            # New best on validation accuracy: checkpoint the model.
            best_score = score
            path = os.path.join(save_dir, desc + '_' + exec_time, 'best_params')
            torch.save(dh_model.state_dict(), make_path(path))
def predict(dataset, submission_dir, test):
    """Writes model predictions for the dev or test split to a TSV file.

    Picks the split from the module-level encoded arrays (teX/teM/teL when
    test is true, otherwise vaX/vaM/vaL), converts logits to predictions
    via the dataset's pred_fn, optionally decodes class indices to labels,
    and writes an index/prediction TSV. Returns the path written.
    """
    filename = filenames[dataset].replace('.tsv', f'_{exec_time}.tsv')
    pred_fn = pred_fns[dataset]
    label_decoder = label_decoders[dataset]
    if test:
        inputs = (teX, teM, teL) if hard_select else (teX, teM)
    else:
        inputs = (vaX, vaM, vaL) if hard_select else (vaX, vaM)
    predictions = pred_fn(iter_predict(*inputs))
    if label_decoder is not None:
        predictions = [label_decoder[p] for p in predictions]
    out_path = os.path.join(submission_dir, filename)
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    with open(out_path, 'w') as f:
        f.write('{}\t{}\n'.format('index', 'prediction'))
        for row_idx, pred in enumerate(predictions):
            f.write('{}\t{}\n'.format(row_idx, pred))
    return out_path
def run_epoch(fields):
    """Runs one training epoch over the given (X, M, Y[, L]) arrays.

    Shuffles the data, batches it, and performs one optimizer step per
    batch via the global compute_loss_fct. Increments the global n_updates
    counter and triggers intermediate logging at fixed update counts during
    the first epoch. Reads the module-level dh_model, device,
    n_batch_train, n_gpu and n_epochs bindings.
    """
    for ix,field in enumerate(iter_data(*shuffle(*fields, random_state=np.random),
                                        n_batch=n_batch_train, truncate=True, verbose=True)):
        global n_updates
        # NOTE(review): these branches drop into pdb when the embedding
        # weights contain NaN — leftover debugging aids; consider removing
        # or replacing with an exception before production runs.
        if n_gpu > 1 and np.isnan(dh_model.module.transformer.embed.weight.data.cpu().numpy()).any():
            print("null")
            import pdb; pdb.set_trace()
        elif n_gpu <= 1 and np.isnan(dh_model.transformer.embed.weight.data.cpu().numpy()).any():
            print("null")
            import pdb; pdb.set_trace()
        dh_model.train()
        if len(fields) == 3:
            xmb, mmb, ymb = field
        else:
            xmb, mmb, ymb, lmb = field
        # NOTE(review): the transpose(1,2) here has no counterpart in
        # iter_apply/iter_predict — confirm the intended input layout is
        # consistent between training and evaluation.
        XMB = torch.tensor(xmb, dtype=torch.long).to(device).transpose(1,2)
        YMB = torch.tensor(ymb, dtype=torch.long).to(device)
        MMB = torch.tensor(mmb, dtype=torch.float).to(device)
        if len(fields) > 3:
            # Hard-select variant: pass the location indices as well.
            LMB = torch.tensor(lmb, dtype=torch.long).to(device)
            lm_logits, clf_logits = dh_model(XMB, LMB)
        else:
            lm_logits, clf_logits = dh_model(XMB)
        compute_loss_fct(XMB, YMB, MMB, clf_logits, lm_logits)#, debug=ix==39)
        n_updates += 1
        # Early-epoch checkpoints at fixed update counts.
        if n_updates in [1000, 2000, 4000, 8000, 16000, 32000] and n_epochs == 0:
            log(save_dir, desc)
def argmax(x):
    """Default prediction function: index of the max logit along axis 1."""
    # PEP 8 (E731): use a def instead of assigning a lambda to a name.
    return np.argmax(x, 1)


# Per-dataset lookup tables, keyed by the --dataset flag.
pred_fns = {
    'rocstories': argmax,
    'pw': argmax,
    'pw-retrieved': argmax,
}
# Base filenames for the prediction TSVs (timestamped in predict()).
filenames = {
    'rocstories': 'ROCStories.tsv',
    'pw': 'pw_preds.tsv',
    'pw-retrieved': 'pw_retrieved_preds.tsv',
}
# Optional mapping from class index back to a label (None = write raw index).
label_decoders = {
    'rocstories': None,
    'pw': None,
    'pw-retrieved': None,
}
# Post-hoc analysis modules per dataset.
analyses = {
    'rocstories': rocstories_analysis,
    'pw': pw_analysis,
    'pw-retrieved': pw_retrieved_analysis,
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--desc', type=str, help="Description")
parser.add_argument('--dataset', choices=['pw', 'rocstories', 'pw-retrieved'])
parser.add_argument('--log_dir', type=str, default='log/')
parser.add_argument('--save_dir', type=str, default='save/')
parser.add_argument('--data_dir', type=str, default='data/')
parser.add_argument('--submission_dir', type=str, default='submission/')
parser.add_argument('--params_path', type=str, default='model/params_shapes.json', help="path to params file if using previously finetuned params")
parser.add_argument('--submit', action='store_true')
parser.add_argument('--analysis', action='store_true')
parser.add_argument('--ordinal', action='store_true', help="flag to do 5-class prediction instead of binary")
parser.add_argument('--test', action='store_true', help="flag to run on test")
parser.add_argument('--freeze_lm', action='store_true', help="flag to freeze (not update) LM weights - only train the classifier")
parser.add_argument('--hard_select', action='store_true', help="flag to use as final layer representation the concatenation of hidden states at appropriate indices of input")
parser.add_argument('--hide_words', action='store_true', help="flag to replace whole, part, and jj words with special tokens")
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--n_iter', type=int, default=3)
parser.add_argument('--n_batch', type=int, default=8)
parser.add_argument('--max_grad_norm', type=int, default=1)
parser.add_argument('--lr', type=float, default=6.25e-5)
parser.add_argument('--lr_warmup', type=float, default=0.002)
parser.add_argument('--n_ctx', type=int, default=512)
parser.add_argument('--n_embd', type=int, default=768)
parser.add_argument('--n_head', type=int, default=12)
parser.add_argument('--n_layer', type=int, default=12)
parser.add_argument('--embd_pdrop', type=float, default=0.1)
parser.add_argument('--attn_pdrop', type=float, default=0.1)
parser.add_argument('--resid_pdrop', type=float, default=0.1)
parser.add_argument('--clf_pdrop', type=float, default=0.1)
parser.add_argument('--l2', type=float, default=0.01)
parser.add_argument('--vector_l2', action='store_true')
parser.add_argument('--opt', type=str, default='adam')
parser.add_argument('--afn', type=str, default='gelu')
parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
parser.add_argument('--encoder_path', type=str, default='model/encoder_bpe_40000.json')
parser.add_argument('--bpe_path', type=str, default='model/vocab_40000.bpe')
parser.add_argument('--n_transfer', type=int, default=12)
parser.add_argument('--lm_coef', type=float, default=0.5)
parser.add_argument('--b1', type=float, default=0.9)
parser.add_argument('--b2', type=float, default=0.999)
parser.add_argument('--e', type=float, default=1e-8)
parser.add_argument('--n_valid', type=int, default=374)
args = parser.parse_args()
args.exec_time = time.strftime('%b_%d_%H:%M:%S', time.localtime())
print(args)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# Constants
submit = args.submit
dataset = args.dataset
n_ctx = args.n_ctx
save_dir = args.save_dir
desc = args.desc
data_dir = args.data_dir
log_dir = args.log_dir
submission_dir = args.submission_dir
hard_select = args.hard_select
exec_time = args.exec_time
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print("device", device, "n_gpu", n_gpu)
log_file = os.path.join(log_dir, '{}_{}.jsonl'.format(desc, exec_time))
logger = ResultLogger(path=log_file, **args.__dict__)
# formatting stuff
if hard_select:
text_encoder = TextSelectIndexEncoder(args.encoder_path, args.bpe_path)
elif args.hide_words:
text_encoder = TextHideWordsEncoder(args.encoder_path, args.bpe_path)
else:
text_encoder = TextEncoder(args.encoder_path, args.bpe_path)
encoder = text_encoder.encoder
n_vocab = len(text_encoder.encoder)
print("Encoding dataset...")
if args.dataset == 'rocstories':
(trX1, trX2, trX3, trY), (vaX1, vaX2, vaX3, vaY), (teX1, teX2, teX3) = encode_dataset(rocstories(data_dir), encoder=text_encoder)
elif args.dataset.startswith('pw'):
train_file = os.path.join(data_dir, ('retrieved_2_train_feats.jsonl') if args.dataset == 'pw-retrieved' else 'snli_style_train_feats.jsonl')
if hard_select or args.hide_words:
trdata, dvdata, tedata, triples = pw(train_file, args.ordinal, True)
if hard_select:
texts_tokens, locs = encode_dataset((trdata, dvdata, tedata), encoder=text_encoder, triples=triples)
(trX1, trX2, trY), (vaX1, vaX2, vaY), (teX1, teX2, teY) = texts_tokens
else:
(trX1, trX2, trY), (vaX1, vaX2, vaY), (teX1, teX2, teY) = encode_dataset((trdata, dvdata, tedata), encoder=text_encoder, triples=triples)
else:
(trX1, trX2, trY), (vaX1, vaX2, vaY), (teX1, teX2, teY) = encode_dataset(pw(train_file, args.ordinal, hard_select), encoder=text_encoder)
#output: unpadded lists of word indices
#special token
clf_token = encoder['_classify_']
#number of special characters
n_special = 6 if args.hide_words else 3
max_len = n_ctx // 2 - 2
n_additions = 0
#get max length of story + answer in train, val, test
#take min of (that + 3), n_ctx
#the 3 is to take care of the special start, delimiter, end tokens
if args.dataset == 'rocstories':
n_ctx = min(max([len(x1[:max_len])+max(len(x2[:max_len]), len(x3[:max_len])) for x1, x2, x3 in zip(trX1, trX2, trX3)]+[len(x1[:max_len])+max(len(x2[:max_len]), len(x3[:max_len])) for x1, x2, x3 in zip(vaX1, vaX2, vaX3)]+[len(x1[:max_len])+max(len(x2[:max_len]), len(x3[:max_len])) for x1, x2, x3 in zip(teX1, teX2, teX3)])+n_special, n_ctx)
elif args.dataset.startswith('pw'):
n_ctx = min(max([len(x1[:max_len])+len(x2[:max_len]) for x1, x2 in zip(trX1, trX2)]+[len(x1[:max_len])+len(x2[:max_len]) for x1, x2 in zip(vaX1, vaX2)]+[len(x1[:max_len])+len(x2[:max_len]) for x1, x2 in zip(teX1, teX2)])+n_special, n_ctx)
if args.dataset == 'rocstories':
trX, trM = transform_roc(trX1, trX2, trX3)
vaX, vaM = transform_roc(vaX1, vaX2, vaX3)
if submit:
teX, teM = transform_roc(teX1, teX2, teX3)
elif args.dataset.startswith('pw'):
if hard_select:
print("encoding locs")
trX, trM, trL, idxs = transform_pw(trX1, trX2, locs[0], hide_words=args.hide_words)
trY = trY[idxs]
vaX, vaM, vaL, idxs = transform_pw(vaX1, vaX2, locs[1], hide_words=args.hide_words)
vaY = vaY[idxs]
if submit:
teX, teM, teL, idxs = transform_pw(teX1, teX2, locs[2], hide_words=args.hide_words)
teY = teY[idxs]
else:
trX, trM, _, idxs = transform_pw(trX1, trX2, hide_words=args.hide_words)
trY = trY[idxs]
vaX, vaM, _, idxs = transform_pw(vaX1, vaX2, hide_words=args.hide_words)
vaY = vaY[idxs]
if submit:
teX, teM, _, idxs = transform_pw(teX1, teX2, hide_words=args.hide_words)
teY = teY[idxs]
n_train = len(trY)
n_valid = len(vaY)
n_batch_train = args.n_batch * max(n_gpu, 1)
n_updates_total = (n_train // n_batch_train) * args.n_iter
if args.dataset == 'rocstories':
task = 'multiple_choice'
elif args.dataset.startswith('pw'):
task = ('inference', 5 if args.ordinal else 2)
#if hide words, ignore the extra three we added
vocab = n_vocab + n_ctx
dh_model = DoubleHeadModel(args, clf_token, task, vocab, n_ctx, hard_select=hard_select)
criterion = nn.CrossEntropyLoss(reduce=False)
model_opt = OpenAIAdam(dh_model.parameters(),
lr=args.lr,
schedule=args.lr_schedule,
warmup=args.lr_warmup,
t_total=n_updates_total,
b1=args.b1,
b2=args.b2,
e=args.e,
l2=args.l2,
vector_l2=args.vector_l2,
max_grad_norm=args.max_grad_norm)
if args.dataset == 'rocstories':
compute_loss_fct = MultipleChoiceLossCompute(criterion,
criterion,
args.lm_coef,
model_opt)
elif args.dataset.startswith('pw'):
compute_loss_fct = ClassificationLossCompute(criterion,
criterion,
args.lm_coef,
model_opt)
if args.params_path == 'model/params_shapes.json':
load_openai_pretrained_model(dh_model.transformer, n_ctx=n_ctx, n_special=n_special)
else:
sd = torch.load(args.params_path)
sd = {(name[7:] if name.startswith('module.') else name):val for name,val in sd.items()}
dh_model.load_state_dict(sd)
args.n_iter = 0
if args.freeze_lm:
print("freezing params")
freeze_transformer_params(dh_model)
dh_model.to(device)
if n_gpu > 1:
dh_model = nn.DataParallel(dh_model)
n_updates = 0
n_epochs = 0
if dataset != 'stsb':
trYt = trY
#if submit:
# path = os.path.join(save_dir, desc, 'best_params')
# torch.save(dh_model.state_dict(), make_path(path))
best_score = 0
fields = (trX, trM, trYt, trL) if hard_select else (trX, trM, trYt)
for i in range(args.n_iter):
print("running epoch", i)
run_epoch(fields)
n_epochs += 1
log(save_dir, desc)
if submit:
path = os.path.join(save_dir, desc, 'best_params')
dh_model.load_state_dict(torch.load(path))
pred_path = predict(dataset, args.submission_dir, args.test)
if args.analysis:
analy_fn = analyses[dataset]
analy_fn(data_dir, pred_path, os.path.join(log_dir, log_file), test=args.test, ordinal=args.ordinal)
| [
"torch.nn.CrossEntropyLoss",
"utils.make_path",
"torch.cuda.device_count",
"numpy.array",
"torch.cuda.is_available",
"datasets.pw",
"numpy.arange",
"utils.ResultLogger",
"text_utils.TextEncoder",
"argparse.ArgumentParser",
"numpy.random.seed",
"numpy.concatenate",
"loss.MultipleChoiceLossCom... | [((805, 853), 'numpy.zeros', 'np.zeros', (['(n_batch, 2, n_ctx, 2)'], {'dtype': 'np.int32'}), '((n_batch, 2, n_ctx, 2), dtype=np.int32)\n', (813, 853), True, 'import numpy as np\n'), ((864, 911), 'numpy.zeros', 'np.zeros', (['(n_batch, 2, n_ctx)'], {'dtype': 'np.float32'}), '((n_batch, 2, n_ctx), dtype=np.float32)\n', (872, 911), True, 'import numpy as np\n'), ((1388, 1447), 'numpy.arange', 'np.arange', (['(n_vocab + n_special)', '(n_vocab + n_special + n_ctx)'], {}), '(n_vocab + n_special, n_vocab + n_special + n_ctx)\n', (1397, 1447), True, 'import numpy as np\n'), ((3636, 3657), 'numpy.arange', 'np.arange', (['start', 'end'], {}), '(start, end)\n', (3645, 3657), True, 'import numpy as np\n'), ((6521, 6546), 'numpy.concatenate', 'np.concatenate', (['logits', '(0)'], {}), '(logits, 0)\n', (6535, 6546), True, 'import numpy as np\n'), ((8104, 8142), 'os.path.join', 'os.path.join', (['submission_dir', 'filename'], {}), '(submission_dir, filename)\n', (8116, 8142), False, 'import os\n'), ((9752, 9767), 'numpy.argmax', 'np.argmax', (['x', '(1)'], {}), '(x, 1)\n', (9761, 9767), True, 'import numpy as np\n'), ((10231, 10256), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10254, 10256), False, 'import argparse\n'), ((13409, 13431), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (13420, 13431), False, 'import random\n'), ((13436, 13461), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (13450, 13461), True, 'import numpy as np\n'), ((13466, 13494), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (13483, 13494), False, 'import torch\n'), ((13499, 13536), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (13525, 13536), False, 'import torch\n'), ((13929, 13954), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (13952, 13954), False, 'import torch\n'), ((14089, 
14133), 'utils.ResultLogger', 'ResultLogger', ([], {'path': 'log_file'}), '(path=log_file, **args.__dict__)\n', (14101, 14133), False, 'from utils import encode_dataset, iter_data, ResultLogger, make_path\n'), ((18193, 18270), 'model_pytorch.DoubleHeadModel', 'DoubleHeadModel', (['args', 'clf_token', 'task', 'vocab', 'n_ctx'], {'hard_select': 'hard_select'}), '(args, clf_token, task, vocab, n_ctx, hard_select=hard_select)\n', (18208, 18270), False, 'from model_pytorch import DoubleHeadModel, load_openai_pretrained_model, freeze_transformer_params\n'), ((18288, 18321), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduce': '(False)'}), '(reduce=False)\n', (18307, 18321), True, 'import torch.nn as nn\n'), ((4455, 4468), 'numpy.array', 'np.array', (['xmb'], {}), '(xmb)\n', (4463, 4468), True, 'import numpy as np\n'), ((4470, 4483), 'numpy.array', 'np.array', (['mmb'], {}), '(mmb)\n', (4478, 4483), True, 'import numpy as np\n'), ((4485, 4498), 'numpy.array', 'np.array', (['lmb'], {}), '(lmb)\n', (4493, 4498), True, 'import numpy as np\n'), ((4720, 4735), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4733, 4735), False, 'import torch\n'), ((4782, 4853), 'utils.iter_data', 'iter_data', (['*fields'], {'n_batch': 'n_batch_train', 'truncate': '(False)', 'verbose': '(True)'}), '(*fields, n_batch=n_batch_train, truncate=False, verbose=True)\n', (4791, 4853), False, 'from utils import encode_dataset, iter_data, ResultLogger, make_path\n'), ((5672, 5697), 'numpy.concatenate', 'np.concatenate', (['logits', '(0)'], {}), '(logits, 0)\n', (5686, 5697), True, 'import numpy as np\n'), ((5842, 5857), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5855, 5857), False, 'import torch\n'), ((5904, 5975), 'utils.iter_data', 'iter_data', (['*fields'], {'n_batch': 'n_batch_train', 'truncate': '(False)', 'verbose': '(True)'}), '(*fields, n_batch=n_batch_train, truncate=False, verbose=True)\n', (5913, 5975), False, 'from utils import encode_dataset, iter_data, 
ResultLogger, make_path\n'), ((8159, 8180), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (8174, 8180), False, 'import os\n'), ((13370, 13386), 'time.localtime', 'time.localtime', ([], {}), '()\n', (13384, 13386), False, 'import time\n'), ((14200, 14256), 'text_utils.TextSelectIndexEncoder', 'TextSelectIndexEncoder', (['args.encoder_path', 'args.bpe_path'], {}), '(args.encoder_path, args.bpe_path)\n', (14222, 14256), False, 'from text_utils import TextEncoder, TextHideWordsEncoder, TextSelectIndexEncoder\n'), ((18899, 18971), 'loss.MultipleChoiceLossCompute', 'MultipleChoiceLossCompute', (['criterion', 'criterion', 'args.lm_coef', 'model_opt'], {}), '(criterion, criterion, args.lm_coef, model_opt)\n', (18924, 18971), False, 'from loss import MultipleChoiceLossCompute, ClassificationLossCompute\n'), ((19494, 19583), 'model_pytorch.load_openai_pretrained_model', 'load_openai_pretrained_model', (['dh_model.transformer'], {'n_ctx': 'n_ctx', 'n_special': 'n_special'}), '(dh_model.transformer, n_ctx=n_ctx, n_special=\n n_special)\n', (19522, 19583), False, 'from model_pytorch import DoubleHeadModel, load_openai_pretrained_model, freeze_transformer_params\n'), ((19602, 19630), 'torch.load', 'torch.load', (['args.params_path'], {}), '(args.params_path)\n', (19612, 19630), False, 'import torch\n'), ((19854, 19889), 'model_pytorch.freeze_transformer_params', 'freeze_transformer_params', (['dh_model'], {}), '(dh_model)\n', (19879, 19889), False, 'from model_pytorch import DoubleHeadModel, load_openai_pretrained_model, freeze_transformer_params\n'), ((19952, 19977), 'torch.nn.DataParallel', 'nn.DataParallel', (['dh_model'], {}), '(dh_model)\n', (19967, 19977), True, 'import torch.nn as nn\n'), ((20460, 20503), 'os.path.join', 'os.path.join', (['save_dir', 'desc', '"""best_params"""'], {}), "(save_dir, desc, 'best_params')\n", (20472, 20503), False, 'import os\n'), ((4288, 4332), 'numpy.vstack', 'np.vstack', (['(x + [0] * (n_ctx - l), pos_toks)'], {}), '((x 
+ [0] * (n_ctx - l), pos_toks))\n', (4297, 4332), True, 'import numpy as np\n'), ((7029, 7052), 'numpy.argmax', 'np.argmax', (['tr_logits', '(1)'], {}), '(tr_logits, 1)\n', (7038, 7052), True, 'import numpy as np\n'), ((7094, 7117), 'numpy.argmax', 'np.argmax', (['va_logits', '(1)'], {}), '(va_logits, 1)\n', (7103, 7117), True, 'import numpy as np\n'), ((7461, 7522), 'os.path.join', 'os.path.join', (['save_dir', "(desc + '_' + exec_time)", '"""best_params"""'], {}), "(save_dir, desc + '_' + exec_time, 'best_params')\n", (7473, 7522), False, 'import os\n'), ((8783, 8798), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (8796, 8798), False, 'import pdb\n'), ((13879, 13904), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13902, 13904), False, 'import torch\n'), ((14306, 14360), 'text_utils.TextHideWordsEncoder', 'TextHideWordsEncoder', (['args.encoder_path', 'args.bpe_path'], {}), '(args.encoder_path, args.bpe_path)\n', (14326, 14360), False, 'from text_utils import TextEncoder, TextHideWordsEncoder, TextSelectIndexEncoder\n'), ((14394, 14439), 'text_utils.TextEncoder', 'TextEncoder', (['args.encoder_path', 'args.bpe_path'], {}), '(args.encoder_path, args.bpe_path)\n', (14405, 14439), False, 'from text_utils import TextEncoder, TextHideWordsEncoder, TextSelectIndexEncoder\n'), ((14680, 14700), 'datasets.rocstories', 'rocstories', (['data_dir'], {}), '(data_dir)\n', (14690, 14700), False, 'from datasets import pw, rocstories\n'), ((14785, 14914), 'os.path.join', 'os.path.join', (['data_dir', "('retrieved_2_train_feats.jsonl' if args.dataset == 'pw-retrieved' else\n 'snli_style_train_feats.jsonl')"], {}), "(data_dir, 'retrieved_2_train_feats.jsonl' if args.dataset ==\n 'pw-retrieved' else 'snli_style_train_feats.jsonl')\n", (14797, 14914), False, 'import os\n'), ((19198, 19270), 'loss.ClassificationLossCompute', 'ClassificationLossCompute', (['criterion', 'criterion', 'args.lm_coef', 'model_opt'], {}), '(criterion, criterion, args.lm_coef, 
model_opt)\n', (19223, 19270), False, 'from loss import MultipleChoiceLossCompute, ClassificationLossCompute\n'), ((20537, 20553), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (20547, 20553), False, 'import torch\n'), ((7569, 7584), 'utils.make_path', 'make_path', (['path'], {}), '(path)\n', (7578, 7584), False, 'from utils import encode_dataset, iter_data, ResultLogger, make_path\n'), ((8475, 8515), 'sklearn.utils.shuffle', 'shuffle', (['*fields'], {'random_state': 'np.random'}), '(*fields, random_state=np.random)\n', (8482, 8515), False, 'from sklearn.utils import shuffle\n'), ((8947, 8962), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (8960, 8962), False, 'import pdb\n'), ((9194, 9229), 'torch.tensor', 'torch.tensor', (['ymb'], {'dtype': 'torch.long'}), '(ymb, dtype=torch.long)\n', (9206, 9229), False, 'import torch\n'), ((9255, 9291), 'torch.tensor', 'torch.tensor', (['mmb'], {'dtype': 'torch.float'}), '(mmb, dtype=torch.float)\n', (9267, 9291), False, 'import torch\n'), ((15002, 15036), 'datasets.pw', 'pw', (['train_file', 'args.ordinal', '(True)'], {}), '(train_file, args.ordinal, True)\n', (15004, 15036), False, 'from datasets import pw, rocstories\n'), ((20733, 20764), 'os.path.join', 'os.path.join', (['log_dir', 'log_file'], {}), '(log_dir, log_file)\n', (20745, 20764), False, 'import os\n'), ((5030, 5065), 'torch.tensor', 'torch.tensor', (['xmb'], {'dtype': 'torch.long'}), '(xmb, dtype=torch.long)\n', (5042, 5065), False, 'import torch\n'), ((5095, 5130), 'torch.tensor', 'torch.tensor', (['ymb'], {'dtype': 'torch.long'}), '(ymb, dtype=torch.long)\n', (5107, 5130), False, 'import torch\n'), ((5160, 5177), 'torch.tensor', 'torch.tensor', (['mmb'], {}), '(mmb)\n', (5172, 5177), False, 'import torch\n'), ((6142, 6177), 'torch.tensor', 'torch.tensor', (['xmb'], {'dtype': 'torch.long'}), '(xmb, dtype=torch.long)\n', (6154, 6177), False, 'import torch\n'), ((6207, 6224), 'torch.tensor', 'torch.tensor', (['mmb'], {}), '(mmb)\n', (6219, 6224), 
False, 'import torch\n'), ((9349, 9384), 'torch.tensor', 'torch.tensor', (['lmb'], {'dtype': 'torch.long'}), '(lmb, dtype=torch.long)\n', (9361, 9384), False, 'import torch\n'), ((15102, 15181), 'utils.encode_dataset', 'encode_dataset', (['(trdata, dvdata, tedata)'], {'encoder': 'text_encoder', 'triples': 'triples'}), '((trdata, dvdata, tedata), encoder=text_encoder, triples=triples)\n', (15116, 15181), False, 'from utils import encode_dataset, iter_data, ResultLogger, make_path\n'), ((15361, 15440), 'utils.encode_dataset', 'encode_dataset', (['(trdata, dvdata, tedata)'], {'encoder': 'text_encoder', 'triples': 'triples'}), '((trdata, dvdata, tedata), encoder=text_encoder, triples=triples)\n', (15375, 15440), False, 'from utils import encode_dataset, iter_data, ResultLogger, make_path\n'), ((15540, 15581), 'datasets.pw', 'pw', (['train_file', 'args.ordinal', 'hard_select'], {}), '(train_file, args.ordinal, hard_select)\n', (15542, 15581), False, 'from datasets import pw, rocstories\n'), ((5243, 5278), 'torch.tensor', 'torch.tensor', (['lmb'], {'dtype': 'torch.long'}), '(lmb, dtype=torch.long)\n', (5255, 5278), False, 'import torch\n'), ((6290, 6325), 'torch.tensor', 'torch.tensor', (['lmb'], {'dtype': 'torch.long'}), '(lmb, dtype=torch.long)\n', (6302, 6325), False, 'import torch\n'), ((9118, 9153), 'torch.tensor', 'torch.tensor', (['xmb'], {'dtype': 'torch.long'}), '(xmb, dtype=torch.long)\n', (9130, 9153), False, 'import torch\n')] |
import cv2
import numpy as np
from imutils.video import FileVideoStream
import imutils
import time
vs = FileVideoStream('messi.webm').start()
while vs.more():
frame=vs.read()
if frame is None:
continue
output=frame.copy()
gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
gray=cv2.medianBlur(gray,5)
gray=cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,3,5)
kernel=np.ones((3,3),np.uint8)
gray=cv2.erode(gray,kernel,iterations=1)
gray=cv2.dilate(gray,kernel,iterations=1)
circles=cv2.HoughCircles(gray,cv2.HOUGH_GRADIENT,1,260,param1=30,param2=65,minRadius=0)
radii=[]
if circles is None:
continue
circles=np.uint16(np.around(circles))
for i in range(circles.shape[0]):
radii.append(circles[i][0][2])
R=max(radii)
X=None
Y=None
for i in range(circles.shape[0]):
if circles[i][0][2]==R:
X=circles[i][0][0]
Y=circles[i][0][1]
break
cv2.circle(output,(X,Y),R,(0,255,0),4)
cv2.imshow('result',output)
cv2.waitKey(1)
cv2.destroyAllWindows()
vs.stop() | [
"numpy.ones",
"imutils.video.FileVideoStream",
"cv2.erode",
"cv2.medianBlur",
"cv2.HoughCircles",
"cv2.imshow",
"cv2.adaptiveThreshold",
"cv2.circle",
"cv2.destroyAllWindows",
"numpy.around",
"cv2.cvtColor",
"cv2.dilate",
"cv2.waitKey"
] | [((976, 999), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (997, 999), False, 'import cv2\n'), ((233, 272), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (245, 272), False, 'import cv2\n'), ((278, 301), 'cv2.medianBlur', 'cv2.medianBlur', (['gray', '(5)'], {}), '(gray, 5)\n', (292, 301), False, 'import cv2\n'), ((307, 401), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['gray', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(3)', '(5)'], {}), '(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 3, 5)\n', (328, 401), False, 'import cv2\n'), ((400, 425), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (407, 425), True, 'import numpy as np\n'), ((430, 467), 'cv2.erode', 'cv2.erode', (['gray', 'kernel'], {'iterations': '(1)'}), '(gray, kernel, iterations=1)\n', (439, 467), False, 'import cv2\n'), ((472, 510), 'cv2.dilate', 'cv2.dilate', (['gray', 'kernel'], {'iterations': '(1)'}), '(gray, kernel, iterations=1)\n', (482, 510), False, 'import cv2\n'), ((518, 607), 'cv2.HoughCircles', 'cv2.HoughCircles', (['gray', 'cv2.HOUGH_GRADIENT', '(1)', '(260)'], {'param1': '(30)', 'param2': '(65)', 'minRadius': '(0)'}), '(gray, cv2.HOUGH_GRADIENT, 1, 260, param1=30, param2=65,\n minRadius=0)\n', (534, 607), False, 'import cv2\n'), ((892, 937), 'cv2.circle', 'cv2.circle', (['output', '(X, Y)', 'R', '(0, 255, 0)', '(4)'], {}), '(output, (X, Y), R, (0, 255, 0), 4)\n', (902, 937), False, 'import cv2\n'), ((932, 960), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'output'], {}), "('result', output)\n", (942, 960), False, 'import cv2\n'), ((961, 975), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (972, 975), False, 'import cv2\n'), ((104, 133), 'imutils.video.FileVideoStream', 'FileVideoStream', (['"""messi.webm"""'], {}), "('messi.webm')\n", (119, 133), False, 'from imutils.video import FileVideoStream\n'), ((659, 677), 
'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (668, 677), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by <NAME> at 1/25/21
"""paper_plot_fig3.py
:description : script
:param :
:returns:
:rtype:
"""
import os
import matplotlib
import numpy as np
import pandas as pd
matplotlib.rc('font', family="Arial")
matplotlib.rcParams["font.family"] = 'Arial' # 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = ['Arial']
os.chdir('../../ComplementaryData/Step_04_Pan_Core_model/')
print('----- loading data -----')
output_table = 'models_df_products_2.tsv'
her_name = [
'LR1',
'LR10', 'LR11', 'LR12', 'LR13', 'LR14',
'LR17', 'LR18', 'LR19', 'LR2', 'LR3',
'LR4', 'LR6', 'LR7', 'LR8', 'LR9'
]
omn_name = ['JCM1112', 'MM2_3', 'MM4_1A', 'CF48_3A', 'SD2112', 'I5007', 'ATCC53608', 'DSM200016', 'IRT', 'TD1', 'mlc3',
'100_23', '20_2', '3c6', 'lpuph',]
sour_name = ['LTH5448', 'TMW1_112', 'TMW1_656', 'LTH2584']
# models_df_products_1 = pd.read_csv(output_table, sep='\t', index_col=0)
# models_df_products_2 = pd.read_csv('models_df_products_.tsv', sep='\t', index_col=0)
# models_df_products = models_df_products_1.merge(models_df_products_2[['model_id','hista_c','dhap_c' ,'mthgxl_c', '12ppd__R_c']],
# how='left', on='model_id')
# models_df_products.to_csv(output_table, sep='\t')
models_df_products = pd.read_csv(output_table, sep='\t', index_col=0)
models_df_products['group'] = 'her'
models_df_products.loc[models_df_products['model_id'].isin(omn_name), ['group']] = 'omn'
models_df_products.loc[models_df_products['model_id'].isin(sour_name), ['group']] = 'sou'
models_df_products = models_df_products.sort_values(by=['group', ])
# Index(['model_id', 'growth', 'reaset', 'metset', 'genset', 'lac__L_c', 'ac_c',
# 'etoh_c', 'hista_c', 'fol_c', 'adeadocbl_c', 'ppoh_c', '13ppd_c',
# 'dhap_c', 'mthgxl_c', '12ppd__R_c', 'group'],
# dtype='object')
products = ['lac__L_c', 'ac_c', 'etoh_c', 'hista_c', 'dhap_c', '12ppd__R_c', '13ppd_c', ]
results = {'her': [], 'omn': [], 'sou': []}
for group_i in results.keys():
df_temp = models_df_products[(models_df_products['group'] == group_i)]
len_i = df_temp.shape[0]
for product_i in products:
postive_i = df_temp[df_temp[product_i] > 0.1].shape[0] / len_i
results[group_i].append(postive_i)
print(results)
products_plot_df = pd.DataFrame.from_dict(data=results, orient='index', columns=products)
# %%
import matplotlib.pyplot as plt
colors = plt.cm.get_cmap('Set2').colors
colors = np.array(colors)
w = 0.35
for product_i in products:
fig, axs = plt.subplots(1, 3, figsize=(3 * w, w))
fig.patch.set_alpha(0)
axs[0].pie([products_plot_df[product_i][0], 1.0 - products_plot_df[product_i][0]], startangle=90,
colors=colors[[0, -1]], )
axs[1].pie([products_plot_df[product_i][1], 1.0 - products_plot_df[product_i][1]], startangle=90,
colors=colors[[1, -1]])
axs[2].pie([products_plot_df[product_i][2], 1.0 - products_plot_df[product_i][2]], startangle=90,
colors=colors[[2, -1]])
fig.subplots_adjust(wspace=-0.4)
fig.savefig('fig3_' + product_i + '_percent.pdf', bbox_inches='tight')
fig.show()
# %%
fig, axs = plt.subplots(4, 1, figsize=(w * 2, 3.5 * w))
fig.patch.set_alpha(0)
axs[0].pie([1, 0], startangle=180, labels=['Herbivore', ''],
colors=colors[[0, -1]], textprops={'family': 'Arial', 'size': 8})
axs[1].pie([1, 0], startangle=180, labels=['Omnivore', ''],
colors=colors[[1, -1]], textprops={'family': 'Arial', 'size': 8})
axs[2].pie([1, 0], startangle=180, labels=['Sourdough', ''],
colors=colors[[2, -1]], textprops={'family': 'Arial', 'size': 8})
axs[3].pie([0, 1], startangle=180, labels=['', 'Negative'],
colors=colors[[2, -1]], textprops={'family': 'Arial', 'size': 8})
fig.subplots_adjust(hspace=-0.1)
fig.savefig('fig3_' + 'lenged' + '_percent.pdf', bbox_inches='tight')
fig.show()
fig, axs = plt.subplots(1, 3, figsize=(w * 12, w))
fig.patch.set_alpha(0)
axs[0].pie([0.5, 0.5], startangle=90, labels=[ '','Herbivore'],
colors=colors[[-1,0 ]], textprops={'family': 'Arial', 'size': 8},)
axs[1].pie([0.5, 0.5], startangle=90, labels=[ '','Omnivore'],
colors=colors[[ -1,1]], textprops={'family': 'Arial', 'size': 8})
axs[2].pie([0.5, 0.5], startangle=90, labels=['','Sourdough' ],
colors=colors[[-1,2 ]], textprops={'family': 'Arial', 'size': 8})
# fig.subplots_adjust(hspace=-0.1)
fig.savefig('fig3_' + 'lenged_2' + '_percent.pdf', bbox_inches='tight')
fig.show()
| [
"pandas.read_csv",
"pandas.DataFrame.from_dict",
"os.chdir",
"numpy.array",
"matplotlib.rc",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.subplots"
] | [((226, 263), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {'family': '"""Arial"""'}), "('font', family='Arial')\n", (239, 263), False, 'import matplotlib\n'), ((377, 436), 'os.chdir', 'os.chdir', (['"""../../ComplementaryData/Step_04_Pan_Core_model/"""'], {}), "('../../ComplementaryData/Step_04_Pan_Core_model/')\n", (385, 436), False, 'import os\n'), ((1317, 1365), 'pandas.read_csv', 'pd.read_csv', (['output_table'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(output_table, sep='\\t', index_col=0)\n", (1328, 1365), True, 'import pandas as pd\n'), ((2339, 2409), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', ([], {'data': 'results', 'orient': '"""index"""', 'columns': 'products'}), "(data=results, orient='index', columns=products)\n", (2361, 2409), True, 'import pandas as pd\n'), ((2498, 2514), 'numpy.array', 'np.array', (['colors'], {}), '(colors)\n', (2506, 2514), True, 'import numpy as np\n'), ((3203, 3247), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '(w * 2, 3.5 * w)'}), '(4, 1, figsize=(w * 2, 3.5 * w))\n', (3215, 3247), True, 'import matplotlib.pyplot as plt\n'), ((3948, 3987), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(w * 12, w)'}), '(1, 3, figsize=(w * 12, w))\n', (3960, 3987), True, 'import matplotlib.pyplot as plt\n'), ((2458, 2481), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""Set2"""'], {}), "('Set2')\n", (2473, 2481), True, 'import matplotlib.pyplot as plt\n'), ((2567, 2605), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(3 * w, w)'}), '(1, 3, figsize=(3 * w, w))\n', (2579, 2605), True, 'import matplotlib.pyplot as plt\n')] |
from skimage import exposure
from scipy.misc import imread
from scipy import ndimage
import numpy as np
import random
import os
from data_augmentation import *
from AxonDeepSeg.patch_management_tools import apply_legacy_preprocess, apply_preprocess
import functools
import copy
def generate_list_transformations(transformations = {}, thresh_indices = [0,0.5], verbose=0):
L_transformations = []
dict_transformations = {'shifting':shifting,
'rescaling':rescaling,
'random_rotation':random_rotation,
'elastic':elastic,
'flipping':flipping,
'gaussian_blur':gaussian_blur
}
if transformations == {}:
L_transformations = [functools.partial(v, verbose=verbose) for k,v in dict_transformations.iteritems()]
else:
#print(transformations)
L_c = []
for k,v in transformations.iteritems():
if (k != 'type') and (v['activate']==True):
number = v['order']
c = (number,k,v)
L_c.append(c)
# We sort the transformations to make by the number preceding the transformation in the dict in the config file
L_c_sorted = sorted(L_c, key=lambda x: x[0])
# Creation of the list of transformations to apply
for tup in L_c_sorted:
k = tup[1]
v = tup[2]
map(v.pop, ['order','activate'])
L_transformations.append(functools.partial(dict_transformations[k], verbose=verbose, **v))
return L_transformations
def all_transformations(patch, thresh_indices = [0,0.5], transformations = {}, verbose=0):
"""
:param patch: [image,mask].
:param thresh_indices : list of float in [0,1] : the thresholds for the ground truthes labels.
:return: application of the random transformations to the pair [image,mask].
"""
L_transformations = generate_list_transformations(transformations, thresh_indices, verbose=verbose)
for transfo in L_transformations:
patch = transfo(patch)
return patch
def random_transformation(patch, thresh_indices = [0,0.5], transformations = {}, verbose=0):
"""
:param patch: [image,mask].
:param thresh_indices : list of float in [0,1] : the thresholds for the ground truthes labels.
:return: application of a random transformation to the pair [image,mask].
"""
L_transformations = generate_list_transformations(transformations, thresh_indices, verbose=verbose)
patch = random.choice(L_transformations)(patch)
return patch
def labellize_mask_2d(patch, thresh_indices=[0, 0.5]):
'''
Process a patch with 8 bit pixels ([0-255]) so that the pixels between two threshold values are set to the closest threshold, effectively
enabling the creation of a mask with as many different values as there are thresholds.
Returns mask in [0-1] domain
'''
mask = np.zeros_like(patch)
for indice in range(len(thresh_indices)-1):
thresh_inf_8bit = 255*thresh_indices[indice]
thresh_sup_8bit = 255*thresh_indices[indice+1]
idx = np.where((patch >= thresh_inf_8bit) & (patch < thresh_sup_8bit)) # returns (x, y) of the corresponding indices
mask[idx] = np.mean([thresh_inf_8bit/255,thresh_sup_8bit/255])
mask[(patch >= 255*thresh_indices[-1])] = 1
return patch
def transform_batches(list_batches):
'''
Transform batches so that they are readable by Tensorflow (good shapes)
:param list_batches: [batch_x, batch_y, (batch_w)]
:return transformed_batches: Returns the batches with good shapes for tensorflow
'''
batch_x = list_batches[0]
batch_y = list_batches[1]
if len(list_batches) == 3:
batch_w = list_batches[2]
if len(batch_y) == 1: # If we have only one image in the list np.stack won't work
transformed_batches = []
transformed_batches.append(np.reshape(batch_x[0], (1, batch_x[0].shape[0], batch_x[0].shape[1])))
transformed_batches.append(np.reshape(batch_y[0], (1, batch_y[0].shape[0], batch_y[0].shape[1], -1)))
if len(list_batches) == 3:
transformed_batches.append(np.reshape(batch_w[0], (1, batch_w[0].shape[0], batch_w[0].shape[1])))
else:
transformed_batches = [np.stack(batch_x), np.stack(batch_y)]
if len(list_batches) == 3:
transformed_batches.append(np.stack(batch_w))
return transformed_batches
#######################################################################################################################
# Input data for the U-Net #
#######################################################################################################################
class input_data:
"""
Data to feed the learning/validating of the CNN
"""
def __init__(self, trainingset_path, config, type_ ='train', batch_size = 8, preload_all=True):
"""
Input:
trainingset_path : string : path to the trainingset folder containing 2 folders Validation and Train
with images and ground truthes.
type_ : string 'train' or 'validation' : for the network's training.
thresh_indices : list of float in [0,1] : the thresholds for the ground truthes labels.
preload_all : if put to True, will load every image into the memory.
Output:
None.
"""
if type_ == 'train' : # Data for train
self.path = trainingset_path+'/Train/'
self.set_size = len([f for f in os.listdir(self.path) if ('image' in f)])
self.each_sample_once = False
if type_ == 'validation': # Data for validation
self.path = trainingset_path+'/Validation/'
self.set_size = len([f for f in os.listdir(self.path) if ('image' in f)])
self.each_sample_once = True
self.size_image = config["trainingset_patchsize"]
self.n_labels = 2
self.samples_seen = 0
self.thresh_indices = config["thresholds"]
self.batch_size = batch_size
self.samples_list = self.reset_set(type_=type_)
self.epoch_size = len(self.samples_list)
self.preload_all = preload_all
self.loaded_data = None
#self.mean = config['dataset_mean']
#self.variance = config['dataset_variance']
# Loading all images if asked so
if preload_all:
self.loaded_data = {}
for id_image in self.samples_list:
# We are reading directly the images. Range of values : 0-255
image = self.read_image('image', id_image)
mask = self.read_image('mask', id_image)
self.loaded_data.update({str(id_image):[image,mask]})
def get_size(self):
return self.set_size
def reset_set(self, type_= 'train', shuffle=True):
"""
Reset the set.
:param shuffle: If True, the set is shuffled, so that each batch won't systematically contain the same images.
:return list: List of ids of training samples
"""
self.sample_seen = 0
if type_ == 'train':
# Generation of a shuffled list of images
samples_list = range(self.set_size)
if shuffle:
np.random.shuffle(samples_list)
# Adding X images so that all batches have the same size.
rem = self.set_size % self.batch_size
if rem != 0:
samples_list += np.random.choice(samples_list, self.batch_size - rem, replace=False).tolist()
else:
samples_list = range(self.set_size)
return samples_list
def next_batch(self, augmented_data_ = {'type':'None'}, each_sample_once=False, data_aug_verbose=0):
"""
:param augmented_data: if True, each patch of the batch is randomly transformed with the data augmentation process.
:return: The pair [batch_x (data), batch_y (prediction)] to feed the network.
"""
batch_x = []
batch_y = []
# Set the range of indices
# Read the image and mask files.
for i in range(self.batch_size) :
# We load the image and discretize the masks
image, real_mask = self.prepare_image_mask()
# We apply data augmentation
augmented_data = copy.deepcopy(augmented_data_)
image, real_mask = self.apply_data_augmentation([image, real_mask], augmented_data, data_aug_verbose)
# Normalisation of the image
image = apply_legacy_preprocess(image)
#image = apply_preprocess(image, self.mean, self.variance)
# We save the obtained image and mask.
batch_x.append(image)
batch_y.append(real_mask)
# If we are at the end of an epoch, we reset the list of samples, so that during next epoch all sets will be different.
if self.sample_seen == self.epoch_size:
if each_sample_once:
self.samples_list = self.reset_set(type_ = 'validation')
break
else:
self.samples_list = self.reset_set(type_ = 'train')
# Ensuring that we do have np.arrays of the good size for batch_x and batch_y before returning them
return transform_batches([batch_x, batch_y])
def next_batch_WithWeights(self, augmented_data_ = {'type':'None'},
weights_modifier = {'balanced_activate':True, 'balanced_weights':[1.1, 1, 1.3],
'boundaries_activate':False},
each_sample_once=False, data_aug_verbose=0):
"""
:param weights_modifier:
:param augmented_data: if True, each patch of the batch is randomly transformed with the data augmentation process.
:return: The triplet [batch_x (data), batch_y (prediction), weights (based on distance to edges)] to feed the network.
"""
batch_x = []
batch_y = []
batch_w = []
for i in range(self.batch_size) :
# We prepare the image and the corresponding mask by discretizing the mask.
image, real_mask = self.prepare_image_mask()
# Generation of the weights map
real_weights = self.generate_weights_map(weights_modifier, real_mask)
# Application of data augmentation
augmented_data = copy.deepcopy(augmented_data_)
image, real_mask, real_weights = self.apply_data_augmentation([image, real_mask, real_weights],
augmented_data, data_aug_verbose)
# Normalisation of the image
image = apply_legacy_preprocess(image)
# We have now loaded the good image, a mask (under the shape of a matrix, with different labels) that still needs to be converted to a volume (meaning, a sparse cube where each layer of depth relates to a class)
batch_x.append(image)
batch_y.append(real_mask)
batch_w.append(real_weights)
# If we are at the end of an epoch, we reset the list of samples, so that during next epoch all sets will be different.
if self.sample_seen == self.epoch_size:
if each_sample_once:
self.samples_list = self.reset_set(type_ = 'validation')
break
else:
self.samples_list = self.reset_set(type_ = 'train')
# Ensuring that we do have np.arrays of the good size for batch_x and batch_y before returning them
return transform_batches([batch_x, batch_y, batch_w])
def read_image(self, type_, i):
'''
:param i: indice of the image or mask to read.
:return image: the loaded image with 8 bit pixels, range of values being [0,288]
'''
# Loading the image using 8-bit pixels (0-255)
return imread(os.path.join(self.path,str(type_) + '_%s.png' % i), flatten=False, mode='L')
def prepare_image_mask(self):
"""
Loads the image and the mask, and discretizes the mask (and converts it in N dimensions, one for each class).
:return: Image (ndarray, (H,W)) and Mask (ndarray, (H,W,C)). C number of classes.
"""
# We take the next sample to see
indice = self.samples_list.pop(0)
self.sample_seen += 1
if self.preload_all:
image, mask = self.loaded_data[str(indice)]
else:
image = self.read_image('image', indice)
mask = self.read_image('mask', indice)
# Discretization of the mask
mask = labellize_mask_2d(mask, self.thresh_indices) # mask intensity float between 0-1
# Working out the real mask (sparse cube with n depth layer, one for each class)
n = len(self.thresh_indices) # number of classes
thresh_indices = [255*x for x in self.thresh_indices]
real_mask = np.zeros([mask.shape[0], mask.shape[1], n])
for class_ in range(n-1):
real_mask[:,:,class_] = (mask[:,:] >= thresh_indices[class_]) * (mask[:,:] < thresh_indices[class_+1])
real_mask[:,:,-1] = (mask[:,:] >= thresh_indices[-1])
real_mask = real_mask.astype(np.uint8)
return [image, real_mask]
def apply_data_augmentation(self, element, augmented_data, data_aug_verbose=0):
"""
Applies data augmentation to the requested image and mask.
:param image: Image (ndarray) to apply data augmentation to.
:param mask: Mask of the image (ndarray) to apply data augmentation to.
:param augmented_data: Dict, contains the parameters of the data augmentation to apply.
:param data_aug_verbose: Int. If >=1, displays information about the data augmentation process.
:return: Image and Mask that have been transformed.
"""
# Online data augmentation
if augmented_data['type'].lower() == 'all':
augmented_data.pop('type')
augmented_element = all_transformations(element,
transformations=augmented_data,
thresh_indices=self.thresh_indices,
verbose=data_aug_verbose)
elif augmented_data['type'].lower() == 'random':
augmented_data.pop('type')
augmented_element = random_transformation(element,
transformations=augmented_data,
thresh_indices=self.thresh_indices,
verbose=data_aug_verbose)
else:
augmented_element = element
return augmented_element
def generate_boundary_weights(self, real_mask, weights_intermediate, sigma):
"""
Generates the boundary weights from the mask.
:param real_mask: the discretized mask.
:return: The 3D ndarray of the boundary weights (H,W,C) with C the number of classes.
"""
# Create a weight map for each class (background is the first class, equal to 1
n_classes = len(self.thresh_indices)
# Classical method to compute weights
for indice, class_ in enumerate(self.thresh_indices[1:]):
mask_class = real_mask[:, :, indice]
mask_class_8bit = np.asarray(255 * mask_class, dtype='uint8')
weight = ndimage.distance_transform_edt(mask_class_8bit)
weight[weight == 0] = np.max(weight)
if class_ == self.thresh_indices[1]:
w0 = 0.5
else:
w0 = 1
weight = 1 + w0 * np.exp(-(weight.astype(np.float64) / sigma) ** 2 / 2)
weights_intermediate[:, indice] = weight.reshape(-1, 1)[:, 0]
# Generating the mask with the real labels as well as the matrix of the weights
return np.reshape(weights_intermediate, [real_mask.shape[0], real_mask.shape[1], n_classes])
def generate_weights_map(self, weights_modifier, real_mask):
"""
Generates the weights for an image based on the mask.
:param weights_modifier: Dict, contains the parameters about the weights to use.
:param real_mask: Discretized mask (ndarray).
:return: Weights map taking into account both balance weights and boundary weights.
"""
weights_intermediate = np.ones((self.size_image * self.size_image, len(self.thresh_indices)))
n = len(self.thresh_indices)
# We generate the boundary weights map if necessary.
if weights_modifier['boundaries_activate'] == True:
# Create a boundary weight map for each class (background is the first class, equal to 1
weights_intermediate = self.generate_boundary_weights(real_mask, weights_intermediate,
weights_modifier['boundaries_sigma'])
# Working out the real weights (sparse matrix with the weights associated with each pixel).
# We apply the balance weights as well as the boundary weights if necessary.
real_weights = np.zeros([real_mask.shape[0], real_mask.shape[1]])
for class_ in range(n):
mean_weights = np.mean(weights_modifier['balanced_weights'])
weights_multiplier = 1
if weights_modifier['balanced_activate'] == True:
balanced_factor = weights_modifier['balanced_weights'][class_] / mean_weights
weights_multiplier = np.multiply(weights_multiplier, balanced_factor)
if weights_modifier['boundaries_activate'] == True:
weights_multiplier = np.multiply(weights_multiplier, weights_intermediate[:, :, class_])
real_weights += np.multiply(real_mask[:, :, class_], weights_multiplier)
return real_weights
| [
"numpy.mean",
"scipy.ndimage.distance_transform_edt",
"random.choice",
"numpy.reshape",
"numpy.multiply",
"os.listdir",
"numpy.where",
"AxonDeepSeg.patch_management_tools.apply_legacy_preprocess",
"numpy.random.choice",
"numpy.asarray",
"numpy.max",
"numpy.stack",
"numpy.zeros",
"functools... | [((3080, 3100), 'numpy.zeros_like', 'np.zeros_like', (['patch'], {}), '(patch)\n', (3093, 3100), True, 'import numpy as np\n'), ((2665, 2697), 'random.choice', 'random.choice', (['L_transformations'], {}), '(L_transformations)\n', (2678, 2697), False, 'import random\n'), ((3289, 3353), 'numpy.where', 'np.where', (['((patch >= thresh_inf_8bit) & (patch < thresh_sup_8bit))'], {}), '((patch >= thresh_inf_8bit) & (patch < thresh_sup_8bit))\n', (3297, 3353), True, 'import numpy as np\n'), ((3420, 3475), 'numpy.mean', 'np.mean', (['[thresh_inf_8bit / 255, thresh_sup_8bit / 255]'], {}), '([thresh_inf_8bit / 255, thresh_sup_8bit / 255])\n', (3427, 3475), True, 'import numpy as np\n'), ((13556, 13599), 'numpy.zeros', 'np.zeros', (['[mask.shape[0], mask.shape[1], n]'], {}), '([mask.shape[0], mask.shape[1], n])\n', (13564, 13599), True, 'import numpy as np\n'), ((16571, 16660), 'numpy.reshape', 'np.reshape', (['weights_intermediate', '[real_mask.shape[0], real_mask.shape[1], n_classes]'], {}), '(weights_intermediate, [real_mask.shape[0], real_mask.shape[1],\n n_classes])\n', (16581, 16660), True, 'import numpy as np\n'), ((17820, 17870), 'numpy.zeros', 'np.zeros', (['[real_mask.shape[0], real_mask.shape[1]]'], {}), '([real_mask.shape[0], real_mask.shape[1]])\n', (17828, 17870), True, 'import numpy as np\n'), ((817, 854), 'functools.partial', 'functools.partial', (['v'], {'verbose': 'verbose'}), '(v, verbose=verbose)\n', (834, 854), False, 'import functools\n'), ((4100, 4169), 'numpy.reshape', 'np.reshape', (['batch_x[0]', '(1, batch_x[0].shape[0], batch_x[0].shape[1])'], {}), '(batch_x[0], (1, batch_x[0].shape[0], batch_x[0].shape[1]))\n', (4110, 4169), True, 'import numpy as np\n'), ((4206, 4279), 'numpy.reshape', 'np.reshape', (['batch_y[0]', '(1, batch_y[0].shape[0], batch_y[0].shape[1], -1)'], {}), '(batch_y[0], (1, batch_y[0].shape[0], batch_y[0].shape[1], -1))\n', (4216, 4279), True, 'import numpy as np\n'), ((4489, 4506), 'numpy.stack', 'np.stack', 
(['batch_x'], {}), '(batch_x)\n', (4497, 4506), True, 'import numpy as np\n'), ((4508, 4525), 'numpy.stack', 'np.stack', (['batch_y'], {}), '(batch_y)\n', (4516, 4525), True, 'import numpy as np\n'), ((8768, 8798), 'copy.deepcopy', 'copy.deepcopy', (['augmented_data_'], {}), '(augmented_data_)\n', (8781, 8798), False, 'import copy\n'), ((8975, 9005), 'AxonDeepSeg.patch_management_tools.apply_legacy_preprocess', 'apply_legacy_preprocess', (['image'], {}), '(image)\n', (8998, 9005), False, 'from AxonDeepSeg.patch_management_tools import apply_legacy_preprocess, apply_preprocess\n'), ((10909, 10939), 'copy.deepcopy', 'copy.deepcopy', (['augmented_data_'], {}), '(augmented_data_)\n', (10922, 10939), False, 'import copy\n'), ((11231, 11261), 'AxonDeepSeg.patch_management_tools.apply_legacy_preprocess', 'apply_legacy_preprocess', (['image'], {}), '(image)\n', (11254, 11261), False, 'from AxonDeepSeg.patch_management_tools import apply_legacy_preprocess, apply_preprocess\n'), ((16030, 16073), 'numpy.asarray', 'np.asarray', (['(255 * mask_class)'], {'dtype': '"""uint8"""'}), "(255 * mask_class, dtype='uint8')\n", (16040, 16073), True, 'import numpy as np\n'), ((16095, 16142), 'scipy.ndimage.distance_transform_edt', 'ndimage.distance_transform_edt', (['mask_class_8bit'], {}), '(mask_class_8bit)\n', (16125, 16142), False, 'from scipy import ndimage\n'), ((16177, 16191), 'numpy.max', 'np.max', (['weight'], {}), '(weight)\n', (16183, 16191), True, 'import numpy as np\n'), ((17931, 17976), 'numpy.mean', 'np.mean', (["weights_modifier['balanced_weights']"], {}), "(weights_modifier['balanced_weights'])\n", (17938, 17976), True, 'import numpy as np\n'), ((18452, 18508), 'numpy.multiply', 'np.multiply', (['real_mask[:, :, class_]', 'weights_multiplier'], {}), '(real_mask[:, :, class_], weights_multiplier)\n', (18463, 18508), True, 'import numpy as np\n'), ((1571, 1635), 'functools.partial', 'functools.partial', (['dict_transformations[k]'], {'verbose': 'verbose'}), 
'(dict_transformations[k], verbose=verbose, **v)\n', (1588, 1635), False, 'import functools\n'), ((4364, 4433), 'numpy.reshape', 'np.reshape', (['batch_w[0]', '(1, batch_w[0].shape[0], batch_w[0].shape[1])'], {}), '(batch_w[0], (1, batch_w[0].shape[0], batch_w[0].shape[1]))\n', (4374, 4433), True, 'import numpy as np\n'), ((4611, 4628), 'numpy.stack', 'np.stack', (['batch_w'], {}), '(batch_w)\n', (4619, 4628), True, 'import numpy as np\n'), ((7664, 7695), 'numpy.random.shuffle', 'np.random.shuffle', (['samples_list'], {}), '(samples_list)\n', (7681, 7695), True, 'import numpy as np\n'), ((18205, 18253), 'numpy.multiply', 'np.multiply', (['weights_multiplier', 'balanced_factor'], {}), '(weights_multiplier, balanced_factor)\n', (18216, 18253), True, 'import numpy as np\n'), ((18355, 18422), 'numpy.multiply', 'np.multiply', (['weights_multiplier', 'weights_intermediate[:, :, class_]'], {}), '(weights_multiplier, weights_intermediate[:, :, class_])\n', (18366, 18422), True, 'import numpy as np\n'), ((5879, 5900), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (5889, 5900), False, 'import os\n'), ((6120, 6141), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (6130, 6141), False, 'import os\n'), ((7874, 7942), 'numpy.random.choice', 'np.random.choice', (['samples_list', '(self.batch_size - rem)'], {'replace': '(False)'}), '(samples_list, self.batch_size - rem, replace=False)\n', (7890, 7942), True, 'import numpy as np\n')] |
# Benchmark syclbuffer's SYCL columnwise_total kernel against NumPy,
# on both the OpenCL CPU and GPU default queues.
import dpctl
import syclbuffer as sb
import numpy as np
# 10**4 x 4098 matrix of doubles; every column sums to 1.0 (10**4 * 1e-4).
X = np.full((10 ** 4, 4098), 1e-4, dtype="d")
# warm-up: run each implementation once before timing
print("=" * 10 + " Executing warm-up " + "=" * 10)
print("NumPy result: ", X.sum(axis=0))
dpctl.set_default_queue("opencl", "cpu", 0)
print(
    "SYCL({}) result: {}".format(
        dpctl.get_current_queue().get_sycl_device().get_device_name(),
        sb.columnwise_total(X),
    )
)
dpctl.set_default_queue("opencl", "gpu", 0)
print(
    "SYCL({}) result: {}".format(
        dpctl.get_current_queue().get_sycl_device().get_device_name(),
        sb.columnwise_total(X),
    )
)
import timeit
# Timed runs: 100 calls per repeat; a warm call in setup keeps JIT out of the numbers.
print("Times for 'opencl:cpu:0'")
print(
    timeit.repeat(
        stmt="sb.columnwise_total(X)",
        setup='dpctl.set_default_queue("opencl", "cpu", 0); '
        "sb.columnwise_total(X)",  # ensure JIT compilation is not counted
        number=100,
        globals=globals(),
    )
)
print("Times for 'opencl:gpu:0'")
print(
    timeit.repeat(
        stmt="sb.columnwise_total(X)",
        setup='dpctl.set_default_queue("opencl", "gpu", 0); sb.columnwise_total(X)',
        number=100,
        globals=globals(),
    )
)
print("Times for NumPy")
print(timeit.repeat(stmt="X.sum(axis=0)", number=100, globals=globals()))
| [
"syclbuffer.columnwise_total",
"numpy.full",
"dpctl.get_current_queue",
"dpctl.set_default_queue"
] | [((61, 104), 'numpy.full', 'np.full', (['(10 ** 4, 4098)', '(0.0001)'], {'dtype': '"""d"""'}), "((10 ** 4, 4098), 0.0001, dtype='d')\n", (68, 104), True, 'import numpy as np\n'), ((205, 248), 'dpctl.set_default_queue', 'dpctl.set_default_queue', (['"""opencl"""', '"""cpu"""', '(0)'], {}), "('opencl', 'cpu', 0)\n", (228, 248), False, 'import dpctl\n'), ((402, 445), 'dpctl.set_default_queue', 'dpctl.set_default_queue', (['"""opencl"""', '"""gpu"""', '(0)'], {}), "('opencl', 'gpu', 0)\n", (425, 445), False, 'import dpctl\n'), ((369, 391), 'syclbuffer.columnwise_total', 'sb.columnwise_total', (['X'], {}), '(X)\n', (388, 391), True, 'import syclbuffer as sb\n'), ((566, 588), 'syclbuffer.columnwise_total', 'sb.columnwise_total', (['X'], {}), '(X)\n', (585, 588), True, 'import syclbuffer as sb\n'), ((298, 323), 'dpctl.get_current_queue', 'dpctl.get_current_queue', ([], {}), '()\n', (321, 323), False, 'import dpctl\n'), ((495, 520), 'dpctl.get_current_queue', 'dpctl.get_current_queue', ([], {}), '()\n', (518, 520), False, 'import dpctl\n')] |
import os
# Allow duplicate OpenMP runtimes (Intel KMP workaround for aborts when
# several libraries each bundle libomp).
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import warnings
warnings.filterwarnings("ignore")
import gym
import pybullet_envs
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Normal
import torch.multiprocessing as mp
import time
import random
import numpy as np
from collections import deque
from statistics import mean, stdev
# Probe the environment once to read its dimensions, then close it.
ENV = gym.make("HalfCheetahBulletEnv-v0")
OBS_DIM = ENV.observation_space.shape[0]
ACT_DIM = ENV.action_space.shape[0]
ACT_LIMIT = ENV.action_space.high[0]
ENV.close()
# Discount factor used by the n-step TD returns.
GAMMA = 0.99
# Pick a SEED and train with it.
# SEED = {1,100,200,300,600}
# Set the seed you selected.
# This template file may be modified freely, as long as it still works with score.py.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
def init_weights(m):
    """Initialize Linear layers: N(0, 0.1) weights and a constant 0.1 bias."""
    if not isinstance(m, nn.Linear):
        return
    nn.init.normal_(m.weight, mean=0., std=0.1)
    nn.init.constant_(m.bias, 0.1)
class ActorCritic(nn.Module):
    """Actor-critic network with a Gaussian policy head.

    The critic maps a state to a scalar value estimate; the actor maps a
    state to the mean of a Normal distribution whose log standard deviation
    is a learned, state-independent parameter.
    """

    def __init__(self, state_size, action_size, hidden_size=256, seed=100, std=0.0):
        super(ActorCritic, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.critic = nn.Sequential(
            nn.Linear(state_size, 256),
            nn.LeakyReLU(),
            nn.Linear(256, 128),
            nn.LeakyReLU(),
            nn.Linear(128, 64),
            nn.LeakyReLU(),
            nn.Linear(64, 1)
        )
        self.actor = nn.Sequential(
            nn.Linear(state_size, 256),
            nn.ReLU(),
            nn.Linear(256, action_size),
            nn.Tanh()
        )
        # Log std of the policy, shared across all states.
        self.log_std = nn.Parameter(torch.ones(1, action_size) * std)
        self.apply(init_weights)

    def forward(self, x):
        """Return (action distribution, state-value estimate) for state x."""
        self.value = self.critic(x)
        self.mu = self.actor(x)
        self.std = self.log_std.exp().expand_as(self.mu)
        self.dist = Normal(self.mu, self.std)
        return self.dist, self.value

    def get_action(self, x):
        """Use the actor to predict an action for state x and return it.

        Previously an unimplemented stub that returned None; now samples an
        action from the policy distribution.
        """
        dist, _ = self.forward(x)
        return dist.sample()

    def learn(self, state_lst, logprob_lst, q_val_lst, entropy, optimizer):
        """
        One A2C update step.

        Computes advantages by subtracting a baseline (V from the critic)
        from the estimated Q values, then takes one optimizer step on the
        combined actor/critic loss with an entropy bonus.
        """
        self.log_probs = torch.cat(logprob_lst)
        self.returns = torch.cat(q_val_lst).detach()
        self.values = torch.cat(state_lst)
        self.advantage = self.returns - self.values
        # Policy gradient with the detached advantage as the score weight.
        self.actor_loss = -(self.log_probs * self.advantage.detach()).mean()
        # Critic regresses onto the n-step returns.
        self.critic_loss = self.advantage.pow(2).mean()
        self.loss = self.actor_loss + 0.5 * self.critic_loss - 0.001 * entropy
        optimizer.zero_grad()
        self.loss.backward()
        optimizer.step()
def compute_gae(next_value, rewards, masks, values, gamma=0.99, tau=0.95):
    """Generalized Advantage Estimation returns, computed backwards in time.

    :param next_value: Bootstrap value for the state following the rollout.
    :param rewards: Per-step rewards r_t.
    :param masks: Per-step continuation masks (0 where the episode ended).
    :param values: Per-step value estimates V(s_t).
    :param gamma: Discount factor.
    :param tau: GAE smoothing coefficient (lambda).
    :return: List of per-step returns (GAE advantage + value).
    """
    extended = values + [next_value]
    out = []
    gae = 0
    for t in reversed(range(len(rewards))):
        # TD residual, cut off by the mask at episode boundaries.
        delta = rewards[t] + gamma * extended[t + 1] * masks[t] - extended[t]
        gae = delta + gamma * tau * masks[t] * gae
        out.append(gae + extended[t])
    out.reverse()
    return out
def n_step_td(reward_lst, V, gamma=0.99):
    """Compute n-step TD returns for a rollout, backwards in time.

    R_t = r_t + gamma * R_{t+1}, bootstrapped from ``V`` (the critic's value
    for the state after the rollout, or 0 at episode end).

    :param reward_lst: Per-step rewards of the rollout.
    :param V: Bootstrap value for the state following the last reward.
    :param gamma: Discount factor. Defaults to 0.99, the module's GAMMA, so
        existing two-argument callers behave exactly as before; the parameter
        removes the hard dependence on the module-level global.
    :return: List of discounted returns, one per step.
    """
    q_val_lst = []
    R = V
    for step in reversed(range(len(reward_lst))):
        R = reward_lst[step] + gamma * R
        q_val_lst.insert(0, R)
    return q_val_lst
# episode_rewards, epi_plot and epi_reward are needed for plotting later; do not modify them.
def Worker(num_episodes, n_steps):
    """
    Train a single A2C agent on HalfCheetahBulletEnv-v0.

    Collects n-step rollouts, computes n-step TD returns for each rollout and
    performs one actor-critic update per rollout. Once the running 100-episode
    average reward reaches 500, the averages are saved to ./single.npy and
    training stops early.

    :param num_episodes: Maximum number of episodes to train for.
    :param n_steps: Length of each rollout between updates.
    """
    env = gym.make("HalfCheetahBulletEnv-v0")
    agent = ActorCritic(OBS_DIM, ACT_DIM).to(device)
    optimizer = optim.Adam(agent.parameters(), lr=1e-4)
    ########################## Do not modify this section ###########################
    episode_rewards = deque(maxlen=100)
    start_time = time.time()
    epi_plot = []
    finish = False
    ########################## Do not modify this section ###########################
    for episode in range(num_episodes):
        done = False
        state = env.reset()
        epi_reward = 0.
        while not done:
            s_lst, a_lst, r_lst = [], [], []
            entropy = 0  # accumulated policy entropy over the rollout
            masks = []  # only needed for the (commented-out) GAE variant below
            # N-step rollout
            for t in range(n_steps):
                # env works in numpy while nn.Module works in tensors: convert.
                state = torch.FloatTensor(state).unsqueeze(0).to(device)
                dist, value = agent(state)
                action = dist.sample()
                ########################## Do not modify this section ###########################
                next_state, reward, done, _ = env.step(action.cpu().numpy()[0])
                epi_reward += reward
                ########################## Do not modify this section ###########################
                log_prob = dist.log_prob(action)
                entropy += dist.entropy().mean()
                a_lst.append(log_prob)
                s_lst.append(value)
                # wrap the scalar reward into a (1, 1) tensor on the device
                r_lst.append(torch.FloatTensor([reward]).unsqueeze(1).to(device))
                # masks.append(torch.FloatTensor([1 - done]).unsqueeze(1).to(device))
                if done:
                    break
                state = next_state
            next_state = torch.FloatTensor(next_state).unsqueeze(0).to(device)
            _, value = agent(next_state)
            # Bootstrap value: 0 if the episode ended, otherwise the critic's
            # estimate for the state following the rollout.
            V = 0 if done else value
            # q_val_lst = compute_gae(V, r_lst, masks, s_lst)
            q_val_lst = n_step_td(r_lst, V)
            agent.learn(s_lst, a_lst, q_val_lst, entropy, optimizer)
        ################################### Do not modify this section ###################################
        episode_rewards.append(epi_reward)
        if episode >= 100:
            mean_100_episode_reward = mean(episode_rewards)
            epi_plot.append(mean_100_episode_reward)
            if episode % 10 == 0:
                print("Episode: {}, avg score: {:.1f}".format(episode, mean_100_episode_reward))
            if mean_100_episode_reward >= 500:
                finish = True
                print("Solved (1)!!!, Time : {:.2f}".format(time.time() - start_time))
                np.save("./single.npy", np.array(epi_plot))
                return
    env.close()
    print("Fail... Retry")
def run(num_episodes):
    """Train a single A2C worker for ``num_episodes`` episodes."""
    # Rollout length between updates (n-step TD target).
    rollout_length = 5
    Worker(num_episodes, rollout_length)
if __name__ == '__main__':
    # Train for 2000 episodes when executed as a script.
    run(2000)
################################### Do not modify this section ###################################
| [
"torch.nn.ReLU",
"torch.nn.Tanh",
"torch.nn.init.constant_",
"numpy.array",
"torch.cuda.is_available",
"gym.make",
"collections.deque",
"torch.distributions.Normal",
"torch.nn.LeakyReLU",
"time.time",
"warnings.filterwarnings",
"torch.cat",
"torch.nn.init.normal_",
"statistics.mean",
"to... | [((69, 102), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (92, 102), False, 'import warnings\n'), ((422, 457), 'gym.make', 'gym.make', (['"""HalfCheetahBulletEnv-v0"""'], {}), "('HalfCheetahBulletEnv-v0')\n", (430, 457), False, 'import gym\n'), ((737, 762), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (760, 762), False, 'import torch\n'), ((774, 817), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (786, 817), False, 'import torch\n'), ((3761, 3796), 'gym.make', 'gym.make', (['"""HalfCheetahBulletEnv-v0"""'], {}), "('HalfCheetahBulletEnv-v0')\n", (3769, 3796), False, 'import gym\n'), ((4002, 4019), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (4007, 4019), False, 'from collections import deque\n'), ((4037, 4048), 'time.time', 'time.time', ([], {}), '()\n', (4046, 4048), False, 'import time\n'), ((882, 926), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight'], {'mean': '(0.0)', 'std': '(0.1)'}), '(m.weight, mean=0.0, std=0.1)\n', (897, 926), True, 'import torch.nn as nn\n'), ((934, 964), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0.1)'], {}), '(m.bias, 0.1)\n', (951, 964), True, 'import torch.nn as nn\n'), ((1146, 1169), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1163, 1169), False, 'import torch\n'), ((1889, 1914), 'torch.distributions.Normal', 'Normal', (['self.mu', 'self.std'], {}), '(self.mu, self.std)\n', (1895, 1914), False, 'from torch.distributions import Normal\n'), ((2532, 2554), 'torch.cat', 'torch.cat', (['logprob_lst'], {}), '(logprob_lst)\n', (2541, 2554), False, 'import torch\n'), ((2630, 2650), 'torch.cat', 'torch.cat', (['state_lst'], {}), '(state_lst)\n', (2639, 2650), False, 'import torch\n'), ((1220, 1246), 'torch.nn.Linear', 'nn.Linear', (['state_size', '(256)'], {}), '(state_size, 256)\n', (1229, 1246), True, 'import 
torch.nn as nn\n'), ((1260, 1274), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (1272, 1274), True, 'import torch.nn as nn\n'), ((1288, 1307), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(128)'], {}), '(256, 128)\n', (1297, 1307), True, 'import torch.nn as nn\n'), ((1321, 1335), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (1333, 1335), True, 'import torch.nn as nn\n'), ((1349, 1367), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (1358, 1367), True, 'import torch.nn as nn\n'), ((1381, 1395), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (1393, 1395), True, 'import torch.nn as nn\n'), ((1409, 1425), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (1418, 1425), True, 'import torch.nn as nn\n'), ((1485, 1511), 'torch.nn.Linear', 'nn.Linear', (['state_size', '(256)'], {}), '(state_size, 256)\n', (1494, 1511), True, 'import torch.nn as nn\n'), ((1525, 1534), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1532, 1534), True, 'import torch.nn as nn\n'), ((1548, 1575), 'torch.nn.Linear', 'nn.Linear', (['(256)', 'action_size'], {}), '(256, action_size)\n', (1557, 1575), True, 'import torch.nn as nn\n'), ((1589, 1598), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (1596, 1598), True, 'import torch.nn as nn\n'), ((6226, 6247), 'statistics.mean', 'mean', (['episode_rewards'], {}), '(episode_rewards)\n', (6230, 6247), False, 'from statistics import mean, stdev\n'), ((1646, 1672), 'torch.ones', 'torch.ones', (['(1)', 'action_size'], {}), '(1, action_size)\n', (1656, 1672), False, 'import torch\n'), ((2578, 2598), 'torch.cat', 'torch.cat', (['q_val_lst'], {}), '(q_val_lst)\n', (2587, 2598), False, 'import torch\n'), ((6637, 6655), 'numpy.array', 'np.array', (['epi_plot'], {}), '(epi_plot)\n', (6645, 6655), True, 'import numpy as np\n'), ((5600, 5629), 'torch.FloatTensor', 'torch.FloatTensor', (['next_state'], {}), '(next_state)\n', (5617, 5629), False, 'import torch\n'), ((6570, 6581), 'time.time', 
'time.time', ([], {}), '()\n', (6579, 6581), False, 'import time\n'), ((4654, 4678), 'torch.FloatTensor', 'torch.FloatTensor', (['state'], {}), '(state)\n', (4671, 4678), False, 'import torch\n'), ((5347, 5374), 'torch.FloatTensor', 'torch.FloatTensor', (['[reward]'], {}), '([reward])\n', (5364, 5374), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2016 by University of Kassel and Fraunhofer Institute for Wind Energy and Energy
# System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
from sys import stderr
from numpy import ones, conj, nonzero, any, exp, pi, r_, argsort, resize, empty, complex128, zeros, int64, array
from scipy.sparse import csr_matrix
from numba import jit
from pypower.idx_bus import BUS_I, GS, BS
from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, BR_B, BR_STATUS, SHIFT, TAP
@jit(nopython=True, cache=True)
def gen_Ybus(Yf_x, Yt_x, Ysh, col_Y, f, t, f_sort, t_sort, nb, nl, r_nl):
    """
    Fast (numba-compiled) calculation of Ybus.

    Assembles the CSR data (Yx), column indices (Yj) and row pointers (Yp) of
    the nb x nb bus admittance matrix row by row, merging the branch
    contributions (Yf_x, Yt_x) with the shunt admittances (Ysh).

    :param Yf_x: stacked branch data r_[Yff, Yft] (length 2*nl)
    :param Yt_x: stacked branch data r_[Ytf, Ytt] (length 2*nl)
    :param Ysh: shunt admittance per bus (length nb)
    :param col_Y: column (bus) index r_[f, t] of every entry in Yf_x/Yt_x
    :param f: "from" bus index of every branch
    :param t: "to" bus index of every branch
    :param f_sort: argsort(f) -- branch order grouped by "from" row
    :param t_sort: argsort(t) -- branch order grouped by "to" row
    :param nb: number of buses
    :param nl: number of branches
    :param r_nl: precomputed array of range(nl)
    :return: (Yx, Yj, Yp, nnz) -- oversized CSR arrays plus the number of
        nonzeros actually written (caller trims with resize).
    """
    r_nb = range(nb)
    # allocate data of Ybus in CSR format
    # Note: More space is allocated than needed with empty.
    # The matrix size will be reduced afterwards
    Yx = empty(nb * 5, dtype=complex128)  # data
    Yp = zeros(nb + 1, dtype=int64)  # row pointer
    Yj = empty(nb * 5, dtype=int64)  # column indices
    # index iterators
    # a = iterator of f, b = iterator of t, curRow = current Row
    a, b, curRow = 0, 0, 0
    # number of nonzeros (total), number of nonzeros per row
    nnz, nnz_row = 0, 0
    # flag checks if diagonal entry was added
    YshAdded = False
    for curRow in r_nb:
        nnz_row = 0
        # iterate rows of Ybus
        # add entries from Yf
        while a < nl and f[f_sort[a]] == curRow:
            # Entries from f_sort[a] in current row of Ybus
            for col in (r_nl[f_sort[a]], r_nl[f_sort[a]] + nl):
                # 'Has entry at column in Yf: %i ' % col
                if col_Y[col] == curRow and not YshAdded:
                    # add Ysh and Yf_x (diagonal element). If not already added
                    curVal = Yf_x[col] + Ysh[curRow]
                    YshAdded = True
                else:
                    # add only Yf_x
                    curVal = Yf_x[col]
                for k in range(Yp[curRow], Yp[curRow] + nnz_row):
                    if col_Y[col] == Yj[k]:
                        # if entry at column already exists add value
                        Yx[k] += curVal
                        break
                else:
                    # for/else: no existing entry matched -> new entry in Ybus
                    Yx[nnz] = curVal
                    Yj[nnz] = col_Y[col]
                    nnz += 1
                    nnz_row += 1
            a += 1
        # add entries from Yt
        while b < nl and t[t_sort[b]] == curRow:
            # Entries from t_sort[b] in current row of Ybus
            for col in (r_nl[t_sort[b]], r_nl[t_sort[b]] + nl):
                # 'Has entry at column in Yt: %i ' % col
                if col_Y[col] == curRow and not YshAdded:
                    # add Ysh and Yf_x (diagonal element). If not already added
                    curVal = Yt_x[col] + Ysh[curRow]
                    YshAdded = True
                else:
                    # add only Yt_x
                    curVal = Yt_x[col]
                for k in range(Yp[curRow], Yp[curRow] + nnz_row):
                    if col_Y[col] == Yj[k]:
                        # if entry at column already exists add value
                        Yx[k] += curVal
                        break
                else:
                    # for/else: no existing entry matched -> new entry in Ybus
                    Yx[nnz] = curVal
                    Yj[nnz] = col_Y[col]
                    nnz += 1
                    nnz_row += 1
            b += 1
        if not YshAdded:
            # check if diagonal entry was added. If not -> add if not zero
            if Ysh[curRow]:
                Yx[nnz] = Ysh[curRow]
                Yj[nnz] = curRow
                nnz += 1
                nnz_row += 1
        YshAdded = False
        # add number of nonzeros in row to row pointer
        Yp[curRow + 1] = nnz_row + Yp[curRow]
        curRow += 1
    return Yx, Yj, Yp, nnz
def makeYbus(baseMVA, bus, branch):
    """Build the bus admittance matrix and branch admittance matrices.

    Returns the full bus admittance matrix (i.e. for all buses) and the
    matrices C{Yf} and C{Yt} which, when multiplied by a complex voltage
    vector, yield the vector of currents injected into each line at its
    "from" and "to" bus respectively. All quantities are converted to p.u.

    @see: L{makeSbus}
    """
    ## sizes
    nb = bus.shape[0]  ## number of buses
    nl = branch.shape[0]  ## number of branches
    ## bus numbers must coincide with the row indices of the bus matrix
    if any(bus[:, BUS_I] != list(range(nb))):
        stderr.write('buses must appear in order by bus number\n')
    ## per-branch 2x2 admittance blocks:
    ##
    ##   | If |   | Yff  Yft |   | Vf |
    ##   |    | = |          | * |    |
    ##   | It |   | Ytf  Ytt |   | Vt |
    ##
    stat = branch[:, BR_STATUS]  ## ones at in-service branches
    Ys = stat / (branch[:, BR_R] + 1j * branch[:, BR_X])  ## series admittance
    Bc = stat * branch[:, BR_B]  ## line charging susceptance
    ## tap ratios default to 1; overwrite where a ratio is given, then fold
    ## in the phase shifters
    tap = ones(nl)
    with_tap = nonzero(branch[:, TAP])
    tap[with_tap] = branch[with_tap, TAP]
    tap = tap * exp(1j * pi / 180 * branch[:, SHIFT])
    Ytt = Ys + 1j * Bc / 2
    Yff = Ytt / (tap * conj(tap))
    Yft = -Ys / conj(tap)
    Ytf = -Ys / tap
    ## shunt admittance: with Psh - j Qsh = V * conj(Ysh * V) at V = 1.0 p.u.
    ## we get Ysh = Psh + j Qsh = (Gs + j Bs) / baseMVA
    Ysh = (bus[:, GS] + 1j * bus[:, BS]) / baseMVA
    f = branch[:, F_BUS].astype(int)  ## list of "from" buses
    t = branch[:, T_BUS].astype(int)  ## list of "to" buses
    ## Yf * V (resp. Yt * V) is the vector of complex branch currents injected
    ## at the "from" (resp. "to") end of every branch
    row_idx = r_[range(nl), range(nl)]  ## each branch contributes two entries
    Yf_x = r_[Yff, Yft]
    Yt_x = r_[Ytf, Ytt]
    col_Y = r_[f, t]
    Yf = csr_matrix((Yf_x, (row_idx, col_Y)), (nl, nb))
    Yt = csr_matrix((Yt_x, (row_idx, col_Y)), (nl, nb))
    ## assemble Ybus itself with the numba-compiled CSR routine, then trim the
    ## oversized buffers down to the actual number of nonzeros
    Yx, Yj, Yp, nnz = gen_Ybus(Yf_x, Yt_x, Ysh, col_Y, f, t, argsort(f), argsort(t), nb, nl,
                               array(range(nl), dtype=int64))
    Ybus = csr_matrix((resize(Yx, nnz), resize(Yj, nnz), Yp))
    return Ybus, Yf, Yt
"numpy.ones",
"numpy.conj",
"numpy.exp",
"sys.stderr.write",
"numpy.zeros",
"numba.jit",
"numpy.empty",
"numpy.argsort",
"numpy.nonzero",
"numpy.resize",
"scipy.sparse.csr_matrix"
] | [((606, 636), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (609, 636), False, 'from numba import jit\n'), ((945, 976), 'numpy.empty', 'empty', (['(nb * 5)'], {'dtype': 'complex128'}), '(nb * 5, dtype=complex128)\n', (950, 976), False, 'from numpy import ones, conj, nonzero, any, exp, pi, r_, argsort, resize, empty, complex128, zeros, int64, array\n'), ((993, 1019), 'numpy.zeros', 'zeros', (['(nb + 1)'], {'dtype': 'int64'}), '(nb + 1, dtype=int64)\n', (998, 1019), False, 'from numpy import ones, conj, nonzero, any, exp, pi, r_, argsort, resize, empty, complex128, zeros, int64, array\n'), ((1043, 1069), 'numpy.empty', 'empty', (['(nb * 5)'], {'dtype': 'int64'}), '(nb * 5, dtype=int64)\n', (1048, 1069), False, 'from numpy import ones, conj, nonzero, any, exp, pi, r_, argsort, resize, empty, complex128, zeros, int64, array\n'), ((5306, 5314), 'numpy.ones', 'ones', (['nl'], {}), '(nl)\n', (5310, 5314), False, 'from numpy import ones, conj, nonzero, any, exp, pi, r_, argsort, resize, empty, complex128, zeros, int64, array\n'), ((5374, 5397), 'numpy.nonzero', 'nonzero', (['branch[:, TAP]'], {}), '(branch[:, TAP])\n', (5381, 5397), False, 'from numpy import ones, conj, nonzero, any, exp, pi, r_, argsort, resize, empty, complex128, zeros, int64, array\n'), ((6610, 6650), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(Yf_x, (i, col_Y))', '(nl, nb)'], {}), '((Yf_x, (i, col_Y)), (nl, nb))\n', (6620, 6650), False, 'from scipy.sparse import csr_matrix\n'), ((6660, 6700), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(Yt_x, (i, col_Y))', '(nl, nb)'], {}), '((Yt_x, (i, col_Y)), (nl, nb))\n', (6670, 6700), False, 'from scipy.sparse import csr_matrix\n'), ((4781, 4839), 'sys.stderr.write', 'stderr.write', (['"""buses must appear in order by bus number\n"""'], {}), "('buses must appear in order by bus number\\n')\n", (4793, 4839), False, 'from sys import stderr\n'), ((5536, 5575), 'numpy.exp', 'exp', (['(1.0j * pi / 180 * 
branch[:, SHIFT])'], {}), '(1.0j * pi / 180 * branch[:, SHIFT])\n', (5539, 5575), False, 'from numpy import ones, conj, nonzero, any, exp, pi, r_, argsort, resize, empty, complex128, zeros, int64, array\n'), ((5675, 5684), 'numpy.conj', 'conj', (['tap'], {}), '(tap)\n', (5679, 5684), False, 'from numpy import ones, conj, nonzero, any, exp, pi, r_, argsort, resize, empty, complex128, zeros, int64, array\n'), ((6763, 6773), 'numpy.argsort', 'argsort', (['f'], {}), '(f)\n', (6770, 6773), False, 'from numpy import ones, conj, nonzero, any, exp, pi, r_, argsort, resize, empty, complex128, zeros, int64, array\n'), ((6775, 6785), 'numpy.argsort', 'argsort', (['t'], {}), '(t)\n', (6782, 6785), False, 'from numpy import ones, conj, nonzero, any, exp, pi, r_, argsort, resize, empty, complex128, zeros, int64, array\n'), ((5647, 5656), 'numpy.conj', 'conj', (['tap'], {}), '(tap)\n', (5651, 5656), False, 'from numpy import ones, conj, nonzero, any, exp, pi, r_, argsort, resize, empty, complex128, zeros, int64, array\n'), ((6880, 6895), 'numpy.resize', 'resize', (['Yx', 'nnz'], {}), '(Yx, nnz)\n', (6886, 6895), False, 'from numpy import ones, conj, nonzero, any, exp, pi, r_, argsort, resize, empty, complex128, zeros, int64, array\n'), ((6897, 6912), 'numpy.resize', 'resize', (['Yj', 'nnz'], {}), '(Yj, nnz)\n', (6903, 6912), False, 'from numpy import ones, conj, nonzero, any, exp, pi, r_, argsort, resize, empty, complex128, zeros, int64, array\n')] |
import numpy as np
from scipy import optimize
import math
import matplotlib.pyplot as plt
import matplotlib as mpl
import ipywidgets as widgets
from ipywidgets import interact, interact_manual
def interactive_capdemand(q_0,a,a_base,amin,amax,b_0,b_base,bmin,bmax,k_0,k_base,kmin,kmax,theta,theta_base,thetamin,thetamax,kbar,kplot_max):
    """Interactive (ipywidgets) plot of capacity demand vs. residual supply.

    q_0 is the grid of values to plot over (assumed to be a numpy array —
    TODO confirm with callers); theta, a, b_0 and k_0 each get a slider
    whose range is given by the corresponding *min/*max arguments and
    whose initial value is *_base. kbar caps the residual-supply curve and
    kplot_max sets the y-axis limit. The remaining arguments (q_0 aside)
    are only used to parameterize the sliders.
    """
    def plot_demand(theta,a,b_0,k_0):
        # Demand curve, built piecewise by boolean-mask assignment.
        cd = np.empty(q_0.size)
        # NOTE: the masks below can overlap; later assignments overwrite
        # earlier ones, so the order of these four statements matters.
        cd[q_0<theta] = np.inf
        cd[q_0<(b_0/k_0)] = 0
        cd[q_0>(theta+a)] = 0
        # On the interior region the demand is (q_0*k_0 - b_0)/(q_0 - theta).
        cd[(q_0>=max(theta, b_0/k_0)) & (q_0<=theta+a)] = (q_0[(q_0>=max(theta, b_0/k_0)) & (q_0<=theta+a)]*k_0-b_0)/(q_0[(q_0>=max(theta, b_0/k_0)) & (q_0<=theta+a)]-theta)
        # Residual supply: kbar + q_0 - 1 - theta, capped above at kbar.
        cs = np.minimum(kbar*np.ones(q_0.size), kbar+q_0-1-theta)
        fig = plt.figure(frameon=False, figsize=(8,5), dpi=100)
        ax = fig.add_subplot(1,1,1)
        ax.plot(q_0,cd)
        ax.plot(q_0,cs)
        ax.set_xlim([q_0[0], q_0[-1]])
        ax.set_ylim([0, kplot_max])
        ax.set_xlabel('$q_0$')
        ax.set_ylabel('$k_1$')
        plt.legend(('Demand', 'Residual supply'), loc='lower left')
        fig.tight_layout()
    # One slider per free parameter; plot_demand is re-run on every change.
    widgets.interact(plot_demand,
                    theta = widgets.FloatSlider(
                        description="$\\theta$",
                        min = thetamin,
                        max = thetamax,
                        step = 0.05,
                        value = theta_base
                        ),
                    a = widgets.FloatSlider(
                        description="$a$",
                        min = amin,
                        max = amax,
                        step = 0.05,
                        value = a_base
                        ),
                    b_0 = widgets.FloatSlider(
                        description="$b_0$",
                        min =bmin,
                        max =bmax,
                        step = 0.05,
                        value = b_base
                        ),
                    k_0 = widgets.FloatSlider(
                        description="$k_0$",
                        min = kmin,
                        max = kmax,
                        step = 0.05,
                        value = k_base
                        ))
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.empty",
"ipywidgets.FloatSlider",
"matplotlib.pyplot.legend"
] | [((388, 406), 'numpy.empty', 'np.empty', (['q_0.size'], {}), '(q_0.size)\n', (396, 406), True, 'import numpy as np\n'), ((752, 802), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'frameon': '(False)', 'figsize': '(8, 5)', 'dpi': '(100)'}), '(frameon=False, figsize=(8, 5), dpi=100)\n', (762, 802), True, 'import matplotlib.pyplot as plt\n'), ((1031, 1090), 'matplotlib.pyplot.legend', 'plt.legend', (["('Demand', 'Residual supply')"], {'loc': '"""lower left"""'}), "(('Demand', 'Residual supply'), loc='lower left')\n", (1041, 1090), True, 'import matplotlib.pyplot as plt\n'), ((1168, 1273), 'ipywidgets.FloatSlider', 'widgets.FloatSlider', ([], {'description': '"""$\\\\theta$"""', 'min': 'thetamin', 'max': 'thetamax', 'step': '(0.05)', 'value': 'theta_base'}), "(description='$\\\\theta$', min=thetamin, max=thetamax,\n step=0.05, value=theta_base)\n", (1187, 1273), True, 'import ipywidgets as widgets\n'), ((1341, 1429), 'ipywidgets.FloatSlider', 'widgets.FloatSlider', ([], {'description': '"""$a$"""', 'min': 'amin', 'max': 'amax', 'step': '(0.05)', 'value': 'a_base'}), "(description='$a$', min=amin, max=amax, step=0.05, value\n =a_base)\n", (1360, 1429), True, 'import ipywidgets as widgets\n'), ((1498, 1587), 'ipywidgets.FloatSlider', 'widgets.FloatSlider', ([], {'description': '"""$b_0$"""', 'min': 'bmin', 'max': 'bmax', 'step': '(0.05)', 'value': 'b_base'}), "(description='$b_0$', min=bmin, max=bmax, step=0.05,\n value=b_base)\n", (1517, 1587), True, 'import ipywidgets as widgets\n'), ((1655, 1744), 'ipywidgets.FloatSlider', 'widgets.FloatSlider', ([], {'description': '"""$k_0$"""', 'min': 'kmin', 'max': 'kmax', 'step': '(0.05)', 'value': 'k_base'}), "(description='$k_0$', min=kmin, max=kmax, step=0.05,\n value=k_base)\n", (1674, 1744), True, 'import ipywidgets as widgets\n'), ((701, 718), 'numpy.ones', 'np.ones', (['q_0.size'], {}), '(q_0.size)\n', (708, 718), True, 'import numpy as np\n')] |
import os
import malmoenv
import argparse
from pathlib import Path
import time
from PIL import Image
from collections import deque
import gym
from gym import spaces
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from a2c_ppo_acktr import algo, utils
from a2c_ppo_acktr.algo import gail
from arguments import get_args
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.model import Policy
from a2c_ppo_acktr.storage import RolloutStorage
from evaluation import evaluate
from guided import *
def main():
    """Train an A2C/PPO/ACKTR agent on a MalmoEnv Minecraft mission.

    All hyperparameters and connection settings come from the command line
    (see arguments.get_args). Connects to an already-running Malmo mission
    server, then runs the standard rollout-collection / policy-update loop,
    printing losses each update and reward statistics periodically.
    """
    args = get_args()
    # Single-server setup: fall back to the primary server for server2.
    if args.server2 is None:
        args.server2 = args.server
    # The mission description is an XML document read from disk.
    xml = Path(args.mission).read_text()
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
        # Trade cuDNN autotuning for reproducible kernels.
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
    log_dir = os.path.expanduser(args.log_dir)
    eval_log_dir = log_dir + "_eval"
    utils.cleanup_log_dir(log_dir)
    utils.cleanup_log_dir(eval_log_dir)
    torch.set_num_threads(1)
    device = torch.device("cuda:0" if args.cuda else "cpu")
    # Connect to the Malmo mission server(s).
    env = malmoenv.make()
    env.init(xml, args.port,
             server=args.server,
             server2=args.server2, port2=args.port2,
             role=args.role,
             exp_uid=args.experimentUniqueId,
             episode=args.episode,
             resync=args.resync,
             reshape=True,)
    #obs_shape = (env.observation_space.shape[2],env.observation_space.shape[0],env.observation_space.shape[1])
    obs_shape = env.observation_space.shape
    # if len(obs_shape) == 3 and obs_shape[2] in [1, 3]:
    #     env = TransposeImage(env, op=[2, 0, 1])
    if args.guided:
        # NOTE(review): no actor_critic is built on this branch, so the
        # agent construction below would raise NameError — confirm intended.
        pass
    else:
        actor_critic = Policy(
            obs_shape,
            env.action_space,
            base_kwargs={'recurrent': args.recurrent_policy})
        actor_critic.to(device)
    # Build the learner for the chosen algorithm.
    if args.algo == 'a2c':
        agent = algo.A2C_ACKTR(
            actor_critic,
            args.value_loss_coef,
            args.entropy_coef,
            lr=args.lr,
            eps=args.eps,
            alpha=args.alpha,
            max_grad_norm=args.max_grad_norm)
    elif args.algo == 'ppo':
        agent = algo.PPO(
            actor_critic,
            args.clip_param,
            args.ppo_epoch,
            args.num_mini_batch,
            args.value_loss_coef,
            args.entropy_coef,
            lr=args.lr,
            eps=args.eps,
            max_grad_norm=args.max_grad_norm)
    elif args.algo == 'acktr':
        agent = algo.A2C_ACKTR(
            actor_critic, args.value_loss_coef, args.entropy_coef, acktr=True)
    # Fixed-size buffer holding one rollout of experience per update.
    rollouts = RolloutStorage(args.num_steps, args.num_processes,
                              obs_shape, env.action_space,
                              actor_critic.recurrent_hidden_state_size)
    obs = env.reset()
    obs = torch.from_numpy(obs).float().to(device)
    rollouts.obs[0].copy_(obs)
    rollouts.to(device)
    episode_rewards = deque(maxlen=10)
    start = time.time()
    num_updates = int(
        args.num_env_steps) // args.num_steps // args.num_processes
    for j in range(num_updates):
        if args.use_linear_lr_decay:
            # decrease learning rate linearly
            utils.update_linear_schedule(
                agent.optimizer, j, num_updates,
                agent.optimizer.lr if args.algo == "acktr" else args.lr)
        # ---- collect one rollout of args.num_steps transitions ----
        for step in range(args.num_steps):
            # Sample actions
            with torch.no_grad():
                value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
                    rollouts.obs[step], rollouts.recurrent_hidden_states[step],
                    rollouts.masks[step])
            # Obser reward and next obs
            obs, reward, done, infos = env.step(action)
            # Skip storing transitions for which the env reported no reward.
            if reward is None:
                continue #reward = 0.0
            obs = torch.from_numpy(obs).float().to(device)
            # for info in infos:
            #     if 'episode' in info.keys():
            #         episode_rewards.append(info['episode']['r'])
            # Episode boundary: record the final reward, restart the env,
            # and end this rollout early (no transition is stored).
            if done or step > args.episodemaxsteps:
                episode_rewards.append(reward)
                done = False
                obs = env.reset()
                obs = torch.from_numpy(obs).float().to(device)
                break
            # If done then clean the history of observations.
            # masks = torch.FloatTensor(
            #     [[0.0] if done_ else [1.0] for done_ in done])
            # bad_masks = torch.FloatTensor(
            #     [[0.0] if 'bad_transition' in info.keys() else [1.0]
            #      for info in infos])
            # Hardcode for testing
            masks = torch.FloatTensor([[1.0]]) #always not done
            bad_masks = torch.FloatTensor([[1.0]]) #always good transitions
            rollouts.insert(obs, recurrent_hidden_states, action,
                            action_log_prob, value, torch.FloatTensor([reward]), masks, bad_masks)
            #print(reward)
        # ---- compute returns and update the policy ----
        with torch.no_grad():
            next_value = actor_critic.get_value(
                rollouts.obs[-1], rollouts.recurrent_hidden_states[-1],
                rollouts.masks[-1]).detach()
        # obs = env.reset()
        rollouts.compute_returns(next_value, args.use_gae, args.gamma,
                                 args.gae_lambda, args.use_proper_time_limits)
        value_loss, action_loss, dist_entropy = agent.update(rollouts)
        total_num_steps = (j + 1) * args.num_processes * args.num_steps
        print("{} Steps, Value Loss: {}, Action Loss: {}".format(total_num_steps, value_loss, action_loss))
        rollouts.after_update()
        if j % args.log_interval == 0 and len(episode_rewards) > 1:
            total_num_steps = (j + 1) * args.num_processes * args.num_steps
            end = time.time()
            # NOTE(review): the format string consumes only the first 8
            # arguments; dist_entropy, value_loss and action_loss are passed
            # but ignored by str.format.
            print(
                "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n"
                .format(j, total_num_steps,
                        int(total_num_steps / (end - start)),
                        len(episode_rewards), np.mean(episode_rewards),
                        np.median(episode_rewards), np.min(episode_rewards),
                        np.max(episode_rewards), dist_entropy, value_loss,
                        action_loss))
    steps = 0
    done = False
    # while not done and (args.episodemaxsteps <= 0 or steps < args.episodemaxsteps):
    #     action = env.action_space.sample()
    #     obs, reward, done, info = env.step(action)
    #     obs = torch.from_numpy(obs).float().to(device) #unsqueeze?
    #     #reward = torch.from_numpy(reward).unsqueeze(dim=1).float()
    #     steps += 1
    #     print("reward: " + str(reward))
    #     # print("done: " + str(done))
    #     print("obs: " + str(obs))
    #     # print("info" + info)
    #     if args.saveimagesteps > 0 and steps % args.saveimagesteps == 0:
    #         d, h, w = env.observation_space.shape
    #         img = Image.fromarray(obs.reshape(h, w, d))
    #         img.save('image' + str(args.role) + '_' + str(steps) + '.png')
    time.sleep(.05)
    env.close()
# Standard script entry point: run the training loop when executed directly.
if __name__ == '__main__':
    main()
| [
"a2c_ppo_acktr.storage.RolloutStorage",
"time.sleep",
"torch.from_numpy",
"torch.cuda.is_available",
"numpy.mean",
"a2c_ppo_acktr.utils.cleanup_log_dir",
"collections.deque",
"malmoenv.make",
"pathlib.Path",
"torch.set_num_threads",
"numpy.max",
"numpy.min",
"arguments.get_args",
"os.path.... | [((596, 606), 'arguments.get_args', 'get_args', ([], {}), '()\n', (604, 606), False, 'from arguments import get_args\n'), ((717, 745), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (734, 745), False, 'import torch\n'), ((750, 787), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (776, 787), False, 'import torch\n'), ((977, 1009), 'os.path.expanduser', 'os.path.expanduser', (['args.log_dir'], {}), '(args.log_dir)\n', (995, 1009), False, 'import os\n'), ((1051, 1081), 'a2c_ppo_acktr.utils.cleanup_log_dir', 'utils.cleanup_log_dir', (['log_dir'], {}), '(log_dir)\n', (1072, 1081), False, 'from a2c_ppo_acktr import algo, utils\n'), ((1086, 1121), 'a2c_ppo_acktr.utils.cleanup_log_dir', 'utils.cleanup_log_dir', (['eval_log_dir'], {}), '(eval_log_dir)\n', (1107, 1121), False, 'from a2c_ppo_acktr import algo, utils\n'), ((1127, 1151), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (1148, 1151), False, 'import torch\n'), ((1165, 1211), 'torch.device', 'torch.device', (["('cuda:0' if args.cuda else 'cpu')"], {}), "('cuda:0' if args.cuda else 'cpu')\n", (1177, 1211), False, 'import torch\n'), ((1222, 1237), 'malmoenv.make', 'malmoenv.make', ([], {}), '()\n', (1235, 1237), False, 'import malmoenv\n'), ((2886, 3012), 'a2c_ppo_acktr.storage.RolloutStorage', 'RolloutStorage', (['args.num_steps', 'args.num_processes', 'obs_shape', 'env.action_space', 'actor_critic.recurrent_hidden_state_size'], {}), '(args.num_steps, args.num_processes, obs_shape, env.\n action_space, actor_critic.recurrent_hidden_state_size)\n', (2900, 3012), False, 'from a2c_ppo_acktr.storage import RolloutStorage\n'), ((3220, 3236), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (3225, 3236), False, 'from collections import deque\n'), ((3250, 3261), 'time.time', 'time.time', ([], {}), '()\n', (3259, 3261), False, 'import time\n'), ((810, 835), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (833, 835), False, 'import torch\n'), ((1863, 1953), 'a2c_ppo_acktr.model.Policy', 'Policy', (['obs_shape', 'env.action_space'], {'base_kwargs': "{'recurrent': args.recurrent_policy}"}), "(obs_shape, env.action_space, base_kwargs={'recurrent': args.\n recurrent_policy})\n", (1869, 1953), False, 'from a2c_ppo_acktr.model import Policy\n'), ((7497, 7513), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (7507, 7513), False, 'import time\n'), ((682, 700), 'pathlib.Path', 'Path', (['args.mission'], {}), '(args.mission)\n', (686, 700), False, 'from pathlib import Path\n'), ((2079, 2231), 'a2c_ppo_acktr.algo.A2C_ACKTR', 'algo.A2C_ACKTR', (['actor_critic', 'args.value_loss_coef', 'args.entropy_coef'], {'lr': 'args.lr', 'eps': 'args.eps', 'alpha': 'args.alpha', 'max_grad_norm': 'args.max_grad_norm'}), '(actor_critic, args.value_loss_coef, args.entropy_coef, lr=\n args.lr, eps=args.eps, alpha=args.alpha, max_grad_norm=args.max_grad_norm)\n', (2093, 2231), False, 'from a2c_ppo_acktr import algo, utils\n'), ((3482, 3605), 'a2c_ppo_acktr.utils.update_linear_schedule', 'utils.update_linear_schedule', (['agent.optimizer', 'j', 'num_updates', "(agent.optimizer.lr if args.algo == 'acktr' else args.lr)"], {}), "(agent.optimizer, j, num_updates, agent.\n optimizer.lr if args.algo == 'acktr' else args.lr)\n", (3510, 3605), False, 'from a2c_ppo_acktr import algo, utils\n'), ((4946, 4972), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0]]'], {}), '([[1.0]])\n', (4963, 4972), False, 'import torch\n'), ((5014, 5040), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0]]'], {}), '([[1.0]])\n', (5031, 5040), False, 'import torch\n'), ((5284, 5299), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5297, 5299), False, 'import torch\n'), ((6098, 6109), 'time.time', 'time.time', ([], {}), '()\n', (6107, 6109), False, 'import time\n'), ((2393, 2578), 'a2c_ppo_acktr.algo.PPO', 'algo.PPO', (['actor_critic', 
'args.clip_param', 'args.ppo_epoch', 'args.num_mini_batch', 'args.value_loss_coef', 'args.entropy_coef'], {'lr': 'args.lr', 'eps': 'args.eps', 'max_grad_norm': 'args.max_grad_norm'}), '(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch,\n args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps,\n max_grad_norm=args.max_grad_norm)\n', (2401, 2578), False, 'from a2c_ppo_acktr import algo, utils\n'), ((3724, 3739), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3737, 3739), False, 'import torch\n'), ((5197, 5224), 'torch.FloatTensor', 'torch.FloatTensor', (['[reward]'], {}), '([reward])\n', (5214, 5224), False, 'import torch\n'), ((2771, 2857), 'a2c_ppo_acktr.algo.A2C_ACKTR', 'algo.A2C_ACKTR', (['actor_critic', 'args.value_loss_coef', 'args.entropy_coef'], {'acktr': '(True)'}), '(actor_critic, args.value_loss_coef, args.entropy_coef, acktr\n =True)\n', (2785, 2857), False, 'from a2c_ppo_acktr import algo, utils\n'), ((3101, 3122), 'torch.from_numpy', 'torch.from_numpy', (['obs'], {}), '(obs)\n', (3117, 3122), False, 'import torch\n'), ((6431, 6455), 'numpy.mean', 'np.mean', (['episode_rewards'], {}), '(episode_rewards)\n', (6438, 6455), True, 'import numpy as np\n'), ((6481, 6507), 'numpy.median', 'np.median', (['episode_rewards'], {}), '(episode_rewards)\n', (6490, 6507), True, 'import numpy as np\n'), ((6509, 6532), 'numpy.min', 'np.min', (['episode_rewards'], {}), '(episode_rewards)\n', (6515, 6532), True, 'import numpy as np\n'), ((6558, 6581), 'numpy.max', 'np.max', (['episode_rewards'], {}), '(episode_rewards)\n', (6564, 6581), True, 'import numpy as np\n'), ((4140, 4161), 'torch.from_numpy', 'torch.from_numpy', (['obs'], {}), '(obs)\n', (4156, 4161), False, 'import torch\n'), ((4505, 4526), 'torch.from_numpy', 'torch.from_numpy', (['obs'], {}), '(obs)\n', (4521, 4526), False, 'import torch\n')] |
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''
Non-relativistic magnetizability tensor for UHF
(In testing)
Refs:
[1] <NAME>, J. Chem. Phys., 109, 3185 (1998)
[2] <NAME>, Chem. Phys., 213, 123 (1996)
'''
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import jk
from pyscf.prop.nmr import uhf as uhf_nmr
from pyscf.prop.magnetizability import rhf as rhf_mag
def dia(magobj, gauge_orig=None):
    """Diamagnetic part of the UHF magnetizability tensor.

    Builds the alpha/beta occupied-orbital density matrices and their
    energy-weighted counterparts, contracts them with the one-electron
    GIAO terms, and — when no common gauge origin is given — adds the
    GIAO two-electron Coulomb/exchange corrections.

    Returns a (3, 3) ndarray.
    """
    mol = magobj.mol
    scf_obj = magobj._scf
    mo_energy = scf_obj.mo_energy
    mo_coeff = scf_obj.mo_coeff
    mo_occ = scf_obj.mo_occ
    # Occupied alpha/beta orbital coefficients.
    occ_a = mo_coeff[0][:,mo_occ[0] > 0]
    occ_b = mo_coeff[1][:,mo_occ[1] > 0]
    # Spin density matrices and their sum.
    dm_a = numpy.dot(occ_a, occ_a.T)
    dm_b = numpy.dot(occ_b, occ_b.T)
    dm_tot = dm_a + dm_b
    # Energy-weighted density matrices.
    dme_a = numpy.dot(occ_a * mo_energy[0][mo_occ[0] > 0], occ_a.T)
    dme_b = numpy.dot(occ_b * mo_energy[1][mo_occ[1] > 0], occ_b.T)
    dme_tot = dme_a + dme_b
    # One-electron contribution, flattened to length 9.
    e2 = rhf_mag._get_dia_1e(magobj, gauge_orig, dm_tot, dme_tot).ravel()

    if gauge_orig is None:
        # GIAO two-electron terms: one Coulomb-like contraction on the
        # total density and four exchange-like contractions per spin.
        vs = jk.get_jk(mol, [dm_tot, dm_a, dm_a, dm_b, dm_b],
                       ['ijkl,ji->s2kl',
                        'ijkl,jk->s1il', 'ijkl,li->s1kj',
                        'ijkl,jk->s1il', 'ijkl,li->s1kj'],
                       'int2e_gg1', 's4', 9, hermi=1)
        e2 += numpy.einsum('xpq,qp->x', vs[0], dm_tot)
        for v_x, dm_x in zip(vs[1:], (dm_a, dm_a, dm_b, dm_b)):
            e2 -= numpy.einsum('xpq,qp->x', v_x, dm_x) * .5

        vk = jk.get_jk(mol, [dm_a, dm_b], ['ijkl,jk->s1il', 'ijkl,jk->s1il'],
                       'int2e_g1g2', 'aa4', 9, hermi=0)
        e2 -= numpy.einsum('xpq,qp->x', vk[0], dm_a)
        e2 -= numpy.einsum('xpq,qp->x', vk[1], dm_b)

    return -e2.reshape(3, 3)
# Note mo10 is the imaginary part of MO^1
def para(magobj, gauge_orig=None, h1=None, s1=None, with_cphf=None):
    '''Paramagnetic susceptibility tensor

    Kwargs:
        h1: A list of arrays. Shapes are [(3,nmo_a,nocc_a), (3,nmo_b,nocc_b)]
            First order Fock matrices in MO basis.
        s1: A list of arrays. Shapes are [(3,nmo_a,nocc_a), (3,nmo_b,nocc_b)]
            First order overlap matrices in MO basis.
        with_cphf : boolean or function(dm_mo) => v1_mo
            If a boolean value is given, the value determines whether CPHF
            equation will be solved or not. The induced potential will be
            generated by the function gen_vind.
            If a function is given, CPHF equation will be solved, and the
            given function is used to compute induced potential
    '''
    log = logger.Logger(magobj.stdout, magobj.verbose)
    # time.clock() was removed in Python 3.8; time.process_time() is the
    # documented replacement for CPU-time measurement.
    cput1 = (time.process_time(), time.time())

    mol = magobj.mol
    mo_energy = magobj._scf.mo_energy
    mo_coeff = magobj._scf.mo_coeff
    mo_occ = magobj._scf.mo_occ
    # Occupied alpha/beta orbital coefficients.
    orboa = mo_coeff[0][:,mo_occ[0] > 0]
    orbob = mo_coeff[1][:,mo_occ[1] > 0]

    if h1 is None:
        # Imaginary part of F10: first-order Fock matrices in MO basis.
        dm0 = (numpy.dot(orboa, orboa.T), numpy.dot(orbob, orbob.T))
        h1 = magobj.get_fock(dm0, gauge_orig)
        h1 = (lib.einsum('xpq,pi,qj->xij', h1[0], mo_coeff[0].conj(), orboa),
              lib.einsum('xpq,pi,qj->xij', h1[1], mo_coeff[1].conj(), orbob))
        cput1 = log.timer('first order Fock matrix', *cput1)
    if s1 is None:
        # Imaginary part of S10: first-order overlap matrices in MO basis.
        s1 = magobj.get_ovlp(mol, gauge_orig)
        s1 = (lib.einsum('xpq,pi,qj->xij', s1, mo_coeff[0].conj(), orboa),
              lib.einsum('xpq,pi,qj->xij', s1, mo_coeff[1].conj(), orbob))
    if with_cphf is None:
        # Fall back to the object's configured CPHF setting only when the
        # caller did not pass with_cphf explicitly.  (Previously this
        # assignment was unconditional and silently discarded the keyword
        # argument.)
        with_cphf = magobj.cphf
    mo1, mo_e1 = uhf_nmr.solve_mo1(magobj, mo_energy, mo_coeff, mo_occ,
                                   h1, s1, with_cphf)
    cput1 = logger.timer(magobj, 'solving mo1 eqn', *cput1)

    occidxa = mo_occ[0] > 0
    occidxb = mo_occ[1] > 0
    # <MO1|H1> contributions from alpha and beta first-order MOs.
    mag_para = numpy.einsum('yji,xji->xy', mo1[0], h1[0])
    mag_para+= numpy.einsum('yji,xji->xy', mo1[1], h1[1])
    # Overlap-response corrections weighted by occupied orbital energies.
    mag_para-= numpy.einsum('yji,xji,i->xy', mo1[0], s1[0], mo_energy[0][occidxa])
    mag_para-= numpy.einsum('yji,xji,i->xy', mo1[1], s1[1], mo_energy[1][occidxb])

    # + c.c.
    mag_para = mag_para + mag_para.conj()

    mag_para-= numpy.einsum('xij,yij->xy', s1[0][:,occidxa], mo_e1[0])
    mag_para-= numpy.einsum('xij,yij->xy', s1[1][:,occidxb], mo_e1[1])
    return -mag_para
class Magnetizability(rhf_mag.Magnetizability):
    """UHF magnetizability: reuses the RHF driver, overriding the
    dia/para terms with the spin-unrestricted versions above and the
    first-order Fock builder with the UHF NMR one."""
    dia = dia
    para = para
    get_fock = uhf_nmr.get_fock
if __name__ == '__main__':
    from pyscf import gto
    from pyscf import scf
    # Self-test 1: closed-shell HF molecule, 6-31G; SCF output silenced.
    mol = gto.Mole()
    mol.verbose = 7
    mol.output = '/dev/null'
    mol.atom = '''h , 0. 0. 0.
              F , 0. 0. .917'''
    mol.basis = '631g'
    mol.build()
    mf = scf.UHF(mol).run()
    # GIAO (no gauge origin) with CPHF; each print should be ~0 when the
    # computed tensor fingerprint matches the stored reference value.
    mag = Magnetizability(mf)
    mag.cphf = True
    m = mag.kernel()
    print(lib.finger(m) - -0.43596639996758657)
    # Common gauge origin with CPHF.
    mag.gauge_orig = (0,0,1)
    m = mag.kernel()
    print(lib.finger(m) - -0.76996086788058238)
    # Common gauge origin without CPHF (uncoupled).
    mag.gauge_orig = (0,0,1)
    mag.cphf = False
    m = mag.kernel()
    print(lib.finger(m) - -0.7973915717274408)
    # Self-test 2: open-shell (triplet) water, cc-pVDZ, GIAO + CPHF.
    mol = gto.M(atom='''O      0.   0.       0.
                      H      0.  -0.757    0.587
                      H      0.   0.757    0.587''',
          basis='ccpvdz', spin=2)
    mf = scf.UHF(mol).run()
    mag = Magnetizability(mf)
    mag.cphf = True
    m = mag.kernel()
    print(lib.finger(m) - -4.6700053640388353)
| [
"pyscf.gto.Mole",
"pyscf.scf.UHF",
"pyscf.lib.logger.timer",
"pyscf.gto.M",
"time.clock",
"pyscf.prop.magnetizability.rhf._get_dia_1e",
"pyscf.lib.logger.Logger",
"numpy.dot",
"numpy.einsum",
"pyscf.lib.finger",
"pyscf.prop.nmr.uhf.solve_mo1",
"pyscf.scf.jk.get_jk",
"time.time"
] | [((1332, 1357), 'numpy.dot', 'numpy.dot', (['orboa', 'orboa.T'], {}), '(orboa, orboa.T)\n', (1341, 1357), False, 'import numpy\n'), ((1369, 1394), 'numpy.dot', 'numpy.dot', (['orbob', 'orbob.T'], {}), '(orbob, orbob.T)\n', (1378, 1394), False, 'import numpy\n'), ((1429, 1484), 'numpy.dot', 'numpy.dot', (['(orboa * mo_energy[0][mo_occ[0] > 0])', 'orboa.T'], {}), '(orboa * mo_energy[0][mo_occ[0] > 0], orboa.T)\n', (1438, 1484), False, 'import numpy\n'), ((1497, 1552), 'numpy.dot', 'numpy.dot', (['(orbob * mo_energy[1][mo_occ[1] > 0])', 'orbob.T'], {}), '(orbob * mo_energy[1][mo_occ[1] > 0], orbob.T)\n', (1506, 1552), False, 'import numpy\n'), ((3340, 3384), 'pyscf.lib.logger.Logger', 'logger.Logger', (['magobj.stdout', 'magobj.verbose'], {}), '(magobj.stdout, magobj.verbose)\n', (3353, 3384), False, 'from pyscf.lib import logger\n'), ((4333, 4406), 'pyscf.prop.nmr.uhf.solve_mo1', 'uhf_nmr.solve_mo1', (['magobj', 'mo_energy', 'mo_coeff', 'mo_occ', 'h1', 's1', 'with_cphf'], {}), '(magobj, mo_energy, mo_coeff, mo_occ, h1, s1, with_cphf)\n', (4350, 4406), True, 'from pyscf.prop.nmr import uhf as uhf_nmr\n'), ((4454, 4501), 'pyscf.lib.logger.timer', 'logger.timer', (['magobj', '"""solving mo1 eqn"""', '*cput1'], {}), "(magobj, 'solving mo1 eqn', *cput1)\n", (4466, 4501), False, 'from pyscf.lib import logger\n'), ((4574, 4616), 'numpy.einsum', 'numpy.einsum', (['"""yji,xji->xy"""', 'mo1[0]', 'h1[0]'], {}), "('yji,xji->xy', mo1[0], h1[0])\n", (4586, 4616), False, 'import numpy\n'), ((4632, 4674), 'numpy.einsum', 'numpy.einsum', (['"""yji,xji->xy"""', 'mo1[1]', 'h1[1]'], {}), "('yji,xji->xy', mo1[1], h1[1])\n", (4644, 4674), False, 'import numpy\n'), ((4690, 4757), 'numpy.einsum', 'numpy.einsum', (['"""yji,xji,i->xy"""', 'mo1[0]', 's1[0]', 'mo_energy[0][occidxa]'], {}), "('yji,xji,i->xy', mo1[0], s1[0], mo_energy[0][occidxa])\n", (4702, 4757), False, 'import numpy\n'), ((4773, 4840), 'numpy.einsum', 'numpy.einsum', (['"""yji,xji,i->xy"""', 'mo1[1]', 's1[1]', 
'mo_energy[1][occidxb]'], {}), "('yji,xji,i->xy', mo1[1], s1[1], mo_energy[1][occidxb])\n", (4785, 4840), False, 'import numpy\n'), ((4912, 4968), 'numpy.einsum', 'numpy.einsum', (['"""xij,yij->xy"""', 's1[0][:, occidxa]', 'mo_e1[0]'], {}), "('xij,yij->xy', s1[0][:, occidxa], mo_e1[0])\n", (4924, 4968), False, 'import numpy\n'), ((4983, 5039), 'numpy.einsum', 'numpy.einsum', (['"""xij,yij->xy"""', 's1[1][:, occidxb]', 'mo_e1[1]'], {}), "('xij,yij->xy', s1[1][:, occidxb], mo_e1[1])\n", (4995, 5039), False, 'import numpy\n'), ((5263, 5273), 'pyscf.gto.Mole', 'gto.Mole', ([], {}), '()\n', (5271, 5273), False, 'from pyscf import gto\n'), ((5819, 5996), 'pyscf.gto.M', 'gto.M', ([], {'atom': '"""O 0. 0. 0.\n H 0. -0.757 0.587\n H 0. 0.757 0.587"""', 'basis': '"""ccpvdz"""', 'spin': '(2)'}), '(atom=\n """O 0. 0. 0.\n H 0. -0.757 0.587\n H 0. 0.757 0.587"""\n , basis=\'ccpvdz\', spin=2)\n', (5824, 5996), False, 'from pyscf import gto\n'), ((1688, 1859), 'pyscf.scf.jk.get_jk', 'jk.get_jk', (['mol', '[dm0, dm0a, dm0a, dm0b, dm0b]', "['ijkl,ji->s2kl', 'ijkl,jk->s1il', 'ijkl,li->s1kj', 'ijkl,jk->s1il',\n 'ijkl,li->s1kj']", '"""int2e_gg1"""', '"""s4"""', '(9)'], {'hermi': '(1)'}), "(mol, [dm0, dm0a, dm0a, dm0b, dm0b], ['ijkl,ji->s2kl',\n 'ijkl,jk->s1il', 'ijkl,li->s1kj', 'ijkl,jk->s1il', 'ijkl,li->s1kj'],\n 'int2e_gg1', 's4', 9, hermi=1)\n", (1697, 1859), False, 'from pyscf.scf import jk\n'), ((1960, 1997), 'numpy.einsum', 'numpy.einsum', (['"""xpq,qp->x"""', 'vs[0]', 'dm0'], {}), "('xpq,qp->x', vs[0], dm0)\n", (1972, 1997), False, 'import numpy\n'), ((2244, 2345), 'pyscf.scf.jk.get_jk', 'jk.get_jk', (['mol', '[dm0a, dm0b]', "['ijkl,jk->s1il', 'ijkl,jk->s1il']", '"""int2e_g1g2"""', '"""aa4"""', '(9)'], {'hermi': '(0)'}), "(mol, [dm0a, dm0b], ['ijkl,jk->s1il', 'ijkl,jk->s1il'],\n 'int2e_g1g2', 'aa4', 9, hermi=0)\n", (2253, 2345), False, 'from pyscf.scf import jk\n'), ((2379, 2417), 'numpy.einsum', 'numpy.einsum', (['"""xpq,qp->x"""', 'vk[0]', 'dm0a'], {}), "('xpq,qp->x', vk[0], 
dm0a)\n", (2391, 2417), False, 'import numpy\n'), ((2432, 2470), 'numpy.einsum', 'numpy.einsum', (['"""xpq,qp->x"""', 'vk[1]', 'dm0b'], {}), "('xpq,qp->x', vk[1], dm0b)\n", (2444, 2470), False, 'import numpy\n'), ((3398, 3410), 'time.clock', 'time.clock', ([], {}), '()\n', (3408, 3410), False, 'import time\n'), ((3412, 3423), 'time.time', 'time.time', ([], {}), '()\n', (3421, 3423), False, 'import time\n'), ((1588, 1638), 'pyscf.prop.magnetizability.rhf._get_dia_1e', 'rhf_mag._get_dia_1e', (['magobj', 'gauge_orig', 'dm0', 'dme0'], {}), '(magobj, gauge_orig, dm0, dme0)\n', (1607, 1638), True, 'from pyscf.prop.magnetizability import rhf as rhf_mag\n'), ((2012, 2050), 'numpy.einsum', 'numpy.einsum', (['"""xpq,qp->x"""', 'vs[1]', 'dm0a'], {}), "('xpq,qp->x', vs[1], dm0a)\n", (2024, 2050), False, 'import numpy\n'), ((2070, 2108), 'numpy.einsum', 'numpy.einsum', (['"""xpq,qp->x"""', 'vs[2]', 'dm0a'], {}), "('xpq,qp->x', vs[2], dm0a)\n", (2082, 2108), False, 'import numpy\n'), ((2128, 2166), 'numpy.einsum', 'numpy.einsum', (['"""xpq,qp->x"""', 'vs[3]', 'dm0b'], {}), "('xpq,qp->x', vs[3], dm0b)\n", (2140, 2166), False, 'import numpy\n'), ((2186, 2224), 'numpy.einsum', 'numpy.einsum', (['"""xpq,qp->x"""', 'vs[4]', 'dm0b'], {}), "('xpq,qp->x', vs[4], dm0b)\n", (2198, 2224), False, 'import numpy\n'), ((3723, 3748), 'numpy.dot', 'numpy.dot', (['orboa', 'orboa.T'], {}), '(orboa, orboa.T)\n', (3732, 3748), False, 'import numpy\n'), ((3750, 3775), 'numpy.dot', 'numpy.dot', (['orbob', 'orbob.T'], {}), '(orbob, orbob.T)\n', (3759, 3775), False, 'import numpy\n'), ((5451, 5463), 'pyscf.scf.UHF', 'scf.UHF', (['mol'], {}), '(mol)\n', (5458, 5463), False, 'from pyscf import scf\n'), ((5551, 5564), 'pyscf.lib.finger', 'lib.finger', (['m'], {}), '(m)\n', (5561, 5564), False, 'from pyscf import lib\n'), ((5650, 5663), 'pyscf.lib.finger', 'lib.finger', (['m'], {}), '(m)\n', (5660, 5663), False, 'from pyscf import lib\n'), ((5770, 5783), 'pyscf.lib.finger', 'lib.finger', (['m'], {}), 
'(m)\n', (5780, 5783), False, 'from pyscf import lib\n'), ((6012, 6024), 'pyscf.scf.UHF', 'scf.UHF', (['mol'], {}), '(mol)\n', (6019, 6024), False, 'from pyscf import scf\n'), ((6112, 6125), 'pyscf.lib.finger', 'lib.finger', (['m'], {}), '(m)\n', (6122, 6125), False, 'from pyscf import lib\n')] |
### ------------------------------------------------------------------------- ###
### Create binary files of raw stim vid luminance values fitted to world cam stim vid presentation timings
### use world camera vids for timing, use raw vid luminance values extracted via bonsai
### also save world cam luminance as sanity check/ground truth
### create monthly averages of both raw live stim vid and world cam sanity check
### output as data files
### NOTE: NEED TO MODIFY FIRST FUNCTION BASED ON LOCATION OF SOURCE DATASET AND INTERMEDIATE PUPIL TRACKING DATA
### WHEN RUNNING FROM TERMINAL: add optional "restart" to delete previous runs of this script and start over
### NOTE: make sure this script is in a directory with a "__init__.py" file, so that this script can be treated as a module
### ------------------------------------------------------------------------- ###
import logging
import os
import glob
import cv2
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import zipfile
import shutil
import fnmatch
import sys
import math
import csv
import argparse
import time
###################################
# SET CURRENT WORKING DIRECTORY
###################################
# Captured once at import time; note unpack_to_temp() re-queries os.getcwd()
# itself, so this module-level value is informational.
current_working_directory = os.getcwd()
###################################
# SCRIPT LOGGER
###################################
# grab today's date (used only to timestamp the log filename)
now = datetime.datetime.now()
# One log file per run, named with the launch timestamp; records INFO and above.
logging.basicConfig(filename="psa01_MonthlyMeans_WorldCam_RawLiveStim_" + now.strftime("%Y-%m-%d_%H-%M-%S") + ".log", filemode='w', level=logging.INFO, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M')
###################################
# FUNCTIONS
###################################
##########################################################
#### MODIFY THIS FIRST FUNCTION BASED ON THE LOCATIONS OF:
# 1) MAIN SURPRISING MINDS SOURCE DATASET (LEFT/RIGHT EYE CAMERA VIDEOS, WORLD CAMERA VIDEO, AND ACCOMPANYING TIMESTAMPS FOR EACH PARTICIPANT)
# AND
# 2) INTERMEDIATE PUPIL SIZE AND LOCATION FILES (WITH ACCOMPANYING WORLD CAM ALIGNMENT IMAGES)
### Current default uses a debugging source dataset
##########################################################
def load_data(location='laptop'):
    """Resolve dataset and analysis folder locations for a known machine.

    Args:
        location: one of 'laptop', 'office_real', 'office_debug'.

    Returns:
        Tuple of (data_drive, analysed_drive, rawStimLum_data,
        analysed_folders, daily_csv_files, monthly_extracted_data).

    Raises:
        ValueError: if location is not a recognised machine name.
            (Previously an unknown location fell through silently and the
            function later crashed with a confusing NameError.)
    """
    if location == 'laptop':
        data_drive = r"C:\Users\taunsquared\Dropbox\SurprisingMinds\analysis\debuggingData"
        analysed_drive = r"C:\Users\taunsquared\Dropbox\SurprisingMinds\analysis\dataPythonWorkflows"
    elif location == 'office_real':
        data_drive = r"\\Diskstation\SurprisingMinds"
        analysed_drive = r"C:\Users\Kampff_Lab\Dropbox\SurprisingMinds\analysis\dataPythonWorkflows"
    elif location == 'office_debug':
        data_drive = r"C:\Users\Kampff_Lab\Dropbox\SurprisingMinds\analysis\debuggingData"
        analysed_drive = r"C:\Users\Kampff_Lab\Dropbox\SurprisingMinds\analysis\dataPythonWorkflows"
    else:
        raise ValueError("Unknown location: {!r}; expected 'laptop', 'office_real' or 'office_debug'".format(location))
    # collect input data subfolders
    rawStimLum_data = os.path.join(analysed_drive, "rawStimLums")
    analysed_folders = sorted(os.listdir(analysed_drive))
    daily_csv_files = fnmatch.filter(analysed_folders, 'SurprisingMinds_*')
    monthly_extracted_data = fnmatch.filter(analysed_folders, 'MeanStimuli_*')
    return data_drive, analysed_drive, rawStimLum_data, analysed_folders, daily_csv_files, monthly_extracted_data
##########################################################
def unpack_to_temp(path_to_zipped, path_to_temp):
    """Copy a zipped session folder into the working directory, extract it
    into path_to_temp, then delete the local copy of the archive.

    Returns True on success; on any failure logs a warning and returns
    False instead of raising.
    """
    try:
        # Work on a local copy of the archive so the source drive is untouched.
        workdir = os.getcwd()
        local_copy = shutil.copy2(path_to_zipped, workdir)
        local_copy_path = os.path.join(workdir, local_copy.split(sep=os.sep)[-1])
        # Extract everything into the temp folder; the context manager
        # guarantees the archive handle is closed afterwards.
        with zipfile.ZipFile(local_copy_path, mode="r") as archive:
            archive.extractall(path_to_temp)
        # Remove the throwaway local copy.
        os.remove(local_copy_path)
        return True
    except Exception:
        logging.warning("Could not unzip {folder}".format(folder=path_to_zipped))
        return False
def list_sub_folders(path_to_root_folder):
    """Return the full paths of the immediate subdirectories of a folder,
    in os.listdir order; plain files are skipped."""
    candidates = (os.path.join(path_to_root_folder, entry)
                  for entry in os.listdir(path_to_root_folder))
    return [candidate for candidate in candidates if os.path.isdir(candidate)]
def make_time_buckets(start_timestamp, bucket_size_ms, end_timestamp, fill_pattern):
    """Build a dict mapping every bucket_size_ms-spaced datetime between the
    two timestamps (inclusive) to fill_pattern.

    Timestamps are ISO strings carrying a trailing '+offset' and 6-digit
    microseconds; the offset and the last three microsecond digits are
    stripped so the value parses at millisecond precision.
    """
    start_trimmed = start_timestamp.split('+')[0][:-3]
    end_trimmed = end_timestamp.split('+')[0][:-3]
    bucket_start = datetime.datetime.strptime(start_trimmed, "%Y-%m-%dT%H:%M:%S.%f")
    bucket_end = datetime.datetime.strptime(end_trimmed, "%Y-%m-%dT%H:%M:%S.%f")
    step = datetime.timedelta(milliseconds=bucket_size_ms)
    bucket_list = {}
    cursor = bucket_start
    while cursor <= bucket_end:
        # NOTE: every bucket shares the one fill_pattern object
        bucket_list[cursor] = fill_pattern
        cursor = cursor + step
    return bucket_list
def find_nearest_timestamp_key(timestamp_to_check, dict_of_timestamps, time_window):
    """Return the first key whose [key, key + time_window] interval contains
    timestamp_to_check, or None when no interval matches."""
    return next(
        (key for key in dict_of_timestamps.keys()
         if key <= timestamp_to_check <= (key + time_window)),
        None,
    )
def supersampled_worldCam_rawLiveVid(video_path, video_timestamps, rawStimVidData_dict, output_folder, bucket_size_ms):
    """Resample one trial's world-cam video and raw stimulus luminance onto a
    fixed time-bucket grid (bucket_size_ms wide), keyed by the world camera's
    own frame timestamps.

    For each world-cam frame: (a) the flattened grayscale frame is stored in
    its time bucket as a sanity check, and (b) the raw stimulus video's
    per-frame luminance for the current playback phase (DoNotMove-English ->
    Calibration -> numbered stimulus) is stored in its bucket, reconstructing
    a "raw live stimulus" trace on the world camera's clock.  Buckets with no
    raw-live value are forward-filled with the previous luminance.

    video_timestamps: one ISO timestamp string per frame; assumed to carry a
    '+offset' suffix and 6-digit microseconds (the format make_time_buckets
    expects) -- TODO confirm against the *world.csv writer.
    rawStimVidData_dict: phase name -> {'Number of Frames', 'Luminance per Frame'}.
    output_folder: accepted but not referenced in this function (saving
    happens in the caller) -- presumably kept for interface symmetry.

    Returns (vid_width, vid_height, supersampled_worldCam_array,
    supersampled_rawLiveVid_array).
    """
    # Get video file details (filename pattern: date_time_stimNumber_world.avi)
    video_name = video_path.split(os.sep)[-1]
    video_date = video_name.split('_')[0]
    video_time = video_name.split('_')[1]
    video_stim_number = video_name.split('_')[2]
    # Open world video
    world_vid = cv2.VideoCapture(video_path)
    # 3 and 4 are cv2.CAP_PROP_FRAME_WIDTH / cv2.CAP_PROP_FRAME_HEIGHT
    vid_width = int(world_vid.get(3))
    vid_height = int(world_vid.get(4))
    # create rawLiveVid output array
    first_timestamp = video_timestamps[0]
    last_timestamp = video_timestamps[-1]
    # rawLiveVid buckets start as the np.nan singleton itself; that identity
    # is what the forward-fill check at the bottom of this function relies on
    rawLiveVid_initializePattern = np.nan
    rawLiveVid_buckets = make_time_buckets(first_timestamp, bucket_size_ms, last_timestamp, rawLiveVid_initializePattern)
    # worldCam buckets start as one shared NaN-filled frame-sized vector
    sanityCheck_initializePattern = np.empty((vid_height*vid_width,))
    sanityCheck_initializePattern[:] = np.nan
    worldCam_sanityCheck_buckets = make_time_buckets(first_timestamp, bucket_size_ms, last_timestamp, sanityCheck_initializePattern)
    # Loop through 4ms time buckets of world video to find nearest frame and save 2-d matrix of pixel values in that frame
    # stimStructure = ['DoNotMove-English', 'Calibration', 'stimuli024', 'stimuli025', 'stimuli026', 'stimuli027', 'stimuli028', 'stimuli029', ]
    doNotMove_frameCount = rawStimVidData_dict['DoNotMove-English']['Number of Frames']
    calib_frameCount = rawStimVidData_dict['Calibration']['Number of Frames']
    # keep track of how many frames have been processed
    frame_count = 0
    for timestamp in video_timestamps:
        # find the time bucket into which this frame falls
        timestamp = timestamp.split('+')[0][:-3]
        timestamp_dt = datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%f")
        bucket_window = datetime.timedelta(milliseconds=bucket_size_ms)
        # fill in luminance values from world cam video as a sanity check
        currentKey_sanityCheck = find_nearest_timestamp_key(timestamp_dt, worldCam_sanityCheck_buckets, bucket_window)
        # Read frame at current position
        # should this be at current key??
        ret, frame = world_vid.read()
        # Make sure the frame exists!
        if frame is not None:
            # Convert to grayscale
            gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
            # flatten the frame into a list
            flattened_gray = gray.ravel()
            flattened_gray = flattened_gray.astype(None)
            # append to dictionary stim_buckets
            worldCam_sanityCheck_buckets[currentKey_sanityCheck] = flattened_gray
        # fill in luminance values from raw videos based on timing of framerate in world camera timestamps
        currentKey_rLV = find_nearest_timestamp_key(timestamp_dt, rawLiveVid_buckets, bucket_window)
        # map this frame index onto the raw-stimulus playback phase; the three
        # range tests are mutually exclusive, so plain ifs behave like elif
        if frame_count < doNotMove_frameCount:
            rawVidPhase = 'DoNotMove-English'
            frame_index = frame_count
        if doNotMove_frameCount <= frame_count < doNotMove_frameCount + calib_frameCount:
            rawVidPhase = 'Calibration'
            frame_index = frame_count - doNotMove_frameCount
        if doNotMove_frameCount + calib_frameCount <= frame_count:
            rawVidPhase = video_stim_number
            if frame_count < doNotMove_frameCount + calib_frameCount + rawStimVidData_dict[rawVidPhase]['Number of Frames']:
                frame_index = frame_count - doNotMove_frameCount - calib_frameCount
            else:
                # world cam kept recording past the end of the stimulus video;
                # stop processing further frames
                break
        rawLiveVid_buckets[currentKey_rLV] = rawStimVidData_dict[rawVidPhase]['Luminance per Frame'][frame_index]
        #print('Processing frame %d from %s phase (total frame count: %d)' % (frame_index, rawVidPhase, frame_count))
        frame_count = frame_count + 1
    # release video capture
    world_vid.release()
    # generate rawLiveVid luminance array output
    supersampled_rawLiveVid = []
    current_lumVal = 0
    for timestamp in sorted(rawLiveVid_buckets.keys()):
        # identity check against the np.nan singleton distinguishes untouched
        # buckets from real luminance values; untouched buckets forward-fill
        if rawLiveVid_buckets[timestamp] is not np.nan:
            supersampled_rawLiveVid.append(rawLiveVid_buckets[timestamp])
            current_lumVal = rawLiveVid_buckets[timestamp]
        else:
            supersampled_rawLiveVid.append(current_lumVal)
    supersampled_rawLiveVid_array = np.array(supersampled_rawLiveVid)
    # generate worldCam sanityCheck luminance array output
    supersampled_worldCam = []
    current_frame = sanityCheck_initializePattern
    for timestamp in sorted(worldCam_sanityCheck_buckets.keys()):
        # NOTE(review): these buckets hold arrays, never the np.nan singleton,
        # so this condition is always True -- untouched buckets append the
        # shared NaN-filled init vector instead of forward-filling; confirm
        # this is the intended behavior
        if worldCam_sanityCheck_buckets[timestamp] is not np.nan:
            supersampled_worldCam.append(worldCam_sanityCheck_buckets[timestamp])
            current_frame = worldCam_sanityCheck_buckets[timestamp]
        else:
            supersampled_worldCam.append(current_frame)
    supersampled_worldCam_array = np.array(supersampled_worldCam)
    # return worldCam sanity check
    return vid_width, vid_height, supersampled_worldCam_array, supersampled_rawLiveVid_array
def add_to_daily_worldCam_dict(this_trial_world_vid_frames, this_trial_stim_num, daily_world_vid_dict):
    """Accumulate one trial's supersampled world-cam frames into the running
    per-stimulus daily totals (per-timebucket summed frame + trial count)."""
    stim_entry = daily_world_vid_dict[this_trial_stim_num]
    # one more video contributes to this stimulus' daily average
    stim_entry['Vid Count'] = stim_entry.get('Vid Count', 0) + 1
    for tbucket_num, flattened_frame in enumerate(this_trial_world_vid_frames):
        if tbucket_num in stim_entry:
            stim_entry[tbucket_num]['Trial Count'] = stim_entry[tbucket_num]['Trial Count'] + 1
            stim_entry[tbucket_num]['Summed Frame'] = stim_entry[tbucket_num]['Summed Frame'] + flattened_frame
        else:
            stim_entry[tbucket_num] = {'Trial Count': 1, 'Summed Frame': flattened_frame}
def add_to_daily_rawLiveVid_dict(this_trial_rawLive_vid_frames, this_trial_stim_num, daily_rawLive_vid_dict):
    """Accumulate one trial's raw-live stimulus luminance trace into the running
    per-stimulus daily totals (per-timebucket summed luminance + trial count)."""
    stim_entry = daily_rawLive_vid_dict[this_trial_stim_num]
    # one more video contributes to this stimulus' daily average
    stim_entry['Vid Count'] = stim_entry.get('Vid Count', 0) + 1
    for tbucket_num, lum_value in enumerate(this_trial_rawLive_vid_frames):
        if tbucket_num in stim_entry:
            stim_entry[tbucket_num]['Trial Count'] = stim_entry[tbucket_num]['Trial Count'] + 1
            stim_entry[tbucket_num]['Summed Luminance'] = stim_entry[tbucket_num]['Summed Luminance'] + lum_value
        else:
            stim_entry[tbucket_num] = {'Trial Count': 1, 'Summed Luminance': lum_value}
def calculate_meanPerDay_worldCam(day_worldCam_tbDict):
    """Convert a day's summed world-cam frames into per-timebucket mean frames.

    Initializes one entry per stimulus from the module-level stim_vids list;
    each timebucket entry becomes {'Mean Frame', 'Trial Count'}.
    """
    meanPerDay_worldCam = {stim: {'Vid Count': 0} for stim in stim_vids}
    for stim_num, tbuckets in day_worldCam_tbDict.items():
        for key, value in tbuckets.items():
            if key == 'Vid Count':
                meanPerDay_worldCam[stim_num]['Vid Count'] = meanPerDay_worldCam[stim_num]['Vid Count'] + value
            else:
                meanFrame = value['Summed Frame'] / value['Trial Count']
                meanPerDay_worldCam[stim_num][key] = {'Mean Frame': meanFrame, 'Trial Count': value['Trial Count']}
    return meanPerDay_worldCam
def calculate_meanPerDay_rawLiveVid(day_rawLiveVid_tbDict):
    """Convert a day's summed raw-live luminances into per-timebucket means.

    Initializes one entry per stimulus from the module-level stim_vids list;
    each timebucket entry becomes {'Mean Luminance', 'Trial Count'}.
    """
    meanPerDay_rawLiveVid = {stim: {'Vid Count': 0} for stim in stim_vids}
    for stim_num, tbuckets in day_rawLiveVid_tbDict.items():
        for key, value in tbuckets.items():
            if key == 'Vid Count':
                meanPerDay_rawLiveVid[stim_num]['Vid Count'] = meanPerDay_rawLiveVid[stim_num]['Vid Count'] + value
            else:
                meanLuminance = value['Summed Luminance'] / value['Trial Count']
                meanPerDay_rawLiveVid[stim_num][key] = {'Mean Luminance': meanLuminance, 'Trial Count': value['Trial Count']}
    return meanPerDay_rawLiveVid
def save_daily_worldCam_meanFrames(this_day_meanWorld_dict, this_day_month, save_folder):
    """Write one .npy file per stimulus containing rows of
    [timebucket, trial-count weight, mean frame]; all-zero frames are dropped."""
    for stim, tbuckets in this_day_meanWorld_dict.items():
        rows = []
        for timebucket, entry in tbuckets.items():
            if timebucket == 'Vid Count':
                vidCount = entry
                continue
            thisMeanFrame = entry['Mean Frame']
            # skip timebuckets whose mean frame sums to zero (empty/NaN frame)
            if np.nansum(thisMeanFrame) == 0:
                continue
            rows.append([timebucket, entry['Trial Count'], thisMeanFrame])
        output_path = save_folder + os.sep + '%s_Stim%d_meanWorldCam_%dVids.npy' % (this_day_month, int(stim), vidCount)
        np.save(output_path, rows)
def save_daily_rawLiveStim_meanLums(this_day_rawLiveStim_dict, this_day_month, save_folder):
    """Write one .npy file per stimulus containing rows of
    [timebucket, trial-count weight, mean luminance]."""
    for stim, tbuckets in this_day_rawLiveStim_dict.items():
        rows = []
        for timebucket, entry in tbuckets.items():
            if timebucket == 'Vid Count':
                vidCount = entry
                continue
            rows.append([timebucket, entry['Trial Count'], entry['Mean Luminance']])
        output_path = save_folder + os.sep + '%s_Stim%d_meanRawLiveStim_%dVids.npy' % (this_day_month, int(stim), vidCount)
        np.save(output_path, rows)
def extract_daily_means_and_add_to_worldCam_or_rawLiveStim(dailyMean_binaryFiles, this_month_all_worldCam, this_month_all_rawLiveStim):
    """Load daily mean .npy files (named DATE_StimN_TYPE_KVids.npy) and merge
    each [timebucket, trial count, mean] row into the matching monthly
    worldCam or rawLiveStim accumulator, keyed by timebucket then date."""
    for daily_mean_file in dailyMean_binaryFiles:
        # filename encodes date, stimulus, mean type and contributing vid count
        name_parts = os.path.basename(daily_mean_file).split('_')
        daily_date = name_parts[0]
        daily_mean_stim_num = stim_name_to_float[name_parts[1]]
        daily_mean_type = name_parts[2]
        daily_mean_vid_count = int(name_parts[3][:-8])  # strip 'Vids.npy'
        if daily_mean_type == 'meanWorldCam':
            this_file_dictionary = this_month_all_worldCam
            timebucket_mean_name = 'Mean Frame'
        if daily_mean_type == 'meanRawLiveStim':
            this_file_dictionary = this_month_all_rawLiveStim
            timebucket_mean_name = 'Mean Luminance'
        stim_entry = this_file_dictionary[daily_mean_stim_num]
        stim_entry['Vid Count'] = stim_entry['Vid Count'] + daily_mean_vid_count
        daily_mean = np.load(daily_mean_file, allow_pickle=True)
        # format of daily_mean rows: [timebucket, trial count, mean value]
        for timebucket, trial_count, tb_mean in daily_mean:
            day_record = {'Trial Count': trial_count, timebucket_mean_name: tb_mean}
            if timebucket in stim_entry:
                stim_entry[timebucket][daily_date] = day_record
            else:
                stim_entry[timebucket] = {daily_date: day_record}
def save_monthly_weighted_meanStim(this_month_allStim_dict, stim_type):
    """Collapse per-day means into a trial-count-weighted monthly mean for
    every stimulus and save each as [timebucket, weight, mean] rows in a
    .npy file under the module-level monthly_mean_folder."""
    monthly_mean_stim = {stim: {'Vid Count': 0} for stim in stim_vids}
    if stim_type == 'meanWorldCam':
        timebucket_mean_name = 'Mean Frame'
    if stim_type == 'meanRawLiveStim':
        timebucket_mean_name = 'Mean Luminance'
    # first pass: weighted average across days for every timebucket
    for stim, tbuckets in this_month_allStim_dict.items():
        for timebucket, per_day in tbuckets.items():
            if timebucket == 'Vid Count':
                monthly_mean_stim[stim]['Vid Count'] = monthly_mean_stim[stim]['Vid Count'] + this_month_allStim_dict[stim]['Vid Count']
                continue
            weighted_means = [per_day[day]['Trial Count'] * per_day[day][timebucket_mean_name]
                              for day in per_day.keys()]
            weights_array = np.array([per_day[day]['Trial Count'] for day in per_day.keys()])
            this_timebucket_weighted_mean = np.sum(weighted_means, axis=0) / np.sum(weights_array)
            monthly_mean_stim[stim][timebucket] = {'Trial Count': np.sum(weights_array),
                                                   timebucket_mean_name: this_timebucket_weighted_mean}
    # second pass: flatten each stimulus to [timebucket, weight, mean] and save
    for stim, tbuckets in monthly_mean_stim.items():
        rows = []
        for timebucket, entry in tbuckets.items():
            if timebucket == 'Vid Count':
                vid_count = entry
                continue
            rows.append([timebucket, entry['Trial Count'], entry[timebucket_mean_name]])
        output_path = monthly_mean_folder + os.sep + '%s_Stim%d_%s_%dVids.npy' % (item_year_month, int(stim), stim_type, vid_count)
        np.save(output_path, rows)
##########################################################
# BEGIN SCRIPT
##########################################################
if __name__=='__main__':
# parse command line input
parser = argparse.ArgumentParser()
parser.add_argument("--a", nargs='?', default="check_string_for_empty")
parser.add_argument("--loc", nargs='?', default='laptop')
args = parser.parse_args()
# clean up current working directory
if 'world_temp' in os.listdir(current_working_directory):
logging.info('Deleting old world_temp folder...')
print('Deleting old world_temp folder...')
shutil.rmtree(os.path.join(current_working_directory, 'world_temp'))
print('Deleted!')
time.sleep(5) # to have time to see that world_temp was in fact deleted
zip_folders = fnmatch.filter(os.listdir(current_working_directory), '*.zip')
if len(zip_folders) > 0:
logging.info('Deleting old zip folders...')
print('Deleting old zip folders...')
for zfolder in zip_folders:
os.remove(os.path.join(current_working_directory, zfolder))
###################################
# DATA AND OUTPUT FILE LOCATIONS
###################################
data_drive, analysed_drive, rawStimLum_data, analysed_folders, daily_csv_files, monthly_extracted_data = load_data(args.loc)
logging.info('MAIN SURPRISING MINDS SOURCE DATASET: %s \n INTERMEDIATE PUPIL SIZE AND LOCATION FILES: %s' % (data_drive, analysed_drive))
print('MAIN SURPRISING MINDS SOURCE DATASET: %s \n INTERMEDIATE PUPIL SIZE AND LOCATION FILES: %s' % (data_drive, analysed_drive))
############################################################################################
### CHECK WHETHER COMPLETELY RESTARTING WORLD VID PROCESSING (DELETES 'world' FOLDERS!!!)...
############################################################################################
if args.a == 'check_string_for_empty':
logging.info('Continuing world cam extraction and raw live stim creation from last session...')
print('Continuing world cam extraction and raw live stim creation from last session...')
elif args.a == 'restart':
logging.warning('Restarting world cam extraction and raw live stim creation, DELETING ALL FILES FROM PREVIOUS SESSIONS!')
print('Restarting world cam extraction and raw live stim creation, DELETING ALL FILES FROM PREVIOUS SESSIONS!')
for folder in daily_csv_files:
subdirs = os.listdir(os.path.join(analysed_drive, folder, 'Analysis'))
if 'world' in subdirs:
shutil.rmtree(os.path.join(analysed_drive, folder, 'Analysis', 'world'))
else:
logging.warning('%s is not a valid optional input to this script! \nContinuing world cam extraction and raw live stim creation from last session...' % (args.a))
print('%s is not a valid optional input to this script! \nContinuing world cam extraction and raw live stim creation from last session...' % (args.a))
###################################
# STIMULUS INFO
###################################
stim_vids = [24.0, 25.0, 26.0, 27.0, 28.0, 29.0]
stim_name_to_float = {"stimuli024": 24.0, "stimuli025": 25.0, "stimuli026": 26.0, "stimuli027": 27.0, "stimuli028": 28.0, "stimuli029": 29.0, 'Stim24':24.0, 'Stim25':25.0, 'Stim26':26.0, 'Stim27':27.0, 'Stim28':28.0, 'Stim29':29.0}
stim_float_to_name = {24.0: "stimuli024", 25.0: "stimuli025", 26.0: "stimuli026", 27.0: "stimuli027", 28.0: "stimuli028", 29.0: "stimuli029"}
###################################
# LOAD RAW VID STIM DATA
###################################
rawStimLum_files = glob.glob(rawStimLum_data + os.sep + '*.csv')
rawStimLum_dict = {}
for rSL_file in rawStimLum_files:
stim_phase = os.path.basename(rSL_file).split('_')[0]
stim_lums = np.genfromtxt(rSL_file, delimiter=',')
thisPhase_lenFrames = len(stim_lums)
rawStimLum_dict[stim_phase] = {'Number of Frames': thisPhase_lenFrames, 'Luminance per Frame': stim_lums}
###################################
# EXTRACT WORLD CAM VID TIMING AND LUMINANCE
# SAVE RAW LIVE VIDEOS FROM EACH TRIAL AS BINARY FILE
###################################
# get the subfolders, sort their names
data_folders = sorted(os.listdir(data_drive))
zipped_data = fnmatch.filter(data_folders, '*.zip')
# first day was debugging the exhibit
zipped_data = zipped_data[1:]
zipped_names = [item[:-4] for item in zipped_data]
# figure out which days have already been analysed
extracted_months = [item.split('_')[1] for item in monthly_extracted_data]
already_extracted_daily = []
for folder in daily_csv_files:
subdirs = os.listdir(os.path.join(analysed_drive, folder, 'Analysis'))
if 'world' in subdirs:
already_extracted_daily.append(folder)
# figure out which days are the last day of data collection for each month of data collection
last_day_each_month = []
current_year_month_day = None
for i, folder in enumerate(zipped_data):
this_year_month_day = folder.split('_')[1][:-4]
this_year_month = this_year_month_day[:-3]
if current_year_month_day == None:
if folder == zipped_data[-1]:
last_day_each_month.append(this_year_month_day)
continue
else:
current_year_month_day = this_year_month_day
continue
if current_year_month_day[:-3] == this_year_month:
if folder == zipped_data[-1]:
last_day_each_month.append(this_year_month_day)
continue
else:
current_year_month_day = this_year_month_day
continue
else:
last_day_each_month.append(current_year_month_day)
current_year_month_day = None
logging.info('Last day of each month: %s' % (last_day_each_month))
# DAYS THAT CANNOT BE UNZIPPED
invalid_zipped = []
# DAYS WITH NO WORLD VIDS (no valid trials)
no_valid_trials = []
# BEGIN WORLD VID FRAME EXTRACTION/AVERAGING
for item in zipped_data:
this_day_date = item[:-4].split('_')[1]
########################################################################
# check to see if this folder has already had world vid frames extracted
# this condition is for when the script is interrupted
########################################################################
if item[:-4] in already_extracted_daily:
logging.info("World vid frames from %s has already been extracted" % (item))
print("World vid frames from %s has already been extracted" % (item))
########################################################################################
# check to see if this folder has already been averaged into a monthly world cam average
########################################################################################
item_year_month = this_day_date[:7]
if item_year_month in extracted_months:
logging.info("World camera frames from %s have already been consolidated into a monthly average" % (item_year_month))
print("World camera frames from %s have already been consolidated into a monthly average" % (item_year_month))
continue
########################################################################################
# if no monthly world cam average made yet for this month
# check that the full month has been extracted by checking for all daily mean worldCam and rawLiveStim binary files
########################################################################################
this_month_extracted = fnmatch.filter(already_extracted_daily, 'SurprisingMinds_' + item_year_month + '*')
for i, day_extracted in enumerate(this_month_extracted):
if day_extracted in no_valid_trials:
logging.warning('No valid trials during %s' % (day_extracted.split('_')[1]))
else:
day_extracted_files = os.listdir(os.path.join(analysed_drive, day_extracted, 'Analysis', 'world'))
if len(day_extracted_files) != 12:
this_month_extracted.pop(i)
this_month_data = fnmatch.filter(zipped_data, 'SurprisingMinds_' + item_year_month + '*')
this_month_invalid = fnmatch.filter(invalid_zipped, item_year_month + '*')
if len(this_month_extracted) != len(this_month_data) + len(this_month_invalid):
logging.info("World camera frames for %s not yet completed" % (item_year_month))
print("World camera frames for %s not yet completed" % (item_year_month))
continue
this_month_no_trials = fnmatch.filter(no_valid_trials, 'SurprisingMinds_' + item_year_month + '*')
if len(this_month_extracted) == len(this_month_no_trials):
logging.info("No valid trials collected during %s" % (item_year_month))
print("No valid trials collected during %s" % (item_year_month))
continue
##################################################################
# full month extracted? make monthly mean worldCam and rawLiveStim
##################################################################
logging.info('This month extraction completed: %s' % (this_month_extracted))
print('This month extraction completed: %s' % (this_month_extracted))
# load daily mean files and organize by worldCam/rawLiveStim and by stim
thisMonth_worldCam = {key:{'Vid Count':0} for key in stim_vids}
thisMonth_rawLiveStim = {key:{'Vid Count':0} for key in stim_vids}
for day_extracted in this_month_extracted:
daily_mean_files = glob.glob(analysed_drive + os.sep + day_extracted + os.sep + 'Analysis' + os.sep + 'world' + os.sep + '*.npy')
extract_daily_means_and_add_to_worldCam_or_rawLiveStim(daily_mean_files, thisMonth_worldCam, thisMonth_rawLiveStim)
# create folder for this month mean files
monthly_mean_folder = analysed_drive + os.sep + 'MeanStimuli_' + item_year_month
if not os.path.exists(monthly_mean_folder):
os.makedirs(monthly_mean_folder)
# take weighted mean at each timebucket and save as monthly mean intermediate binary file
logging.info('Saving monthly weighted mean of worldCam for %s...'%(item_year_month))
print('Saving monthly weighted mean of worldCam for %s...'%(item_year_month))
save_monthly_weighted_meanStim(thisMonth_worldCam, 'meanWorldCam')
logging.info('Saving monthly weighted mean of rawLive for %s...'%(item_year_month))
print('Saving monthly weighted mean of rawLive for %s...'%(item_year_month))
save_monthly_weighted_meanStim(thisMonth_rawLiveStim, 'meanRawLiveStim')
# update list of already extracted months
logging.INFO("Updating list of extracted months...")
analysed_folders = sorted(os.listdir(analysed_drive))
monthly_extracted_data = fnmatch.filter(analysed_folders, 'MeanStimuli_*')
extracted_months = [item.split('_')[1] for item in monthly_extracted_data]
# delete daily mean intermediate files
for day_extracted in this_month_extracted:
daily_mean_folder = os.path.join(analysed_drive, day_extracted, 'Analysis', 'world')
logging.INFO("Deleting daily mean worldCam and rawStim video files for %s..." % (day_extracted.split('_')[1]))
shutil.rmtree(daily_mean_folder)
logging.INFO("Delete successful!")
logging.INFO("Making empty 'world' folder for %s..." % (day_extracted.split('_')[1]))
os.makedirs(daily_mean_folder)
logging.info("Finished averaging world video frames for %s!" % (item_year_month))
print("Finished averaging world video frames for %s!" % (item_year_month))
continue
#############################################################################
# if world vid frames in this folder haven't already been extracted, EXTRACT!
#############################################################################
logging.info("Extracting World Vid frames from folder %s" % (item))
print("Extracting World Vid frames from folder %s" % (item))
# Build relative analysis paths, these folders should already exist
analysis_folder = os.path.join(analysed_drive, item[:-4], "Analysis")
alignment_folder = os.path.join(analysis_folder, "alignment")
if not os.path.exists(analysis_folder):
logging.warning("No Analysis folder exists for folder %s!" % (item))
continue
# grab a folder
day_zipped = os.path.join(data_drive, item)
# create Analysis subfolder for avg world vid data
world_folder = os.path.join(analysis_folder, "world")
# Create world_folder if it doesn't exist
if not os.path.exists(world_folder):
os.makedirs(world_folder)
# create a temp folder in current working directory to store data (contents of unzipped folder)
day_folder = os.path.join(current_working_directory, "world_temp")
# at what time resolution to build raw live stim and world camera data?
bucket_size = 4 #milliseconds
#####################################################################################################
# unzip current zipped folder into temp folder, this function checks whether the folder is unzippable
# if it unzips, the function returns True; if it doesn't unzip, the function returns False
#####################################################################################################
if unpack_to_temp(day_zipped, day_folder):
# List all trial folders
trial_folders = list_sub_folders(day_folder)
num_trials = len(trial_folders)
# intialize time bucket dictionary for world vids
this_day_worldCam_tbucket = {key:{'Vid Count':0} for key in stim_vids}
this_day_world_vids_height = []
this_day_world_vids_width = []
# initialize time bucket dictionary for raw live stim vids
this_day_rawLiveVid_tbucket = {key:{'Vid Count':0} for key in stim_vids}
###################################
# extract world vid from each trial
###################################
current_trial = 0
for trial_folder in trial_folders:
# add exception handling so that a weird day doesn't totally break everything
try:
trial_name = trial_folder.split(os.sep)[-1]
# check that the alignment frame for the day shows the correct start to the exhibit
png_filename = trial_name + '.png'
alignment_png_path = os.path.join(alignment_folder, png_filename)
if os.path.exists(alignment_png_path):
alignment_img = mpimg.imread(alignment_png_path)
alignment_gray = cv2.cvtColor(alignment_img, cv2.COLOR_RGB2GRAY)
monitor_zoom = alignment_gray[60:-200, 110:-110]
monitor_score = np.sum(monitor_zoom)
# pick a pixel where it should be bright because people are centering their eyes in the cameras
if monitor_zoom[115,200]>=0.7:
###################################
# Load CSVs and create timestamps
# ------------------------------
# Get world movie timestamp csv path
world_csv_path = glob.glob(trial_folder + '/*world.csv')[0]
# Get world video filepath
world_video_path = glob.glob(trial_folder + '/*world.avi')[0]
####################################
# while debugging
#world_csv_path = r"C:\Users\taunsquared\Dropbox\SurprisingMinds\analysis\debuggingData\SurprisingMinds_2017-10-14\2017-10-14_09-42-40\2017-10-14_09-42-40_stimuli024_world.csv"
#world_video_path = r"C:\Users\taunsquared\Dropbox\SurprisingMinds\analysis\debuggingData\SurprisingMinds_2017-10-14\2017-10-14_09-42-40\2017-10-14_09-42-40_stimuli024_world.avi"
#world_folder = r"C:\Users\taunsquared\Dropbox\SurprisingMinds\analysis\dataPythonWorkflows\SurprisingMinds_2017-10-14\Analysis\world"
####################################
stimuli_name = world_csv_path.split("_")[-2]
stimuli_number = stim_name_to_float[stimuli_name]
# Load world CSV
world_timestamps = np.genfromtxt(world_csv_path, dtype=np.str, delimiter=' ') # row = timestamp, not frame
### EXTRACT FRAMES FROM WORLD VIDS AND PUT INTO TIME BUCKETS ###
# create a "raw live stimulus video" array by combining framerate info from world cam with luminance values from raw vids
logging.INFO("Extracting world vid frames and creating raw live stim vid for %s..." % os.path.basename(world_video_path))
# save raw live stim vid as binary files and return world cam frames as a sanity check
worldCam_vidWidth, worldCam_vidHeight, worldCam_supersampledFrames, rawLiveVid_supersampledFrames = supersampled_worldCam_rawLiveVid(world_video_path, world_timestamps, rawStimLum_dict, world_folder, bucket_size)
#
# ## SANITY CHECK
# worldCam_meanLum = []
# for frame in worldCam_supersampledFrames:
# worldCam_meanLum.append(np.nansum(frame))
# worldCam_meanLum_array = np.array(worldCam_meanLum)
# plt.plot(worldCam_meanLum_array)
# plt.show()
#
add_to_daily_worldCam_dict(worldCam_supersampledFrames, stimuli_number, this_day_worldCam_tbucket)
this_day_world_vids_width.append(worldCam_vidWidth)
this_day_world_vids_height.append(worldCam_vidHeight)
# ------------------------------
add_to_daily_rawLiveVid_dict(rawLiveVid_supersampledFrames, stimuli_number, this_day_rawLiveVid_tbucket)
# ------------------------------
# Report progress
cv2.destroyAllWindows()
logging.info("Finished Trial: %s" % (current_trial))
print("Finished Trial: %s" % (current_trial))
current_trial = current_trial + 1
else:
logging.warning("Bad trial! Stimulus did not display properly for trial %s" % (current_trial))
print("Bad trial! Stimulus did not display properly for trial %s" % (current_trial))
current_trial = current_trial + 1
else:
logging.warning("No alignment picture exists for trial %s" % (current_trial))
print("No alignment picture exists for trial %s" % (current_trial))
current_trial = current_trial + 1
except Exception:
cv2.destroyAllWindows()
logging.warning("Trial %s failed!" % (current_trial))
print("Trial %s failed!" % (current_trial))
current_trial = current_trial + 1
##################################################
# check that all videos have same height and width
##################################################
if not this_day_world_vids_height:
logging.warning("No world vids averaged for %s" % (this_day_date))
no_valid_trials.append(item)
# delete temporary file with unzipped data contents
logging.INFO("Deleting temp folder of unzipped data...")
shutil.rmtree(day_folder)
logging.INFO("Delete successful!")
continue
if all(x == this_day_world_vids_height[0] for x in this_day_world_vids_height):
if all(x == this_day_world_vids_width[0] for x in this_day_world_vids_width):
unravel_height = this_day_world_vids_height[0]
unravel_width = this_day_world_vids_width[0]
###########################################
# average worldCam sanityCheck for each day
###########################################
logging.info('Calculating mean world camera videos for %s' % (this_day_date))
print('Calculating mean world camera videos for %s' % (this_day_date))
thisDay_meanWorldCam = calculate_meanPerDay_worldCam(this_day_worldCam_tbucket)
logging.info('Saving non-NaN frames of daily mean world camera...')
print('Saving non-NaN frames of daily mean world camera...')
save_daily_worldCam_meanFrames(thisDay_meanWorldCam, this_day_date, world_folder)
###########################################
# average rawLiveStim video for each day
###########################################
logging.info('Calculating mean raw live stim videos for %s' % (this_day_date))
print('Calculating mean raw live stim videos for %s' % (this_day_date))
thisDay_meanRawLiveVid = calculate_meanPerDay_rawLiveVid(this_day_rawLiveVid_tbucket)
logging.info('Saving daily mean raw live stim videos...')
print('Calculating mean raw live stim videos for %s' % (this_day_date))
save_daily_rawLiveStim_meanLums(thisDay_meanRawLiveVid, this_day_date, world_folder)
####################################################
# report progress and update already_extracted_daily
####################################################
already_extracted_daily.append(item[:-4])
logging.info("Finished extracting from %s" % (day_zipped[:-4]))
print("Finished extracting from %s" % (day_zipped[:-4]))
###################################################
# delete temporary file with unzipped data contents
###################################################
logging.INFO("Deleting temp folder of unzipped data...")
shutil.rmtree(day_folder)
logging.INFO("Delete successful!")
else:
logging.warning("Could not unzip data folder for day %s" % (this_day_date))
invalid_zipped.append(this_day_date)
logging.warning("Days that cannot be unzipped: %s" % (invalid_zipped))
#############################################
# check if this was the last day in the month
#############################################
this_day_date = item.split('_')[1][:-4]
item_year_month = this_day_date[:7]
if this_day_date in last_day_each_month:
##################################################
# build monthly mean worldCam and rawLive vid data
##################################################
logging.info("Completed world camera frame extraction and raw live stimuli creation for %s, now building monthly mean world cam and raw live data files..." % (item_year_month))
print("Completed world camera frame extraction and raw live stimuli creation for %s, now building monthly mean world cam and raw live data files..." % (item_year_month))
########################################################################################
# check to see if this folder has already been averaged into a monthly world cam average
########################################################################################
if item_year_month in extracted_months:
logging.info("World camera frames from %s have already been consolidated into a monthly average" % (item_year_month))
print("World camera frames from %s have already been consolidated into a monthly average" % (item_year_month))
continue
########################################################################################
# if no monthly world cam average made yet for this month
# check that the full month has been extracted by checking for all daily mean worldCam and rawLiveStim binary files
########################################################################################
this_month_extracted = fnmatch.filter(already_extracted_daily, 'SurprisingMinds_' + item_year_month + '*')
for i, day_extracted in enumerate(this_month_extracted):
day_extracted_files = os.listdir(os.path.join(analysed_drive, day_extracted, 'Analysis', 'world'))
if len(day_extracted_files) != 12:
this_month_extracted.pop(i)
this_month_data = fnmatch.filter(zipped_data, 'SurprisingMinds_' + item_year_month + '*')
this_month_invalid = fnmatch.filter(invalid_zipped, item_year_month)
if len(this_month_extracted) != len(this_month_data) + len(this_month_invalid):
logging.info("World vid frames for %s not yet completed" % (item_year_month))
print("World vid frames for %s not yet completed" % (item_year_month))
continue
##################################################################
# full month extracted? make monthly mean worldCam and rawLiveStim
##################################################################
logging.info('This month extraction completed: %s' % (this_month_extracted))
print('This month extraction completed: %s' % (this_month_extracted))
# load daily mean files and organize by worldCam/rawLiveStim and by stim
thisMonth_worldCam = {key:{'Vid Count':0} for key in stim_vids}
thisMonth_rawLiveStim = {key:{'Vid Count':0} for key in stim_vids}
for day_extracted in this_month_extracted:
daily_mean_files = glob.glob(analysed_drive + os.sep + day_extracted + os.sep + 'Analysis' + os.sep + 'world' + os.sep + '*.npy')
extract_daily_means_and_add_to_worldCam_or_rawLiveStim(daily_mean_files, thisMonth_worldCam, thisMonth_rawLiveStim)
# create folder for this month mean files
monthly_mean_folder = analysed_drive + os.sep + 'MeanStimuli_' + item_year_month
if not os.path.exists(monthly_mean_folder):
os.makedirs(monthly_mean_folder)
# take weighted mean at each timebucket and save as monthly mean intermediate binary file
logging.info('Saving monthly weighted mean of worldCam for %s...'%(item_year_month))
print('Saving monthly weighted mean of worldCam for %s...'%(item_year_month))
save_monthly_weighted_meanStim(thisMonth_worldCam, 'meanWorldCam')
logging.info('Saving monthly weighted mean of rawLive for %s...'%(item_year_month))
print('Saving monthly weighted mean of rawLive for %s...'%(item_year_month))
save_monthly_weighted_meanStim(thisMonth_rawLiveStim, 'meanRawLiveStim')
# update list of already extracted months
logging.INFO("Updating list of extracted months...")
analysed_folders = sorted(os.listdir(analysed_drive))
monthly_extracted_data = fnmatch.filter(analysed_folders, 'MeanStimuli_*')
extracted_months = [item.split('_')[1] for item in monthly_extracted_data]
# delete daily mean intermediate files
for day_extracted in this_month_extracted:
daily_mean_folder = os.path.join(analysed_drive, day_extracted, 'Analysis', 'world')
logging.INFO("Deleting daily mean worldCam and rawStim video files for %s..." % (day_extracted.split('_')[1]))
shutil.rmtree(daily_mean_folder)
logging.INFO("Delete successful!")
logging.INFO("Making empty 'world' folder for %s..." % (day_extracted.split('_')[1]))
os.makedirs(daily_mean_folder)
logging.info("Finished averaging world video frames for %s!" % (item_year_month))
print("Finished averaging world video frames for %s!" % (item_year_month))
# Final status report: every data folder on this drive has been processed.
completion_msg = "Completed world camera frame extraction and raw live stimuli creation on all data folders in this drive!"
logging.info(completion_msg)
print(completion_msg)
#FIN
| [
"zipfile.ZipFile",
"matplotlib.image.imread",
"time.sleep",
"numpy.array",
"cv2.destroyAllWindows",
"datetime.timedelta",
"logging.info",
"numpy.genfromtxt",
"numpy.save",
"os.remove",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"shutil.copy2",
"logging.INFO",
"numpy.emp... | [((1262, 1273), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1271, 1273), False, 'import os\n'), ((1388, 1411), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1409, 1411), False, 'import datetime\n'), ((2942, 2985), 'os.path.join', 'os.path.join', (['analysed_drive', '"""rawStimLums"""'], {}), "(analysed_drive, 'rawStimLums')\n", (2954, 2985), False, 'import os\n'), ((3066, 3119), 'fnmatch.filter', 'fnmatch.filter', (['analysed_folders', '"""SurprisingMinds_*"""'], {}), "(analysed_folders, 'SurprisingMinds_*')\n", (3080, 3119), False, 'import fnmatch\n'), ((3149, 3198), 'fnmatch.filter', 'fnmatch.filter', (['analysed_folders', '"""MeanStimuli_*"""'], {}), "(analysed_folders, 'MeanStimuli_*')\n", (3163, 3198), False, 'import fnmatch\n'), ((4717, 4748), 'os.listdir', 'os.listdir', (['path_to_root_folder'], {}), '(path_to_root_folder)\n', (4727, 4748), False, 'import os\n'), ((5139, 5206), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['start_timestamp', '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(start_timestamp, '%Y-%m-%dT%H:%M:%S.%f')\n", (5165, 5206), False, 'import datetime\n'), ((5230, 5295), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['end_timestamp', '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(end_timestamp, '%Y-%m-%dT%H:%M:%S.%f')\n", (5256, 5295), False, 'import datetime\n'), ((5371, 5418), 'datetime.timedelta', 'datetime.timedelta', ([], {'milliseconds': 'bucket_size_ms'}), '(milliseconds=bucket_size_ms)\n', (5389, 5418), False, 'import datetime\n'), ((6277, 6305), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (6293, 6305), False, 'import cv2\n'), ((6704, 6739), 'numpy.empty', 'np.empty', (['(vid_height * vid_width,)'], {}), '((vid_height * vid_width,))\n', (6712, 6739), True, 'import numpy as np\n'), ((10152, 10185), 'numpy.array', 'np.array', (['supersampled_rawLiveVid'], {}), '(supersampled_rawLiveVid)\n', (10160, 10185), True, 'import numpy as np\n'), ((10712, 
10743), 'numpy.array', 'np.array', (['supersampled_worldCam'], {}), '(supersampled_worldCam)\n', (10720, 10743), True, 'import numpy as np\n'), ((20718, 20743), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (20741, 20743), False, 'import argparse\n'), ((21874, 22024), 'logging.info', 'logging.info', (['("""MAIN SURPRISING MINDS SOURCE DATASET: %s \n INTERMEDIATE PUPIL SIZE AND LOCATION FILES: %s"""\n % (data_drive, analysed_drive))'], {}), '(\n """MAIN SURPRISING MINDS SOURCE DATASET: %s \n INTERMEDIATE PUPIL SIZE AND LOCATION FILES: %s"""\n % (data_drive, analysed_drive))\n', (21886, 22024), False, 'import logging\n'), ((24214, 24259), 'glob.glob', 'glob.glob', (["(rawStimLum_data + os.sep + '*.csv')"], {}), "(rawStimLum_data + os.sep + '*.csv')\n", (24223, 24259), False, 'import glob\n'), ((24901, 24938), 'fnmatch.filter', 'fnmatch.filter', (['data_folders', '"""*.zip"""'], {}), "(data_folders, '*.zip')\n", (24915, 24938), False, 'import fnmatch\n'), ((26442, 26506), 'logging.info', 'logging.info', (["('Last day of each month: %s' % last_day_each_month)"], {}), "('Last day of each month: %s' % last_day_each_month)\n", (26454, 26506), False, 'import logging\n'), ((50055, 50185), 'logging.info', 'logging.info', (['"""Completed world camera frame extraction and raw live stimuli creation on all data folders in this drive!"""'], {}), "(\n 'Completed world camera frame extraction and raw live stimuli creation on all data folders in this drive!'\n )\n", (50067, 50185), False, 'import logging\n'), ((3016, 3042), 'os.listdir', 'os.listdir', (['analysed_drive'], {}), '(analysed_drive)\n', (3026, 3042), False, 'import os\n'), ((3618, 3629), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3627, 3629), False, 'import os\n'), ((3654, 3709), 'shutil.copy2', 'shutil.copy2', (['path_to_zipped', 'current_working_directory'], {}), '(path_to_zipped, current_working_directory)\n', (3666, 3709), False, 'import shutil\n'), ((3955, 4003), 'zipfile.ZipFile', 
'zipfile.ZipFile', (['path_to_copied_zipped'], {'mode': '"""r"""'}), "(path_to_copied_zipped, mode='r')\n", (3970, 4003), False, 'import zipfile\n'), ((4355, 4387), 'os.remove', 'os.remove', (['path_to_copied_zipped'], {}), '(path_to_copied_zipped)\n', (4364, 4387), False, 'import os\n'), ((7598, 7659), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['timestamp', '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(timestamp, '%Y-%m-%dT%H:%M:%S.%f')\n", (7624, 7659), False, 'import datetime\n'), ((7684, 7731), 'datetime.timedelta', 'datetime.timedelta', ([], {'milliseconds': 'bucket_size_ms'}), '(milliseconds=bucket_size_ms)\n', (7702, 7731), False, 'import datetime\n'), ((15663, 15721), 'numpy.save', 'np.save', (['thisStimMeanWorldCam_output', 'thisStimMeanWorldCam'], {}), '(thisStimMeanWorldCam_output, thisStimMeanWorldCam)\n', (15670, 15721), True, 'import numpy as np\n'), ((16509, 16557), 'numpy.save', 'np.save', (['thisStimRawLive_output', 'thisStimRawLive'], {}), '(thisStimRawLive_output, thisStimRawLive)\n', (16516, 16557), True, 'import numpy as np\n'), ((17555, 17598), 'numpy.load', 'np.load', (['daily_mean_file'], {'allow_pickle': '(True)'}), '(daily_mean_file, allow_pickle=True)\n', (17562, 17598), True, 'import numpy as np\n'), ((20450, 20514), 'numpy.save', 'np.save', (['this_stim_weighted_mean_output', 'this_stim_weighted_mean'], {}), '(this_stim_weighted_mean_output, this_stim_weighted_mean)\n', (20457, 20514), True, 'import numpy as np\n'), ((20977, 21014), 'os.listdir', 'os.listdir', (['current_working_directory'], {}), '(current_working_directory)\n', (20987, 21014), False, 'import os\n'), ((21024, 21073), 'logging.info', 'logging.info', (['"""Deleting old world_temp folder..."""'], {}), "('Deleting old world_temp folder...')\n", (21036, 21073), False, 'import logging\n'), ((21236, 21249), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (21246, 21249), False, 'import time\n'), ((21341, 21378), 'os.listdir', 'os.listdir', 
(['current_working_directory'], {}), '(current_working_directory)\n', (21351, 21378), False, 'import os\n'), ((21426, 21469), 'logging.info', 'logging.info', (['"""Deleting old zip folders..."""'], {}), "('Deleting old zip folders...')\n", (21438, 21469), False, 'import logging\n'), ((22490, 22595), 'logging.info', 'logging.info', (['"""Continuing world cam extraction and raw live stim creation from last session..."""'], {}), "(\n 'Continuing world cam extraction and raw live stim creation from last session...'\n )\n", (22502, 22595), False, 'import logging\n'), ((24405, 24443), 'numpy.genfromtxt', 'np.genfromtxt', (['rSL_file'], {'delimiter': '""","""'}), "(rSL_file, delimiter=',')\n", (24418, 24443), True, 'import numpy as np\n'), ((24859, 24881), 'os.listdir', 'os.listdir', (['data_drive'], {}), '(data_drive)\n', (24869, 24881), False, 'import os\n'), ((33120, 33185), 'logging.info', 'logging.info', (["('Extracting World Vid frames from folder %s' % item)"], {}), "('Extracting World Vid frames from folder %s' % item)\n", (33132, 33185), False, 'import logging\n'), ((33359, 33410), 'os.path.join', 'os.path.join', (['analysed_drive', 'item[:-4]', '"""Analysis"""'], {}), "(analysed_drive, item[:-4], 'Analysis')\n", (33371, 33410), False, 'import os\n'), ((33438, 33480), 'os.path.join', 'os.path.join', (['analysis_folder', '"""alignment"""'], {}), "(analysis_folder, 'alignment')\n", (33450, 33480), False, 'import os\n'), ((33677, 33707), 'os.path.join', 'os.path.join', (['data_drive', 'item'], {}), '(data_drive, item)\n', (33689, 33707), False, 'import os\n'), ((33790, 33828), 'os.path.join', 'os.path.join', (['analysis_folder', '"""world"""'], {}), "(analysis_folder, 'world')\n", (33802, 33828), False, 'import os\n'), ((34087, 34140), 'os.path.join', 'os.path.join', (['current_working_directory', '"""world_temp"""'], {}), "(current_working_directory, 'world_temp')\n", (34099, 34140), False, 'import os\n'), ((4775, 4816), 'os.path.join', 'os.path.join', 
(['path_to_root_folder', 'folder'], {}), '(path_to_root_folder, folder)\n', (4787, 4816), False, 'import os\n'), ((8168, 8207), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2GRAY'], {}), '(frame, cv2.COLOR_RGB2GRAY)\n', (8180, 8207), False, 'import cv2\n'), ((21147, 21200), 'os.path.join', 'os.path.join', (['current_working_directory', '"""world_temp"""'], {}), "(current_working_directory, 'world_temp')\n", (21159, 21200), False, 'import os\n'), ((22721, 22852), 'logging.warning', 'logging.warning', (['"""Restarting world cam extraction and raw live stim creation, DELETING ALL FILES FROM PREVIOUS SESSIONS!"""'], {}), "(\n 'Restarting world cam extraction and raw live stim creation, DELETING ALL FILES FROM PREVIOUS SESSIONS!'\n )\n", (22736, 22852), False, 'import logging\n'), ((23227, 23398), 'logging.warning', 'logging.warning', (['("""%s is not a valid optional input to this script! \nContinuing world cam extraction and raw live stim creation from last session..."""\n % args.a)'], {}), '(\n """%s is not a valid optional input to this script! 
\nContinuing world cam extraction and raw live stim creation from last session..."""\n % args.a)\n', (23242, 23398), False, 'import logging\n'), ((25301, 25349), 'os.path.join', 'os.path.join', (['analysed_drive', 'folder', '"""Analysis"""'], {}), "(analysed_drive, folder, 'Analysis')\n", (25313, 25349), False, 'import os\n'), ((27136, 27210), 'logging.info', 'logging.info', (["('World vid frames from %s has already been extracted' % item)"], {}), "('World vid frames from %s has already been extracted' % item)\n", (27148, 27210), False, 'import logging\n'), ((28419, 28506), 'fnmatch.filter', 'fnmatch.filter', (['already_extracted_daily', "('SurprisingMinds_' + item_year_month + '*')"], {}), "(already_extracted_daily, 'SurprisingMinds_' +\n item_year_month + '*')\n", (28433, 28506), False, 'import fnmatch\n'), ((29000, 29071), 'fnmatch.filter', 'fnmatch.filter', (['zipped_data', "('SurprisingMinds_' + item_year_month + '*')"], {}), "(zipped_data, 'SurprisingMinds_' + item_year_month + '*')\n", (29014, 29071), False, 'import fnmatch\n'), ((29105, 29158), 'fnmatch.filter', 'fnmatch.filter', (['invalid_zipped', "(item_year_month + '*')"], {}), "(invalid_zipped, item_year_month + '*')\n", (29119, 29158), False, 'import fnmatch\n'), ((29498, 29573), 'fnmatch.filter', 'fnmatch.filter', (['no_valid_trials', "('SurprisingMinds_' + item_year_month + '*')"], {}), "(no_valid_trials, 'SurprisingMinds_' + item_year_month + '*')\n", (29512, 29573), False, 'import fnmatch\n'), ((30088, 30162), 'logging.info', 'logging.info', (["('This month extraction completed: %s' % this_month_extracted)"], {}), "('This month extraction completed: %s' % this_month_extracted)\n", (30100, 30162), False, 'import logging\n'), ((31186, 31274), 'logging.info', 'logging.info', (["('Saving monthly weighted mean of worldCam for %s...' % item_year_month)"], {}), "('Saving monthly weighted mean of worldCam for %s...' 
%\n item_year_month)\n", (31198, 31274), False, 'import logging\n'), ((31452, 31539), 'logging.info', 'logging.info', (["('Saving monthly weighted mean of rawLive for %s...' % item_year_month)"], {}), "('Saving monthly weighted mean of rawLive for %s...' %\n item_year_month)\n", (31464, 31539), False, 'import logging\n'), ((31776, 31828), 'logging.INFO', 'logging.INFO', (['"""Updating list of extracted months..."""'], {}), "('Updating list of extracted months...')\n", (31788, 31828), False, 'import logging\n'), ((31932, 31981), 'fnmatch.filter', 'fnmatch.filter', (['analysed_folders', '"""MeanStimuli_*"""'], {}), "(analysed_folders, 'MeanStimuli_*')\n", (31946, 31981), False, 'import fnmatch\n'), ((32664, 32743), 'logging.info', 'logging.info', (["('Finished averaging world video frames for %s!' % item_year_month)"], {}), "('Finished averaging world video frames for %s!' % item_year_month)\n", (32676, 32743), False, 'import logging\n'), ((33496, 33527), 'os.path.exists', 'os.path.exists', (['analysis_folder'], {}), '(analysis_folder)\n', (33510, 33527), False, 'import os\n'), ((33541, 33607), 'logging.warning', 'logging.warning', (["('No Analysis folder exists for folder %s!' % item)"], {}), "('No Analysis folder exists for folder %s!' 
% item)\n", (33556, 33607), False, 'import logging\n'), ((33894, 33922), 'os.path.exists', 'os.path.exists', (['world_folder'], {}), '(world_folder)\n', (33908, 33922), False, 'import os\n'), ((33936, 33961), 'os.makedirs', 'os.makedirs', (['world_folder'], {}), '(world_folder)\n', (33947, 33961), False, 'import os\n'), ((42106, 42181), 'logging.info', 'logging.info', (["('Calculating mean world camera videos for %s' % this_day_date)"], {}), "('Calculating mean world camera videos for %s' % this_day_date)\n", (42118, 42181), False, 'import logging\n'), ((42371, 42438), 'logging.info', 'logging.info', (['"""Saving non-NaN frames of daily mean world camera..."""'], {}), "('Saving non-NaN frames of daily mean world camera...')\n", (42383, 42438), False, 'import logging\n'), ((42783, 42859), 'logging.info', 'logging.info', (["('Calculating mean raw live stim videos for %s' % this_day_date)"], {}), "('Calculating mean raw live stim videos for %s' % this_day_date)\n", (42795, 42859), False, 'import logging\n'), ((43056, 43113), 'logging.info', 'logging.info', (['"""Saving daily mean raw live stim videos..."""'], {}), "('Saving daily mean raw live stim videos...')\n", (43068, 43113), False, 'import logging\n'), ((43556, 43617), 'logging.info', 'logging.info', (["('Finished extracting from %s' % day_zipped[:-4])"], {}), "('Finished extracting from %s' % day_zipped[:-4])\n", (43568, 43617), False, 'import logging\n'), ((43893, 43949), 'logging.INFO', 'logging.INFO', (['"""Deleting temp folder of unzipped data..."""'], {}), "('Deleting temp folder of unzipped data...')\n", (43905, 43949), False, 'import logging\n'), ((43962, 43987), 'shutil.rmtree', 'shutil.rmtree', (['day_folder'], {}), '(day_folder)\n', (43975, 43987), False, 'import shutil\n'), ((44000, 44034), 'logging.INFO', 'logging.INFO', (['"""Delete successful!"""'], {}), "('Delete successful!')\n", (44012, 44034), False, 'import logging\n'), ((44061, 44134), 'logging.warning', 'logging.warning', (["('Could not 
unzip data folder for day %s' % this_day_date)"], {}), "('Could not unzip data folder for day %s' % this_day_date)\n", (44076, 44134), False, 'import logging\n'), ((44198, 44266), 'logging.warning', 'logging.warning', (["('Days that cannot be unzipped: %s' % invalid_zipped)"], {}), "('Days that cannot be unzipped: %s' % invalid_zipped)\n", (44213, 44266), False, 'import logging\n'), ((44773, 44957), 'logging.info', 'logging.info', (["('Completed world camera frame extraction and raw live stimuli creation for %s, now building monthly mean world cam and raw live data files...'\n % item_year_month)"], {}), "(\n 'Completed world camera frame extraction and raw live stimuli creation for %s, now building monthly mean world cam and raw live data files...'\n % item_year_month)\n", (44785, 44957), False, 'import logging\n'), ((46208, 46295), 'fnmatch.filter', 'fnmatch.filter', (['already_extracted_daily', "('SurprisingMinds_' + item_year_month + '*')"], {}), "(already_extracted_daily, 'SurprisingMinds_' +\n item_year_month + '*')\n", (46222, 46295), False, 'import fnmatch\n'), ((46605, 46676), 'fnmatch.filter', 'fnmatch.filter', (['zipped_data', "('SurprisingMinds_' + item_year_month + '*')"], {}), "(zipped_data, 'SurprisingMinds_' + item_year_month + '*')\n", (46619, 46676), False, 'import fnmatch\n'), ((46710, 46757), 'fnmatch.filter', 'fnmatch.filter', (['invalid_zipped', 'item_year_month'], {}), '(invalid_zipped, item_year_month)\n', (46724, 46757), False, 'import fnmatch\n'), ((47305, 47379), 'logging.info', 'logging.info', (["('This month extraction completed: %s' % this_month_extracted)"], {}), "('This month extraction completed: %s' % this_month_extracted)\n", (47317, 47379), False, 'import logging\n'), ((48403, 48491), 'logging.info', 'logging.info', (["('Saving monthly weighted mean of worldCam for %s...' % item_year_month)"], {}), "('Saving monthly weighted mean of worldCam for %s...' 
%\n item_year_month)\n", (48415, 48491), False, 'import logging\n'), ((48669, 48756), 'logging.info', 'logging.info', (["('Saving monthly weighted mean of rawLive for %s...' % item_year_month)"], {}), "('Saving monthly weighted mean of rawLive for %s...' %\n item_year_month)\n", (48681, 48756), False, 'import logging\n'), ((48993, 49045), 'logging.INFO', 'logging.INFO', (['"""Updating list of extracted months..."""'], {}), "('Updating list of extracted months...')\n", (49005, 49045), False, 'import logging\n'), ((49149, 49198), 'fnmatch.filter', 'fnmatch.filter', (['analysed_folders', '"""MeanStimuli_*"""'], {}), "(analysed_folders, 'MeanStimuli_*')\n", (49163, 49198), False, 'import fnmatch\n'), ((49881, 49960), 'logging.info', 'logging.info', (["('Finished averaging world video frames for %s!' % item_year_month)"], {}), "('Finished averaging world video frames for %s!' % item_year_month)\n", (49893, 49960), False, 'import logging\n'), ((4851, 4892), 'os.path.join', 'os.path.join', (['path_to_root_folder', 'folder'], {}), '(path_to_root_folder, folder)\n', (4863, 4892), False, 'import os\n'), ((19423, 19447), 'numpy.array', 'np.array', (['weighted_means'], {}), '(weighted_means)\n', (19431, 19447), True, 'import numpy as np\n'), ((19480, 19497), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (19488, 19497), True, 'import numpy as np\n'), ((21573, 21621), 'os.path.join', 'os.path.join', (['current_working_directory', 'zfolder'], {}), '(current_working_directory, zfolder)\n', (21585, 21621), False, 'import os\n'), ((27714, 27839), 'logging.info', 'logging.info', (["('World camera frames from %s have already been consolidated into a monthly average'\n % item_year_month)"], {}), "(\n 'World camera frames from %s have already been consolidated into a monthly average'\n % item_year_month)\n", (27726, 27839), False, 'import logging\n'), ((29267, 29345), 'logging.info', 'logging.info', (["('World camera frames for %s not yet completed' % item_year_month)"], 
{}), "('World camera frames for %s not yet completed' % item_year_month)\n", (29279, 29345), False, 'import logging\n'), ((29661, 29730), 'logging.info', 'logging.info', (["('No valid trials collected during %s' % item_year_month)"], {}), "('No valid trials collected during %s' % item_year_month)\n", (29673, 29730), False, 'import logging\n'), ((30577, 30691), 'glob.glob', 'glob.glob', (["(analysed_drive + os.sep + day_extracted + os.sep + 'Analysis' + os.sep +\n 'world' + os.sep + '*.npy')"], {}), "(analysed_drive + os.sep + day_extracted + os.sep + 'Analysis' +\n os.sep + 'world' + os.sep + '*.npy')\n", (30586, 30691), False, 'import glob\n'), ((30986, 31021), 'os.path.exists', 'os.path.exists', (['monthly_mean_folder'], {}), '(monthly_mean_folder)\n', (31000, 31021), False, 'import os\n'), ((31039, 31071), 'os.makedirs', 'os.makedirs', (['monthly_mean_folder'], {}), '(monthly_mean_folder)\n', (31050, 31071), False, 'import os\n'), ((31867, 31893), 'os.listdir', 'os.listdir', (['analysed_drive'], {}), '(analysed_drive)\n', (31877, 31893), False, 'import os\n'), ((32211, 32275), 'os.path.join', 'os.path.join', (['analysed_drive', 'day_extracted', '"""Analysis"""', '"""world"""'], {}), "(analysed_drive, day_extracted, 'Analysis', 'world')\n", (32223, 32275), False, 'import os\n'), ((32419, 32451), 'shutil.rmtree', 'shutil.rmtree', (['daily_mean_folder'], {}), '(daily_mean_folder)\n', (32432, 32451), False, 'import shutil\n'), ((32468, 32502), 'logging.INFO', 'logging.INFO', (['"""Delete successful!"""'], {}), "('Delete successful!')\n", (32480, 32502), False, 'import logging\n'), ((32621, 32651), 'os.makedirs', 'os.makedirs', (['daily_mean_folder'], {}), '(daily_mean_folder)\n', (32632, 32651), False, 'import os\n'), ((41237, 41301), 'logging.warning', 'logging.warning', (["('No world vids averaged for %s' % this_day_date)"], {}), "('No world vids averaged for %s' % this_day_date)\n", (41252, 41301), False, 'import logging\n'), ((41433, 41489), 'logging.INFO', 
'logging.INFO', (['"""Deleting temp folder of unzipped data..."""'], {}), "('Deleting temp folder of unzipped data...')\n", (41445, 41489), False, 'import logging\n'), ((41506, 41531), 'shutil.rmtree', 'shutil.rmtree', (['day_folder'], {}), '(day_folder)\n', (41519, 41531), False, 'import shutil\n'), ((41548, 41582), 'logging.INFO', 'logging.INFO', (['"""Delete successful!"""'], {}), "('Delete successful!')\n", (41560, 41582), False, 'import logging\n'), ((45503, 45628), 'logging.info', 'logging.info', (["('World camera frames from %s have already been consolidated into a monthly average'\n % item_year_month)"], {}), "(\n 'World camera frames from %s have already been consolidated into a monthly average'\n % item_year_month)\n", (45515, 45628), False, 'import logging\n'), ((46866, 46941), 'logging.info', 'logging.info', (["('World vid frames for %s not yet completed' % item_year_month)"], {}), "('World vid frames for %s not yet completed' % item_year_month)\n", (46878, 46941), False, 'import logging\n'), ((47794, 47908), 'glob.glob', 'glob.glob', (["(analysed_drive + os.sep + day_extracted + os.sep + 'Analysis' + os.sep +\n 'world' + os.sep + '*.npy')"], {}), "(analysed_drive + os.sep + day_extracted + os.sep + 'Analysis' +\n os.sep + 'world' + os.sep + '*.npy')\n", (47803, 47908), False, 'import glob\n'), ((48203, 48238), 'os.path.exists', 'os.path.exists', (['monthly_mean_folder'], {}), '(monthly_mean_folder)\n', (48217, 48238), False, 'import os\n'), ((48256, 48288), 'os.makedirs', 'os.makedirs', (['monthly_mean_folder'], {}), '(monthly_mean_folder)\n', (48267, 48288), False, 'import os\n'), ((49084, 49110), 'os.listdir', 'os.listdir', (['analysed_drive'], {}), '(analysed_drive)\n', (49094, 49110), False, 'import os\n'), ((49428, 49492), 'os.path.join', 'os.path.join', (['analysed_drive', 'day_extracted', '"""Analysis"""', '"""world"""'], {}), "(analysed_drive, day_extracted, 'Analysis', 'world')\n", (49440, 49492), False, 'import os\n'), ((49636, 49668), 
'shutil.rmtree', 'shutil.rmtree', (['daily_mean_folder'], {}), '(daily_mean_folder)\n', (49649, 49668), False, 'import shutil\n'), ((49685, 49719), 'logging.INFO', 'logging.INFO', (['"""Delete successful!"""'], {}), "('Delete successful!')\n", (49697, 49719), False, 'import logging\n'), ((49838, 49868), 'os.makedirs', 'os.makedirs', (['daily_mean_folder'], {}), '(daily_mean_folder)\n', (49849, 49868), False, 'import os\n'), ((15337, 15361), 'numpy.nansum', 'np.nansum', (['thisMeanFrame'], {}), '(thisMeanFrame)\n', (15346, 15361), True, 'import numpy as np\n'), ((16766, 16799), 'os.path.basename', 'os.path.basename', (['daily_mean_file'], {}), '(daily_mean_file)\n', (16782, 16799), False, 'import os\n'), ((16938, 16971), 'os.path.basename', 'os.path.basename', (['daily_mean_file'], {}), '(daily_mean_file)\n', (16954, 16971), False, 'import os\n'), ((19546, 19576), 'numpy.sum', 'np.sum', (['weighted_means'], {'axis': '(0)'}), '(weighted_means, axis=0)\n', (19552, 19576), True, 'import numpy as np\n'), ((19577, 19598), 'numpy.sum', 'np.sum', (['weights_array'], {}), '(weights_array)\n', (19583, 19598), True, 'import numpy as np\n'), ((19668, 19689), 'numpy.sum', 'np.sum', (['weights_array'], {}), '(weights_array)\n', (19674, 19689), True, 'import numpy as np\n'), ((23035, 23083), 'os.path.join', 'os.path.join', (['analysed_drive', 'folder', '"""Analysis"""'], {}), "(analysed_drive, folder, 'Analysis')\n", (23047, 23083), False, 'import os\n'), ((24344, 24370), 'os.path.basename', 'os.path.basename', (['rSL_file'], {}), '(rSL_file)\n', (24360, 24370), False, 'import os\n'), ((35866, 35910), 'os.path.join', 'os.path.join', (['alignment_folder', 'png_filename'], {}), '(alignment_folder, png_filename)\n', (35878, 35910), False, 'import os\n'), ((35934, 35968), 'os.path.exists', 'os.path.exists', (['alignment_png_path'], {}), '(alignment_png_path)\n', (35948, 35968), False, 'import os\n'), ((46410, 46474), 'os.path.join', 'os.path.join', (['analysed_drive', 
'day_extracted', '"""Analysis"""', '"""world"""'], {}), "(analysed_drive, day_extracted, 'Analysis', 'world')\n", (46422, 46474), False, 'import os\n'), ((16863, 16896), 'os.path.basename', 'os.path.basename', (['daily_mean_file'], {}), '(daily_mean_file)\n', (16879, 16896), False, 'import os\n'), ((23150, 23207), 'os.path.join', 'os.path.join', (['analysed_drive', 'folder', '"""Analysis"""', '"""world"""'], {}), "(analysed_drive, folder, 'Analysis', 'world')\n", (23162, 23207), False, 'import os\n'), ((28797, 28861), 'os.path.join', 'os.path.join', (['analysed_drive', 'day_extracted', '"""Analysis"""', '"""world"""'], {}), "(analysed_drive, day_extracted, 'Analysis', 'world')\n", (28809, 28861), False, 'import os\n'), ((36010, 36042), 'matplotlib.image.imread', 'mpimg.imread', (['alignment_png_path'], {}), '(alignment_png_path)\n', (36022, 36042), True, 'import matplotlib.image as mpimg\n'), ((36084, 36131), 'cv2.cvtColor', 'cv2.cvtColor', (['alignment_img', 'cv2.COLOR_RGB2GRAY'], {}), '(alignment_img, cv2.COLOR_RGB2GRAY)\n', (36096, 36131), False, 'import cv2\n'), ((36245, 36265), 'numpy.sum', 'np.sum', (['monitor_zoom'], {}), '(monitor_zoom)\n', (36251, 36265), True, 'import numpy as np\n'), ((40486, 40561), 'logging.warning', 'logging.warning', (["('No alignment picture exists for trial %s' % current_trial)"], {}), "('No alignment picture exists for trial %s' % current_trial)\n", (40501, 40561), False, 'import logging\n'), ((40769, 40792), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (40790, 40792), False, 'import cv2\n'), ((40813, 40864), 'logging.warning', 'logging.warning', (["('Trial %s failed!' % current_trial)"], {}), "('Trial %s failed!' 
% current_trial)\n", (40828, 40864), False, 'import logging\n'), ((17021, 17054), 'os.path.basename', 'os.path.basename', (['daily_mean_file'], {}), '(daily_mean_file)\n', (17037, 17054), False, 'import os\n'), ((37920, 37978), 'numpy.genfromtxt', 'np.genfromtxt', (['world_csv_path'], {'dtype': 'np.str', 'delimiter': '""" """'}), "(world_csv_path, dtype=np.str, delimiter=' ')\n", (37933, 37978), True, 'import numpy as np\n'), ((39867, 39890), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (39888, 39890), False, 'import cv2\n'), ((39919, 39969), 'logging.info', 'logging.info', (["('Finished Trial: %s' % current_trial)"], {}), "('Finished Trial: %s' % current_trial)\n", (39931, 39969), False, 'import logging\n'), ((40166, 40262), 'logging.warning', 'logging.warning', (["('Bad trial! Stimulus did not display properly for trial %s' % current_trial)"], {}), "('Bad trial! Stimulus did not display properly for trial %s' %\n current_trial)\n", (40181, 40262), False, 'import logging\n'), ((36738, 36777), 'glob.glob', 'glob.glob', (["(trial_folder + '/*world.csv')"], {}), "(trial_folder + '/*world.csv')\n", (36747, 36777), False, 'import glob\n'), ((36883, 36922), 'glob.glob', 'glob.glob', (["(trial_folder + '/*world.avi')"], {}), "(trial_folder + '/*world.avi')\n", (36892, 36922), False, 'import glob\n'), ((38365, 38399), 'os.path.basename', 'os.path.basename', (['world_video_path'], {}), '(world_video_path)\n', (38381, 38399), False, 'import os\n')] |
import copy
from datetime import datetime
from typing import Callable
import numpy as np
import torch
from ga.individual import statistics
from utils.timing import timing
class Population:
    """Evolves a pool of individuals with a genetic algorithm.

    Fitness is evaluated against ``env``; selection/crossover/mutation are
    delegated to the ``run_generation`` callable passed to :meth:`run`.
    """
    def __init__(self, individual, pop_size, max_generation, p_mutation, p_crossover, p_inversion):
        # ``individual`` is a factory (called with no args) so every member
        # gets independently initialised weights.
        self.pop_size = pop_size
        self.max_generation = max_generation
        self.p_mutation = p_mutation
        self.p_crossover = p_crossover
        self.p_inversion = p_inversion
        # self.old_population = [copy.copy(individual) for _ in range(pop_size)] # if copy, all weights will be the same
        self.old_population = [individual() for _ in range(pop_size)]
        self.new_population = []
    def set_population(self, population: list):
        # Replace the current pool wholesale (e.g. to resume from a checkpoint).
        self.old_population = population
    @timing
    def run(self, env, run_generation: Callable, verbose=False, log=False, output_folder=None, save_as_pytorch=False):
        """Run the GA for ``max_generation`` generations.

        NOTE(review): ``best_model`` is taken from the population before any
        fitness has been computed in this method -- it relies on individuals
        carrying a meaningful initial ``fitness``; confirm that is intended.
        """
        best_model = sorted(self.old_population, key=lambda ind: ind.fitness, reverse=True)[0]
        for i in range(self.max_generation):
            print("Generation {}".format(i))
            print("Start: Calculate sequentially")
            for j in range(len(self.old_population)):
                print(f'Calculating {j}')
                p = self.old_population[j]
                p.calculate_fitness(env)
            print("End: Calculate sequentially")
            # run_generation fills new_population in place.
            self.new_population = [None for _ in range(self.pop_size)]
            run_generation(env,
                           self.old_population,
                           self.new_population,
                           self.p_mutation,
                           self.p_crossover,
                           self.p_inversion)
            if log:
                self.save_logs(i, output_folder)
            if verbose:
                self.show_stats(i)
            self.update_old_population()
            new_best_model = self.get_best_model_parameters()
            if new_best_model.fitness > best_model.fitness:
                print('Saving new best model with fitness: {}'.format(new_best_model.fitness))
                self.save_model_parameters(output_folder, i, save_as_pytorch)
                best_model = new_best_model
        if output_folder:
            # Always persist the final generation as well.
            self.save_model_parameters(output_folder, self.max_generation, save_as_pytorch)
    def save_logs(self, n_gen, output_folder):
        """
        CSV format -> date,n_generation,mean,min,max
        """
        date = self.now()
        file_name = 'logs.csv'
        mean, t_min, t_max = statistics(self.new_population)
        stats = f'{date},{n_gen},{mean},{t_min},{t_max}\n'
        # NOTE(review): path is built by plain concatenation (no os.sep) --
        # confirm callers pass output_folder with a trailing separator.
        with open(output_folder + self.get_file_name_without_date() + file_name, 'a') as f:
            f.write(stats)
    def show_stats(self, n_gen):
        # Print mean/min/max fitness of the freshly produced generation.
        mean, t_min, t_max = statistics(self.new_population)
        date = self.now()
        stats = f"{date} - generation {n_gen + 1} | mean: {mean}\tmin: {t_min}\tmax: {t_max}\n"
        print(stats)
    def update_old_population(self):
        # Deep copy so the next generation cannot mutate the previous one.
        self.old_population = copy.deepcopy(self.new_population)
    def save_model_parameters(self, output_folder, iterations, save_as_pytorch=False):
        """Serialise the best individual's weights (torch or numpy format)."""
        best_model = self.get_best_model_parameters()
        file_name = self.get_file_name(self.now()) + f'_I={iterations}_SCORE={best_model.fitness}.npy'
        output_filename = output_folder + '-' + file_name
        if save_as_pytorch:
            torch.save(best_model.weights_biases, output_filename)
        else:
            np.save(output_filename, best_model.weights_biases)
    def get_best_model_parameters(self) -> np.array:
        """
        :return: Weights and biases of the best individual
        """
        return sorted(self.new_population, key=lambda ind: ind.fitness, reverse=True)[0]
    def get_file_name(self, date):
        # Encode the run's hyper-parameters into the artifact file name.
        return '{}_NN={}_POPSIZE={}_GEN={}_PMUTATION_{}_PCROSSOVER_{}_INPUTS_{}'.format(
            date,
            self.new_population[
                0].__class__.__name__,
            self.pop_size,
            self.max_generation,
            self.p_mutation,
            self.p_crossover,
            self.new_population[
                0].input_size
        )
    def get_file_name_without_date(self):
        # Same fields as get_file_name but without the timestamp, so the log
        # file name is stable across the whole run.
        return 'NN={}_POPSIZE={}_GEN={}_PMUTATION_{}_PCROSSOVER_{}_INPUTS_{}'.format(
            self.new_population[0].__class__.__name__,
            self.pop_size,
            self.max_generation,
            self.p_mutation,
            self.p_crossover,
            self.new_population[0].input_size
        )
    @staticmethod
    def now():
        # Timestamp used in file names and log rows.
        return datetime.now().strftime('%m-%d-%Y_%H-%M')
| [
"ga.individual.statistics",
"datetime.datetime.now",
"torch.save",
"copy.deepcopy",
"numpy.save"
] | [((2579, 2610), 'ga.individual.statistics', 'statistics', (['self.new_population'], {}), '(self.new_population)\n', (2589, 2610), False, 'from ga.individual import statistics\n'), ((2852, 2883), 'ga.individual.statistics', 'statistics', (['self.new_population'], {}), '(self.new_population)\n', (2862, 2883), False, 'from ga.individual import statistics\n'), ((3095, 3129), 'copy.deepcopy', 'copy.deepcopy', (['self.new_population'], {}), '(self.new_population)\n', (3108, 3129), False, 'import copy\n'), ((3473, 3527), 'torch.save', 'torch.save', (['best_model.weights_biases', 'output_filename'], {}), '(best_model.weights_biases, output_filename)\n', (3483, 3527), False, 'import torch\n'), ((3554, 3605), 'numpy.save', 'np.save', (['output_filename', 'best_model.weights_biases'], {}), '(output_filename, best_model.weights_biases)\n', (3561, 3605), True, 'import numpy as np\n'), ((4647, 4661), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4659, 4661), False, 'from datetime import datetime\n')] |
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import List
import numpy as np
import pandas as pd
@dataclass
class Recall:
    """One recall measurement: score of ``docid`` at cutoff ``n_items``."""
    docid: str
    n_items: int
    score: float

    @classmethod
    def from_line(cls, line: str) -> Recall:
        """Parse one whitespace-separated line: 'recall_<n> <docid> <score>'."""
        fields = line.split()
        if len(fields) != 3:
            raise ValueError(f'Invalid format: {line}')
        rank_label, docid, score = fields
        return Recall(
            docid=docid,
            n_items=int(rank_label.replace('recall_', '')),
            score=float(score))
def get_uniq_docids(info_list: List[Recall],
                    docids: List[str]) -> List[str]:
    """
    Given info_list, collect docids, collapsing consecutive duplicates.

    Iterative rewrite of the original recursive implementation: one
    recursion level per Recall hit Python's default recursion limit
    (~1000) on real recall files. Behavior is otherwise unchanged:
    a docid is appended whenever it differs from the last collected one,
    and the ``docids`` argument is not mutated.
    """
    result = list(docids)
    for recall in info_list:
        # Append only when the docid changes relative to the previous entry.
        if not result or result[-1] != recall.docid:
            result.append(recall.docid)
    return result
def to_df(path: Path) -> pd.DataFrame:
    """
    Load a recall file (.trec file which consists of recalls)
    as a pd.DataFrame: one row per docid, one column per cutoff.
    """
    with open(path) as fin:
        lines = fin.read().splitlines()
    info_list: List[Recall] = [Recall.from_line(ln) for ln in lines]
    docids: List[str] = get_uniq_docids(info_list, [])
    scores: np.ndarray = np.array([r.score for r in info_list]).reshape(len(docids), -1)
    return pd.DataFrame(scores,
                        index=docids,
                        columns=[r.n_items for r in info_list[:scores.shape[1]]])
| [
"pandas.DataFrame",
"numpy.array"
] | [((1547, 1638), 'pandas.DataFrame', 'pd.DataFrame', (['mat'], {'index': 'docids', 'columns': '[r.n_items for r in info_list[:mat.shape[1]]]'}), '(mat, index=docids, columns=[r.n_items for r in info_list[:mat.\n shape[1]]])\n', (1559, 1638), True, 'import pandas as pd\n'), ((1462, 1510), 'numpy.array', 'np.array', (['[recall.score for recall in info_list]'], {}), '([recall.score for recall in info_list])\n', (1470, 1510), True, 'import numpy as np\n')] |
"""
Copyright 2021 ETH Zurich, author: <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file gives you the functions for adding descriptive features to
your dataset prior regression learning, most importantly the ability
to add embedded features that describe slowly changing, important
processes in your dataset.
"""
import numpy as np
import xarray.ufuncs as xu
def log_fracmis(data, logtext=""):
    """Print the fraction of NaN entries in ``data``.

    Expects an xarray object: the ratio is reduced to its ``.values``
    scalar before printing. Returns nothing.
    """
    missing = np.isnan(data).sum() / data.size
    print(f"fraction missing {logtext}: {missing.values}")
def logscale_precip(data, varname="precipitation"):
    """
    log-scale variable "varname" in data
    Parameters
    ----------
    data: xarray dataarray, where varname is the variable that needs to be
    log-scaled
    Returns
    ----------
    data: data with variable log-scaled
    Note: the input ``data`` is modified in place (via .loc assignment)
    and also returned.
    """
    # define lower threshold for "no rain"
    data.loc[varname] = data.loc[varname].where(data.loc[varname] > 0.000001, -1)
    # log of the -1 sentinel yields NaN, which is replaced below.
    data.loc[varname] = xu.log(data.loc[varname])
    # all zero precip got -1, all -1 got nan, all nan get -20
    data.loc[varname] = data.loc[varname].where(~np.isnan(data.loc[varname]), -20)
    return data
def create_precip_binary(data, varname="precipitation"):
    """Return a 0/1 indicator named "ip": 1 where ``varname`` is below 1e-5."""
    is_dry = data.loc[varname] < 0.00001
    return (is_dry * 1).rename("ip")
def normalise(data):
    """
    for each variable (i.e. across the dimension 'variable') normalise the data
    to mean zero and standard deviation one
    Parameters
    ----------
    data: xarray dataarray, with dimensions variable, time, landpoints
    Returns
    ----------
    data: data normalised
    datamean: mean for each variable, for renormalisation
    datastd: std for each variable, for renormalisation
    """
    # Statistics are reduced over time and space, leaving one value per variable.
    datamean = data.mean(dim=("time", "landpoints"))
    datastd = data.std(dim=("time", "landpoints"))
    # NOTE(review): a constant variable (std == 0) would divide by zero here.
    data = (data - datamean) / datastd
    return data, datamean, datastd
def stack(data):
    """Flatten (time, landpoints) into one 'datapoints' dimension and transpose."""
    # add select variable to remove it in gapfill?
    return data.stack(datapoints=("time", "landpoints")).reset_index("datapoints").T
def create_lat_lon_features(constant_maps):
    """
    create latitude and longitude as additional feature for data
    Parameters
    ----------
    constant_maps: object exposing ``latitude`` and ``longitude`` coordinates
    Returns
    ----------
    latitude_arr: ((dims), 2-D latitude grid) ready for xarray assignment
    longitude_arr: ((dims), 2-D longitude grid)
    """
    lon_grid, lat_grid = np.meshgrid(constant_maps.longitude, constant_maps.latitude)
    dims = ("latitude", "longitude")
    return (dims, lat_grid), (dims, lon_grid)
def create_time_feature(data):
    """
    create timestep as additional feature for data
    Parameters
    ----------
    data: array-like with shape (variable, time, landpoints)
    Returns
    ----------
    time_arr: (("time", "landpoints"), array) where every column is the
    sequence 0..ntimesteps-1
    """
    _, n_time, n_land = data.shape
    # Repeat the 0..n_time-1 ramp once per land point, then transpose to
    # (time, landpoints).
    steps = np.tile(np.arange(n_time), n_land).reshape(n_land, n_time).T
    return (("time", "landpoints"), steps)
def create_embedded_features(data, varnames, window_size, lag):
    """
    for each variable, create embedded features of data with mean over window
    size s and time lag l
    Parameters
    ----------
    data: xarray dataarray, with dimensions including variable, time
    varnames: list of all variables for calculating this embedded feature
    window_size: int, window size in days
    lag: int, lag of window from today in days
    Returns
    ----------
    tmp: embedded features of variables to be added to data
    """
    # rolling window average
    # NOTE(review): the window length is |lag - window_size| -- confirm this
    # is the intended interaction of the two parameters.
    tmp = (
        data.sel(variable=varnames)
        .rolling(time=np.abs(lag - window_size), center=False, min_periods=1)
        .mean()
    )
    # overwrite time stamp to current day
    tmp = tmp.assign_coords(
        time=[time + np.timedelta64(lag, "D") for time in tmp.coords["time"].values]
    )
    # rename feature to not overwrite variable
    tmp = tmp.assign_coords(variable=[f"{var}lag_{window_size}ff" for var in varnames])
    # fill missing values in lagged features at beginning or end of time series
    varmeans = tmp.mean(dim=("time"))
    tmp = tmp.fillna(varmeans)
    return tmp
def stack_constant_maps(data, constant_maps):
    """Broadcast time-invariant maps along the time axis of ``data``.

    Expands ``constant_maps`` with a 'time' dimension of the same length as
    ``data`` and copies over the time coordinate so both align.
    """
    ntimesteps = data.coords["time"].size
    constant_maps = constant_maps.expand_dims({"time": ntimesteps}, axis=1)
    # constant_maps = np.repeat(constant_maps, ntimesteps, axis=1)
    constant_maps["time"] = data["time"]
    return constant_maps
| [
"numpy.tile",
"numpy.abs",
"xarray.ufuncs.log",
"numpy.isnan",
"numpy.timedelta64",
"numpy.meshgrid",
"numpy.arange"
] | [((1453, 1478), 'xarray.ufuncs.log', 'xu.log', (['data.loc[varname]'], {}), '(data.loc[varname])\n', (1459, 1478), True, 'import xarray.ufuncs as xu\n'), ((2873, 2933), 'numpy.meshgrid', 'np.meshgrid', (['constant_maps.longitude', 'constant_maps.latitude'], {}), '(constant_maps.longitude, constant_maps.latitude)\n', (2884, 2933), True, 'import numpy as np\n'), ((3466, 3487), 'numpy.arange', 'np.arange', (['ntimesteps'], {}), '(ntimesteps)\n', (3475, 3487), True, 'import numpy as np\n'), ((1591, 1618), 'numpy.isnan', 'np.isnan', (['data.loc[varname]'], {}), '(data.loc[varname])\n', (1599, 1618), True, 'import numpy as np\n'), ((3502, 3528), 'numpy.tile', 'np.tile', (['timedat', 'nlandpts'], {}), '(timedat, nlandpts)\n', (3509, 3528), True, 'import numpy as np\n'), ((906, 920), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (914, 920), True, 'import numpy as np\n'), ((4275, 4300), 'numpy.abs', 'np.abs', (['(lag - window_size)'], {}), '(lag - window_size)\n', (4281, 4300), True, 'import numpy as np\n'), ((4446, 4470), 'numpy.timedelta64', 'np.timedelta64', (['lag', '"""D"""'], {}), "(lag, 'D')\n", (4460, 4470), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
# K-means step1
def k_means_step1(img, Class=5):
    """First K-means step: assign every pixel to its nearest of ``Class``
    randomly chosen centroid colors (seeded, so deterministic) and return
    the label map scaled by 50 as a uint8 image.
    """
    height, width, _ = img.shape
    # Fixed seed keeps the centroid selection reproducible.
    np.random.seed(0)
    pixels = np.reshape(img, (height * width, -1))
    # Pick ``Class`` distinct pixels as initial centroids.
    chosen = np.random.choice(np.arange(height * width), Class, replace=False)
    centroids = pixels[chosen].copy()
    print(centroids)
    labels = np.zeros(height * width, dtype=int)
    for idx, pixel in enumerate(pixels):
        dists = np.sqrt(np.sum((centroids - pixel) ** 2, axis=1))
        labels[idx] = np.argmin(dists)
    # Scale labels so distinct classes are visible as gray levels.
    out = np.reshape(labels, (height, width)) * 50
    return out.astype(np.uint8)
# Read the input image as float32 (cv2.imread returns None if the file is
# missing -- .astype would then raise AttributeError).
img = cv2.imread("../imori.jpg").astype(np.float32)
# Run the K-means class assignment.
# NOTE(review): the original comment said "step2" but k_means_step1 is called
# -- confirm which step is intended.
out = k_means_step1(img)
# cv2.imwrite("out.jpg", out)
# Display the resulting label image until a key is pressed.
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"numpy.reshape",
"cv2.imshow",
"numpy.sum",
"numpy.zeros",
"cv2.destroyAllWindows",
"numpy.random.seed",
"numpy.argmin",
"cv2.waitKey",
"numpy.arange",
"cv2.imread"
] | [((826, 851), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'out'], {}), "('result', out)\n", (836, 851), False, 'import cv2\n'), ((852, 866), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (863, 866), False, 'import cv2\n'), ((867, 890), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (888, 890), False, 'import cv2\n'), ((196, 213), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (210, 213), True, 'import numpy as np\n'), ((233, 261), 'numpy.reshape', 'np.reshape', (['img', '(H * W, -1)'], {}), '(img, (H * W, -1))\n', (243, 261), True, 'import numpy as np\n'), ((395, 421), 'numpy.zeros', 'np.zeros', (['(H * W)'], {'dtype': 'int'}), '(H * W, dtype=int)\n', (403, 421), True, 'import numpy as np\n'), ((314, 330), 'numpy.arange', 'np.arange', (['(H * W)'], {}), '(H * W)\n', (323, 330), True, 'import numpy as np\n'), ((584, 598), 'numpy.argmin', 'np.argmin', (['dis'], {}), '(dis)\n', (593, 598), True, 'import numpy as np\n'), ((615, 639), 'numpy.reshape', 'np.reshape', (['clss', '(H, W)'], {}), '(clss, (H, W))\n', (625, 639), True, 'import numpy as np\n'), ((707, 733), 'cv2.imread', 'cv2.imread', (['"""../imori.jpg"""'], {}), "('../imori.jpg')\n", (717, 733), False, 'import cv2\n'), ((512, 546), 'numpy.sum', 'np.sum', (['((Cs - img[i]) ** 2)'], {'axis': '(1)'}), '((Cs - img[i]) ** 2, axis=1)\n', (518, 546), True, 'import numpy as np\n')] |
import numpy as np
from scipy import stats
def uma_função_fictícia():
    """Does nothing useful, but exercises the dependencies. :)"""
    sample = np.random.rand(5, 5)
    summary = stats.describe(sample)
    print(summary)
# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    uma_função_fictícia()
| [
"scipy.stats.describe",
"numpy.random.rand"
] | [((133, 153), 'numpy.random.rand', 'np.random.rand', (['(5)', '(5)'], {}), '(5, 5)\n', (147, 153), True, 'import numpy as np\n'), ((164, 187), 'scipy.stats.describe', 'stats.describe', (['matriz1'], {}), '(matriz1)\n', (178, 187), False, 'from scipy import stats\n')] |
"""Utilities used in the Kadenze Academy Course on Deep Learning w/ Tensorflow.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
<NAME>
Copyright <NAME>, June 2016.
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import urllib
import numpy as np
import zipfile
import os
from scipy.io import wavfile
from scipy.misc import imsave
def download(path):
    """Use urllib to download a file.
    Parameters
    ----------
    path : str
        Url to download
    Returns
    -------
    path : str
        Location of downloaded file.
    """
    import os
    from six.moves import urllib
    # Target file name is the last URL path component.
    fname = path.split('/')[-1]
    # Skip the download if the file already exists in the working directory.
    if os.path.exists(fname):
        return fname
    print('Downloading ' + path)
    def progress(count, block_size, total_size):
        # Report hook: print cumulative MB every 20 blocks, overwriting the line.
        if count % 20 == 0:
            print('Downloaded %02.02f/%02.02f MB' % (
                count * block_size / 1024.0 / 1024.0,
                total_size / 1024.0 / 1024.0), end='\r')
    filepath, _ = urllib.request.urlretrieve(
        path, filename=fname, reporthook=progress)
    return filepath
def download_and_extract_tar(path, dst):
    """Download and extract a tar file.
    Parameters
    ----------
    path : str
        Url to tar file to download.
    dst : str
        Location to save tar file contents.
    """
    import tarfile
    filepath = download(path)
    # Create the destination directory if needed, then extract everything.
    if not os.path.exists(dst):
        os.makedirs(dst)
    # 'r:gz' assumes a gzip-compressed tarball.
    tarfile.open(filepath, 'r:gz').extractall(dst)
def download_and_extract_zip(path, dst):
    """Download and extract a zip file.
    Parameters
    ----------
    path : str
        Url to zip file to download.
    dst : str
        Location to save zip file contents.
    """
    import zipfile
    filepath = download(path)
    # Create the destination directory if needed, then extract everything.
    if not os.path.exists(dst):
        os.makedirs(dst)
    zf = zipfile.ZipFile(file=filepath)
    zf.extractall(dst)
def load_audio(filename, b_normalize=True):
    """Load the audiofile at the provided filename using scipy.io.wavfile.

    Optionally peak-normalizes the samples to [-1, 1] and removes the mean
    (DC offset). The sample rate is read but not returned.

    Parameters
    ----------
    filename : str
        File to load.
    b_normalize : bool, optional
        Normalize to the maximum value.
    """
    _, samples = wavfile.read(filename)
    if not b_normalize:
        return samples
    samples = samples.astype(np.float32)
    samples = samples / np.max(np.abs(samples))
    return samples - np.mean(samples)
def corrupt(x):
    """Take an input tensor and add uniform masking.
    Parameters
    ----------
    x : Tensor/Placeholder
        Input to corrupt.
    Returns
    -------
    x_corrupted : Tensor
        50 pct of values corrupted.
    """
    # Multiply by a random mask of 0s and 1s: random_uniform with
    # maxval=2 and dtype int32 yields values in {0, 1}.
    return tf.multiply(x, tf.cast(tf.random_uniform(shape=tf.shape(x),
                                               minval=0,
                                               maxval=2,
                                               dtype=tf.int32), tf.float32))
def interp(l, r, n_samples):
    """Interpolate linearly between the arrays l and r, n_samples times.
    Parameters
    ----------
    l : np.ndarray
        Left edge
    r : np.ndarray
        Right edge
    n_samples : int
        Number of samples
    Returns
    -------
    arr : np.ndarray
        Interpolated array (first row equals l, last row equals r)
    """
    fractions = [step / (n_samples - 1) for step in range(n_samples)]
    return np.array([l + frac * (r - l) for frac in fractions])
def make_latent_manifold(corners, n_samples):
    """Create a 2d manifold out of the provided corners: n_samples * n_samples.
    Parameters
    ----------
    corners : list of np.ndarray
        The four corners to interpolate.
    n_samples : int
        Number of samples to use in interpolation.
    Returns
    -------
    arr : np.ndarray
        Stacked array of all 2D interpolated samples
    """
    # Interpolate down the two vertical edges, then across each row.
    left_edge = interp(corners[0], corners[1], n_samples)
    right_edge = interp(corners[2], corners[3], n_samples)
    rows = [interp(lhs, rhs, n_samples) for lhs, rhs in zip(left_edge, right_edge)]
    return np.vstack(rows)
def imcrop_tosquare(img):
    """Make any image a square image.

    Center-crops the two leading (spatial) axes to the smaller of the two;
    any trailing axes (e.g. channels) are kept intact.

    Parameters
    ----------
    img : np.ndarray
        Input image to crop, assumed at least 2d.
    Returns
    -------
    crop : np.ndarray
        Cropped image.
    """
    h, w = img.shape[:2]
    side = min(h, w)
    # Split the excess evenly (extra pixel goes to the far side).
    top = (h - side) // 2
    left = (w - side) // 2
    return img[top:top + side, left:left + side]
def slice_montage(montage, img_h, img_w, n_imgs):
    """Slice a montage image into n_img h x w images.
    Performs the opposite of the montage function. Takes a montage image and
    slices it back into a N x H x W x C image.
    Parameters
    ----------
    montage : np.ndarray
        Montage image to slice.
    img_h : int
        Height of sliced image
    img_w : int
        Width of sliced image
    n_imgs : int
        Number of images to slice
    Returns
    -------
    sliced : np.ndarray
        Sliced images as 4d array.
    """
    side = int(np.sqrt(n_imgs))
    tiles = []
    for i in range(side):
        # Each cell is offset by its index (1px border) plus previous cells.
        top = 1 + i + i * img_h
        for j in range(side):
            left = 1 + j + j * img_w
            tiles.append(montage[top:top + img_h, left:left + img_w])
    return np.array(tiles)
def montage(images, saveto='montage.png'):
    """Draw all images as a montage separated by 1 pixel borders.
    Also saves the file to the destination specified by `saveto`.
    Parameters
    ----------
    images : numpy.ndarray
        Input array to create montage of. Array should be:
        batch x height x width x channels.
    saveto : str
        Location to save the resulting montage image.
    Returns
    -------
    m : numpy.ndarray
        Montage image.
    """
    if isinstance(images, list):
        images = np.array(images)
    img_h = images.shape[1]
    img_w = images.shape[2]
    # Arrange the batch in a near-square grid of n_plots x n_plots cells.
    n_plots = int(np.ceil(np.sqrt(images.shape[0])))
    # The 0.5 background renders as mid-gray borders between tiles.
    if len(images.shape) == 4 and images.shape[3] == 3:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1, 3)) * 0.5
    elif len(images.shape) == 4 and images.shape[3] == 1:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1, 1)) * 0.5
    elif len(images.shape) == 3:
        m = np.ones(
            (images.shape[1] * n_plots + n_plots + 1,
             images.shape[2] * n_plots + n_plots + 1)) * 0.5
    else:
        raise ValueError('Could not parse image shape of {}'.format(
            images.shape))
    for i in range(n_plots):
        for j in range(n_plots):
            this_filter = i * n_plots + j
            # The last grid row/column may stay empty when the batch size is
            # not a perfect square.
            if this_filter < images.shape[0]:
                this_img = images[this_filter]
                m[1 + i + i * img_h:1 + i + (i + 1) * img_h,
                  1 + j + j * img_w:1 + j + (j + 1) * img_w] = this_img
    # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 -- consider
    # imageio.imwrite for newer environments.
    imsave(arr=np.squeeze(m), name=saveto)
    return m
def montage_filters(W):
    """Draws all filters (n_input * n_output filters) as a
    montage image separated by 1 pixel borders.
    Parameters
    ----------
    W : Tensor
        Input tensor to create montage of (k_h x k_w x n_in x n_out).
    Returns
    -------
    m : numpy.ndarray
        Montage image (2d, gray 0.5 borders).
    """
    k_h, k_w = W.shape[0], W.shape[1]
    # Collapse input/output channels into one trailing filter axis.
    W = np.reshape(W, [k_h, k_w, 1, W.shape[2] * W.shape[3]])
    n_filters = W.shape[-1]
    n_plots = int(np.ceil(np.sqrt(n_filters)))
    canvas = np.ones(
        (k_h * n_plots + n_plots + 1,
         k_w * n_plots + n_plots + 1)) * 0.5
    for cell in range(n_plots * n_plots):
        if cell >= n_filters:
            break
        i, j = divmod(cell, n_plots)
        canvas[1 + i + i * k_h:1 + i + (i + 1) * k_h,
               1 + j + j * k_w:1 + j + (j + 1) * k_w] = np.squeeze(W[:, :, :, cell])
    return canvas
def get_celeb_files(dst='img_align_celeba', max_images=100):
    """Download the first 100 images of the celeb dataset.
    Files will be placed in a directory 'img_align_celeba' if one
    doesn't exist.
    Returns
    -------
    files : list of strings
        Locations to the first 100 images of the celeb net dataset.
    """
    # Create a directory
    if not os.path.exists(dst):
        os.mkdir(dst)
    # Now perform the following 100 times:
    for img_i in range(1, max_images + 1):
        # create a string using the current loop counter
        f = '000%03d.jpg' % img_i
        # Only download files that are not already on disk.
        if not os.path.exists(os.path.join(dst, f)):
            # and get the url with that string appended the end
            url = 'https://s3.amazonaws.com/cadl/celeb-align/' + f
            # We'll print this out to the console so we can see how far we've gone
            print(url, end='\r')
            # And now download the url to a location inside our new directory
            urllib.request.urlretrieve(url, os.path.join(dst, f))
    # NOTE(review): os.listdir order is arbitrary, so the returned slice is
    # not guaranteed to be the numerically first images.
    files = [os.path.join(dst, file_i)
             for file_i in os.listdir(dst)
             if '.jpg' in file_i][:max_images]
    return files
def get_celeb_imgs(max_images=100):
    """Load the first `max_images` images of the celeb dataset.
    Returns
    -------
    imgs : list of np.ndarray
        List of the first 100 images from the celeb dataset
    """
    files = get_celeb_files(max_images=max_images)
    return [plt.imread(f) for f in files]
def gauss(mean, stddev, ksize):
    """Use Tensorflow to compute a Gaussian Kernel.
    Parameters
    ----------
    mean : float
        Mean of the Gaussian (e.g. 0.0).
    stddev : float
        Standard Deviation of the Gaussian (e.g. 1.0).
    ksize : int
        Size of kernel (e.g. 16).
    Returns
    -------
    kernel : np.ndarray
        Computed Gaussian Kernel using Tensorflow.
    """
    # Build and evaluate in a throwaway graph/session so global state is untouched.
    g = tf.Graph()
    with tf.Session(graph=g):
        # Sample the curve at ksize points over [-3, 3].
        x = tf.linspace(-3.0, 3.0, ksize)
        z = (tf.exp(tf.negative(tf.pow(x - mean, 2.0) /
                           (2.0 * tf.pow(stddev, 2.0)))) *
             (1.0 / (stddev * tf.sqrt(2.0 * 3.1415))))
        return z.eval()
def gauss2d(mean, stddev, ksize):
    """Use Tensorflow to compute a 2D Gaussian Kernel.
    Parameters
    ----------
    mean : float
        Mean of the Gaussian (e.g. 0.0).
    stddev : float
        Standard Deviation of the Gaussian (e.g. 1.0).
    ksize : int
        Size of kernel (e.g. 16).
    Returns
    -------
    kernel : np.ndarray
        Computed 2D Gaussian Kernel using Tensorflow.
    """
    z = gauss(mean, stddev, ksize)
    g = tf.Graph()
    with tf.Session(graph=g):
        # Outer product of the 1D kernel with itself gives the 2D kernel.
        z_2d = tf.matmul(tf.reshape(z, [ksize, 1]), tf.reshape(z, [1, ksize]))
        return z_2d.eval()
def convolve(img, kernel):
    """Use Tensorflow to convolve a 4D image with a 4D kernel.
    Parameters
    ----------
    img : np.ndarray
        4-dimensional image shaped N x H x W x C
    kernel : np.ndarray
        4-dimensional image shape K_H, K_W, C_I, C_O corresponding to the
        kernel's height and width, the number of input channels, and the
        number of output channels. Note that C_I should = C.
    Returns
    -------
    result : np.ndarray
        Convolved result.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        # Stride 1 in every dimension with SAME padding preserves H x W.
        convolved = tf.nn.conv2d(img, kernel, strides=[1, 1, 1, 1], padding='SAME')
        res = convolved.eval()
    return res
def gabor(ksize=32):
    """Use Tensorflow to compute a 2D Gabor Kernel.
    Parameters
    ----------
    ksize : int, optional
        Size of kernel.
    Returns
    -------
    gabor : np.ndarray
        Gabor kernel with ksize x ksize dimensions.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        z_2d = gauss2d(0.0, 1.0, ksize)
        ones = tf.ones((1, ksize))
        # A sine wave along one axis, tiled across the other ...
        ys = tf.sin(tf.linspace(-3.0, 3.0, ksize))
        ys = tf.reshape(ys, [ksize, 1])
        wave = tf.matmul(ys, ones)
        # ... modulated by the 2D Gaussian envelope gives the Gabor kernel.
        gabor = tf.multiply(wave, z_2d)
        return gabor.eval()
def build_submission(filename, file_list, optional_file_list=()):
    """Helper utility to check homework assignment submissions and package them.
    Parameters
    ----------
    filename : str
        Output zip file name
    file_list : tuple
        Tuple of files to include
    """
    # check each file exists
    for part_i, file_i in enumerate(file_list):
        if not os.path.exists(file_i):
            print('\nYou are missing the file {}. '.format(file_i) +
                  'It does not look like you have completed Part {}.'.format(
                      part_i + 1))
    def zipdir(path, zf):
        # Walk the tree and add only files whose names match the lists.
        for root, dirs, files in os.walk(path):
            for file in files:
                # make sure the files are part of the necessary file list
                if file.endswith(file_list) or file.endswith(optional_file_list):
                    zf.write(os.path.join(root, file))
    # create a zip file with the necessary files
    zipf = zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED)
    zipdir('.', zipf)
    zipf.close()
    print('Your assignment zip file has been created!')
    print('Now submit the file:\n{}\nto Kadenze for grading!'.format(
        os.path.abspath(filename)))
def normalize(a, s=0.1):
    '''Normalize the image range for visualization.

    Centers ``a``, scales by ``s`` standard deviations, clips to [0, 1] and
    maps to uint8 [0, 255].
    '''
    # The 1e-4 floor avoids dividing by zero for a constant input.
    scaled = (a - a.mean()) / max(a.std(), 1e-4)
    shifted = np.clip(scaled * s + 0.5, 0, 1)
    return np.uint8(shifted * 255)
# %%
def weight_variable(shape, **kwargs):
    '''Helper function to create a weight variable initialized with
    a normal distribution
    Parameters
    ----------
    shape : list
        Size of weight variable
    '''
    if isinstance(shape, list):
        # tf.stack lets the shape contain tensors; set_shape restores the
        # static shape information for downstream ops.
        initial = tf.random_normal(tf.stack(shape), mean=0.0, stddev=0.01)
        initial.set_shape(shape)
    else:
        initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
    return tf.Variable(initial, **kwargs)
# %%
def bias_variable(shape, **kwargs):
    '''Helper function to create a bias variable.

    NOTE(review): despite the original docstring, the bias is initialized
    from a normal distribution (mean 0.0, stddev 0.01), not with a
    constant value — the body is identical to weight_variable.

    Parameters
    ----------
    shape : list
        Size of the bias variable; a list is stacked into a shape tensor,
        anything else is passed to tf.random_normal directly.
    '''
    if isinstance(shape, list):
        initial = tf.random_normal(tf.stack(shape), mean=0.0, stddev=0.01)
        initial.set_shape(shape)
    else:
        initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
    return tf.Variable(initial, **kwargs)
def binary_cross_entropy(z, x, name=None):
    """Binary Cross Entropy measures cross entropy of a binary variable.

    loss(x, z) = - sum_i (x[i] * log(z[i]) + (1 - x[i]) * log(1 - z[i]))

    Parameters
    ----------
    z : tf.Tensor
        A `Tensor` of the same type and shape as `x`.
    x : tf.Tensor
        A `Tensor` of type `float32` or `float64`.
    """
    # Small epsilon keeps the logs finite when z hits exactly 0 or 1.
    eps = 1e-12
    with tf.variable_scope(name or 'bce'):
        positive_term = x * tf.log(z + eps)
        negative_term = (1. - x) * tf.log(1. - z + eps)
        return -(positive_term + negative_term)
def conv2d(x, n_output,
           k_h=5, k_w=5, d_h=2, d_w=2,
           padding='SAME', name='conv2d', reuse=None):
    """Helper for creating a 2d convolution operation.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to convolve.
    n_output : int
        Number of filters.
    k_h : int, optional
        Kernel height
    k_w : int, optional
        Kernel width
    d_h : int, optional
        Height stride
    d_w : int, optional
        Width stride
    padding : str, optional
        Padding type: "SAME" or "VALID"
    name : str, optional
        Variable scope
    reuse : bool or None, optional
        Whether to reuse variables already created in this scope.

    Returns
    -------
    h, W : tf.Tensor, tf.Tensor
        Output of the convolution (after the bias add) and the kernel
        weight variable.  NOTE: the original docstring only mentioned a
        single tensor; the function actually returns a pair.
    """
    with tf.variable_scope(name or 'conv2d', reuse=reuse):
        # Kernel shape is [height, width, in_channels, out_channels];
        # input channel count is read from x's static shape.
        W = tf.get_variable(
            name='W',
            shape=[k_h, k_w, x.get_shape()[-1], n_output],
            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(
            name='conv',
            input=x,
            filter=W,
            strides=[1, d_h, d_w, 1],
            padding=padding)
        # One zero-initialized bias per output channel.
        b = tf.get_variable(
            name='b',
            shape=[n_output],
            initializer=tf.constant_initializer(0.0))
        h = tf.nn.bias_add(
            name='h',
            value=conv,
            bias=b)
    return h, W
def deconv2d(x, n_output_h, n_output_w, n_output_ch, n_input_ch=None,
             k_h=5, k_w=5, d_h=2, d_w=2,
             padding='SAME', name='deconv2d', reuse=None):
    """Deconvolution helper.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to convolve.
    n_output_h : int
        Height of output
    n_output_w : int
        Width of output
    n_output_ch : int
        Number of filters.
    n_input_ch : int or None, optional
        Number of input channels; defaults to x's last static dimension.
    k_h : int, optional
        Kernel height
    k_w : int, optional
        Kernel width
    d_h : int, optional
        Height stride
    d_w : int, optional
        Width stride
    padding : str, optional
        Padding type: "SAME" or "VALID"
    name : str, optional
        Variable scope
    reuse : bool or None, optional
        Whether to reuse variables already created in this scope.

    Returns
    -------
    h, W : tf.Tensor, tf.Tensor
        Output of the transposed convolution (after the bias add) and the
        kernel weight variable.  NOTE: the original docstring only
        mentioned a single tensor; the function actually returns a pair.
    """
    with tf.variable_scope(name or 'deconv2d', reuse=reuse):
        # conv2d_transpose kernels are [h, w, out_channels, in_channels].
        W = tf.get_variable(
            name='W',
            shape=[k_h, k_w, n_output_ch, n_input_ch or x.get_shape()[-1]],
            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        # Batch size is taken dynamically from x at run time.
        conv = tf.nn.conv2d_transpose(
            name='conv_t',
            value=x,
            filter=W,
            output_shape=tf.stack(
                [tf.shape(x)[0], n_output_h, n_output_w, n_output_ch]),
            strides=[1, d_h, d_w, 1],
            padding=padding)
        # Pin the static output shape (conv2d_transpose loses it).
        conv.set_shape([None, n_output_h, n_output_w, n_output_ch])
        b = tf.get_variable(
            name='b',
            shape=[n_output_ch],
            initializer=tf.constant_initializer(0.0))
        h = tf.nn.bias_add(name='h', value=conv, bias=b)
    return h, W
def lrelu(features, leak=0.2):
    """Leaky rectifier.

    Parameters
    ----------
    features : tf.Tensor
        Input to apply leaky rectifier to.
    leak : float, optional
        Percentage of leak.

    Returns
    -------
    op : tf.Tensor
        Resulting output of applying leaky rectifier activation.
    """
    # A single branch-free formula: a weighted blend of the identity and
    # the absolute value equals x for x >= 0 and leak*x for x < 0.
    identity_weight = 0.5 * (1 + leak)
    magnitude_weight = 0.5 * (1 - leak)
    return identity_weight * features + magnitude_weight * abs(features)
def linear(x, n_output, name=None, activation=None, reuse=None):
    """Fully connected layer.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to connect
    n_output : int
        Number of output neurons
    name : None, optional
        Scope to apply
    activation : callable, optional
        Optional nonlinearity applied to the output.
    reuse : bool or None, optional
        Whether to reuse variables already created in this scope.

    Returns
    -------
    h, W : tf.Tensor, tf.Tensor
        Output of fully connected layer and the weight matrix
    """
    # Collapse any higher-rank input down to [batch, features] first.
    if len(x.get_shape()) != 2:
        x = flatten(x, reuse=reuse)

    n_input = x.get_shape().as_list()[1]
    with tf.variable_scope(name or "fc", reuse=reuse):
        weights = tf.get_variable(
            name='W',
            shape=[n_input, n_output],
            dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())
        bias = tf.get_variable(
            name='b',
            shape=[n_output],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.0))
        h = tf.nn.bias_add(
            name='h',
            value=tf.matmul(x, weights),
            bias=bias)
        if activation:
            h = activation(h)
        return h, weights
def flatten(x, name=None, reuse=None):
    """Flatten Tensor to 2-dimensions.

    Parameters
    ----------
    x : tf.Tensor
        Input tensor to flatten.
    name : None, optional
        Variable scope for flatten operations
    reuse : None, optional
        Unused here; kept for signature compatibility with callers.

    Returns
    -------
    flattened : tf.Tensor
        Flattened tensor.
    """
    with tf.variable_scope('flatten'):
        dims = x.get_shape().as_list()
        ndims = len(dims)
        if ndims == 4:
            # Merge the three trailing (spatial/channel) axes into one.
            return tf.reshape(
                x,
                shape=[-1, dims[1] * dims[2] * dims[3]])
        if ndims in (1, 2):
            # Already flat enough; pass through unchanged.
            return x
        raise ValueError('Expected n dimensions of 1, 2 or 4. Found:',
                         ndims)
def to_tensor(x):
    """Convert 2 dim Tensor to a 4 dim Tensor ready for convolution.

    Performs the opposite of flatten(x). If the tensor is already 4-D,
    this returns the same as the input, leaving it unchanged.

    Parameters
    ----------
    x : tf.Tensor
        Input 2-D tensor. If 4-D already, left unchanged.

    Returns
    -------
    x : tf.Tensor
        4-D representation of the input.

    Raises
    ------
    ValueError
        If the tensor is not 2D or already 4D.
    """
    ndims = len(x.get_shape())
    if ndims == 4:
        return x
    if ndims != 2:
        raise ValueError('Unsupported input dimensions')

    n_input = x.get_shape().as_list()[1]
    side = np.sqrt(n_input)
    if side == int(side):
        # Perfect square: treat as a single-channel square image.
        side = int(side)
        return tf.reshape(x, [-1, side, side, 1], name='reshape')
    side = np.sqrt(n_input / 3)
    if side == int(side):
        # Square after dividing out 3 channels: treat as RGB.
        side = int(side)
        return tf.reshape(x, [-1, side, side, 3], name='reshape')
    # Fall back to a 1x1 "image" carrying all features as channels.
    return tf.reshape(x, [-1, 1, 1, n_input], name='reshape')
| [
"tarfile.open",
"numpy.sqrt",
"tensorflow.shape",
"zipfile.ZipFile",
"tensorflow.multiply",
"numpy.array",
"tensorflow.log",
"os.walk",
"os.path.exists",
"tensorflow.Graph",
"numpy.mean",
"numpy.reshape",
"tensorflow.random_normal",
"os.listdir",
"tensorflow.pow",
"tensorflow.Session",... | [((664, 685), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (678, 685), False, 'import os\n'), ((1004, 1073), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['path'], {'filename': 'fname', 'reporthook': 'progress'}), '(path, filename=fname, reporthook=progress)\n', (1030, 1073), False, 'from six.moves import urllib\n'), ((2250, 2272), 'scipy.io.wavfile.read', 'wavfile.read', (['filename'], {}), '(filename)\n', (2262, 2272), False, 'from scipy.io import wavfile\n'), ((4000, 4020), 'numpy.vstack', 'np.vstack', (['embedding'], {}), '(embedding)\n', (4009, 4020), True, 'import numpy as np\n'), ((4278, 4299), 'numpy.min', 'np.min', (['img.shape[:2]'], {}), '(img.shape[:2])\n', (4284, 4299), True, 'import numpy as np\n'), ((4361, 4382), 'numpy.flatnonzero', 'np.flatnonzero', (['extra'], {}), '(extra)\n', (4375, 4382), True, 'import numpy as np\n'), ((5298, 5317), 'numpy.array', 'np.array', (['sliced_ds'], {}), '(sliced_ds)\n', (5306, 5317), True, 'import numpy as np\n'), ((7343, 7410), 'numpy.reshape', 'np.reshape', (['W', '[W.shape[0], W.shape[1], 1, W.shape[2] * W.shape[3]]'], {}), '(W, [W.shape[0], W.shape[1], 1, W.shape[2] * W.shape[3]])\n', (7353, 7410), True, 'import numpy as np\n'), ((9845, 9855), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (9853, 9855), True, 'import tensorflow as tf\n'), ((10580, 10590), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (10588, 10590), True, 'import tensorflow as tf\n'), ((11244, 11254), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (11252, 11254), True, 'import tensorflow as tf\n'), ((11687, 11697), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (11695, 11697), True, 'import tensorflow as tf\n'), ((12960, 13012), 'zipfile.ZipFile', 'zipfile.ZipFile', (['filename', '"""w"""', 'zipfile.ZIP_DEFLATED'], {}), "(filename, 'w', zipfile.ZIP_DEFLATED)\n", (12975, 13012), False, 'import zipfile\n'), ((13852, 13882), 'tensorflow.Variable', 'tf.Variable', 
(['initial'], {}), '(initial, **kwargs)\n', (13863, 13882), True, 'import tensorflow as tf\n'), ((14327, 14357), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial, **kwargs)\n', (14338, 14357), True, 'import tensorflow as tf\n'), ((1395, 1414), 'os.path.exists', 'os.path.exists', (['dst'], {}), '(dst)\n', (1409, 1414), False, 'import os\n'), ((1424, 1440), 'os.makedirs', 'os.makedirs', (['dst'], {}), '(dst)\n', (1435, 1440), False, 'import os\n'), ((1788, 1807), 'os.path.exists', 'os.path.exists', (['dst'], {}), '(dst)\n', (1802, 1807), False, 'import os\n'), ((1817, 1833), 'os.makedirs', 'os.makedirs', (['dst'], {}), '(dst)\n', (1828, 1833), False, 'import os\n'), ((1847, 1877), 'zipfile.ZipFile', 'zipfile.ZipFile', ([], {'file': 'filepath'}), '(file=filepath)\n', (1862, 1877), False, 'import zipfile\n'), ((2375, 2385), 'numpy.mean', 'np.mean', (['s'], {}), '(s)\n', (2382, 2385), True, 'import numpy as np\n'), ((4399, 4450), 'numpy.take', 'np.take', (['crop', '(extra[i] // 2 + np.r_[:size])'], {'axis': 'i'}), '(crop, extra[i] // 2 + np.r_[:size], axis=i)\n', (4406, 4450), True, 'import numpy as np\n'), ((5856, 5872), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (5864, 5872), True, 'import numpy as np\n'), ((7468, 7553), 'numpy.ones', 'np.ones', (['(W.shape[0] * n_plots + n_plots + 1, W.shape[1] * n_plots + n_plots + 1)'], {}), '((W.shape[0] * n_plots + n_plots + 1, W.shape[1] * n_plots + n_plots +\n 1))\n', (7475, 7553), True, 'import numpy as np\n'), ((8309, 8328), 'os.path.exists', 'os.path.exists', (['dst'], {}), '(dst)\n', (8323, 8328), False, 'import os\n'), ((8338, 8351), 'os.mkdir', 'os.mkdir', (['dst'], {}), '(dst)\n', (8346, 8351), False, 'import os\n'), ((9363, 9378), 'matplotlib.pyplot.imread', 'plt.imread', (['f_i'], {}), '(f_i)\n', (9373, 9378), True, 'import matplotlib.pyplot as plt\n'), ((9865, 9884), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g'}), '(graph=g)\n', (9875, 9884), True, 'import tensorflow as 
tf\n'), ((9898, 9927), 'tensorflow.linspace', 'tf.linspace', (['(-3.0)', '(3.0)', 'ksize'], {}), '(-3.0, 3.0, ksize)\n', (9909, 9927), True, 'import tensorflow as tf\n'), ((10600, 10619), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g'}), '(graph=g)\n', (10610, 10619), True, 'import tensorflow as tf\n'), ((11264, 11283), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g'}), '(graph=g)\n', (11274, 11283), True, 'import tensorflow as tf\n'), ((11305, 11368), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['img', 'kernel'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(img, kernel, strides=[1, 1, 1, 1], padding='SAME')\n", (11317, 11368), True, 'import tensorflow as tf\n'), ((11707, 11726), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g'}), '(graph=g)\n', (11717, 11726), True, 'import tensorflow as tf\n'), ((11783, 11802), 'tensorflow.ones', 'tf.ones', (['(1, ksize)'], {}), '((1, ksize))\n', (11790, 11802), True, 'import tensorflow as tf\n'), ((11867, 11893), 'tensorflow.reshape', 'tf.reshape', (['ys', '[ksize, 1]'], {}), '(ys, [ksize, 1])\n', (11877, 11893), True, 'import tensorflow as tf\n'), ((11909, 11928), 'tensorflow.matmul', 'tf.matmul', (['ys', 'ones'], {}), '(ys, ones)\n', (11918, 11928), True, 'import tensorflow as tf\n'), ((11945, 11968), 'tensorflow.multiply', 'tf.multiply', (['wave', 'z_2d'], {}), '(wave, z_2d)\n', (11956, 11968), True, 'import tensorflow as tf\n'), ((12642, 12655), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (12649, 12655), False, 'import os\n'), ((13794, 13840), 'tensorflow.random_normal', 'tf.random_normal', (['shape'], {'mean': '(0.0)', 'stddev': '(0.01)'}), '(shape, mean=0.0, stddev=0.01)\n', (13810, 13840), True, 'import tensorflow as tf\n'), ((14269, 14315), 'tensorflow.random_normal', 'tf.random_normal', (['shape'], {'mean': '(0.0)', 'stddev': '(0.01)'}), '(shape, mean=0.0, stddev=0.01)\n', (14285, 14315), True, 'import tensorflow as tf\n'), ((14739, 14771), 'tensorflow.variable_scope', 
'tf.variable_scope', (["(name or 'bce')"], {}), "(name or 'bce')\n", (14756, 14771), True, 'import tensorflow as tf\n'), ((15578, 15626), 'tensorflow.variable_scope', 'tf.variable_scope', (["(name or 'conv2d')"], {'reuse': 'reuse'}), "(name or 'conv2d', reuse=reuse)\n", (15595, 15626), True, 'import tensorflow as tf\n'), ((15825, 15916), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'name': '"""conv"""', 'input': 'x', 'filter': 'W', 'strides': '[1, d_h, d_w, 1]', 'padding': 'padding'}), "(name='conv', input=x, filter=W, strides=[1, d_h, d_w, 1],\n padding=padding)\n", (15837, 15916), True, 'import tensorflow as tf\n'), ((16123, 16167), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', ([], {'name': '"""h"""', 'value': 'conv', 'bias': 'b'}), "(name='h', value=conv, bias=b)\n", (16137, 16167), True, 'import tensorflow as tf\n'), ((17037, 17087), 'tensorflow.variable_scope', 'tf.variable_scope', (["(name or 'deconv2d')"], {'reuse': 'reuse'}), "(name or 'deconv2d', reuse=reuse)\n", (17054, 17087), True, 'import tensorflow as tf\n'), ((17792, 17836), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', ([], {'name': '"""h"""', 'value': 'conv', 'bias': 'b'}), "(name='h', value=conv, bias=b)\n", (17806, 17836), True, 'import tensorflow as tf\n'), ((18806, 18850), 'tensorflow.variable_scope', 'tf.variable_scope', (["(name or 'fc')"], {'reuse': 'reuse'}), "(name or 'fc', reuse=reuse)\n", (18823, 18850), True, 'import tensorflow as tf\n'), ((19711, 19739), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""flatten"""'], {}), "('flatten')\n", (19728, 19739), True, 'import tensorflow as tf\n'), ((20750, 20766), 'numpy.sqrt', 'np.sqrt', (['n_input'], {}), '(n_input)\n', (20757, 20766), True, 'import numpy as np\n'), ((5065, 5080), 'numpy.sqrt', 'np.sqrt', (['n_imgs'], {}), '(n_imgs)\n', (5072, 5080), True, 'import numpy as np\n'), ((5955, 5979), 'numpy.sqrt', 'np.sqrt', (['images.shape[0]'], {}), '(images.shape[0])\n', (5962, 5979), True, 'import numpy as np\n'), ((6050, 6148), 
'numpy.ones', 'np.ones', (['(images.shape[1] * n_plots + n_plots + 1, images.shape[2] * n_plots +\n n_plots + 1, 3)'], {}), '((images.shape[1] * n_plots + n_plots + 1, images.shape[2] * n_plots +\n n_plots + 1, 3))\n', (6057, 6148), True, 'import numpy as np\n'), ((6994, 7007), 'numpy.squeeze', 'np.squeeze', (['m'], {}), '(m)\n', (7004, 7007), True, 'import numpy as np\n'), ((7437, 7457), 'numpy.sqrt', 'np.sqrt', (['W.shape[-1]'], {}), '(W.shape[-1])\n', (7444, 7457), True, 'import numpy as np\n'), ((8993, 9018), 'os.path.join', 'os.path.join', (['dst', 'file_i'], {}), '(dst, file_i)\n', (9005, 9018), False, 'import os\n'), ((10646, 10671), 'tensorflow.reshape', 'tf.reshape', (['z', '[ksize, 1]'], {}), '(z, [ksize, 1])\n', (10656, 10671), True, 'import tensorflow as tf\n'), ((10673, 10698), 'tensorflow.reshape', 'tf.reshape', (['z', '[1, ksize]'], {}), '(z, [1, ksize])\n', (10683, 10698), True, 'import tensorflow as tf\n'), ((11823, 11852), 'tensorflow.linspace', 'tf.linspace', (['(-3.0)', '(3.0)', 'ksize'], {}), '(-3.0, 3.0, ksize)\n', (11834, 11852), True, 'import tensorflow as tf\n'), ((12381, 12403), 'os.path.exists', 'os.path.exists', (['file_i'], {}), '(file_i)\n', (12395, 12403), False, 'import os\n'), ((13186, 13211), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (13201, 13211), False, 'import os\n'), ((13693, 13708), 'tensorflow.stack', 'tf.stack', (['shape'], {}), '(shape)\n', (13701, 13708), True, 'import tensorflow as tf\n'), ((14168, 14183), 'tensorflow.stack', 'tf.stack', (['shape'], {}), '(shape)\n', (14176, 14183), True, 'import tensorflow as tf\n'), ((19831, 19885), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '[-1, dims[1] * dims[2] * dims[3]]'}), '(x, shape=[-1, dims[1] * dims[2] * dims[3]])\n', (19841, 19885), True, 'import tensorflow as tf\n'), ((20853, 20905), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, x_dim, x_dim, 1]'], {'name': '"""reshape"""'}), "(x, [-1, x_dim, x_dim, 1], name='reshape')\n", 
(20863, 20905), True, 'import tensorflow as tf\n'), ((1449, 1479), 'tarfile.open', 'tarfile.open', (['filepath', '"""r:gz"""'], {}), "(filepath, 'r:gz')\n", (1461, 1479), False, 'import tarfile\n'), ((2350, 2359), 'numpy.abs', 'np.abs', (['s'], {}), '(s)\n', (2356, 2359), True, 'import numpy as np\n'), ((5111, 5126), 'numpy.sqrt', 'np.sqrt', (['n_imgs'], {}), '(n_imgs)\n', (5118, 5126), True, 'import numpy as np\n'), ((6247, 6345), 'numpy.ones', 'np.ones', (['(images.shape[1] * n_plots + n_plots + 1, images.shape[2] * n_plots +\n n_plots + 1, 1)'], {}), '((images.shape[1] * n_plots + n_plots + 1, images.shape[2] * n_plots +\n n_plots + 1, 1))\n', (6254, 6345), True, 'import numpy as np\n'), ((7886, 7921), 'numpy.squeeze', 'np.squeeze', (['W[:, :, :, this_filter]'], {}), '(W[:, :, :, this_filter])\n', (7896, 7921), True, 'import numpy as np\n'), ((8562, 8582), 'os.path.join', 'os.path.join', (['dst', 'f'], {}), '(dst, f)\n', (8574, 8582), False, 'import os\n'), ((8957, 8977), 'os.path.join', 'os.path.join', (['dst', 'f'], {}), '(dst, f)\n', (8969, 8977), False, 'import os\n'), ((9046, 9061), 'os.listdir', 'os.listdir', (['dst'], {}), '(dst)\n', (9056, 9061), False, 'import os\n'), ((15762, 15807), 'tensorflow.contrib.layers.xavier_initializer_conv2d', 'tf.contrib.layers.xavier_initializer_conv2d', ([], {}), '()\n', (15805, 15807), True, 'import tensorflow as tf\n'), ((16080, 16108), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (16103, 16108), True, 'import tensorflow as tf\n'), ((17240, 17285), 'tensorflow.contrib.layers.xavier_initializer_conv2d', 'tf.contrib.layers.xavier_initializer_conv2d', ([], {}), '()\n', (17283, 17285), True, 'import tensorflow as tf\n'), ((17749, 17777), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (17772, 17777), True, 'import tensorflow as tf\n'), ((18996, 19034), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', 
([], {}), '()\n', (19032, 19034), True, 'import tensorflow as tf\n'), ((19172, 19200), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (19195, 19200), True, 'import tensorflow as tf\n'), ((19271, 19286), 'tensorflow.matmul', 'tf.matmul', (['x', 'W'], {}), '(x, W)\n', (19280, 19286), True, 'import tensorflow as tf\n'), ((20936, 20956), 'numpy.sqrt', 'np.sqrt', (['(n_input / 3)'], {}), '(n_input / 3)\n', (20943, 20956), True, 'import numpy as np\n'), ((21056, 21108), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, x_dim, x_dim, 3]'], {'name': '"""reshape"""'}), "(x, [-1, x_dim, x_dim, 3], name='reshape')\n", (21066, 21108), True, 'import tensorflow as tf\n'), ((21163, 21213), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, 1, 1, n_input]'], {'name': '"""reshape"""'}), "(x, [-1, 1, 1, n_input], name='reshape')\n", (21173, 21213), True, 'import tensorflow as tf\n'), ((2705, 2716), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (2713, 2716), True, 'import tensorflow as tf\n'), ((6419, 6514), 'numpy.ones', 'np.ones', (['(images.shape[1] * n_plots + n_plots + 1, images.shape[2] * n_plots +\n n_plots + 1)'], {}), '((images.shape[1] * n_plots + n_plots + 1, images.shape[2] * n_plots +\n n_plots + 1))\n', (6426, 6514), True, 'import numpy as np\n'), ((10073, 10094), 'tensorflow.sqrt', 'tf.sqrt', (['(2.0 * 3.1415)'], {}), '(2.0 * 3.1415)\n', (10080, 10094), True, 'import tensorflow as tf\n'), ((14815, 14830), 'tensorflow.log', 'tf.log', (['(z + eps)'], {}), '(z + eps)\n', (14821, 14830), True, 'import tensorflow as tf\n'), ((14862, 14883), 'tensorflow.log', 'tf.log', (['(1.0 - z + eps)'], {}), '(1.0 - z + eps)\n', (14868, 14883), True, 'import tensorflow as tf\n'), ((20964, 20984), 'numpy.sqrt', 'np.sqrt', (['(n_input / 3)'], {}), '(n_input / 3)\n', (20971, 20984), True, 'import numpy as np\n'), ((21011, 21031), 'numpy.sqrt', 'np.sqrt', (['(n_input / 3)'], {}), '(n_input / 3)\n', (21018, 21031), True, 'import numpy as np\n'), 
((9960, 9981), 'tensorflow.pow', 'tf.pow', (['(x - mean)', '(2.0)'], {}), '(x - mean, 2.0)\n', (9966, 9981), True, 'import tensorflow as tf\n'), ((12873, 12897), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (12885, 12897), False, 'import os\n'), ((10018, 10037), 'tensorflow.pow', 'tf.pow', (['stddev', '(2.0)'], {}), '(stddev, 2.0)\n', (10024, 10037), True, 'import tensorflow as tf\n'), ((17449, 17460), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (17457, 17460), True, 'import tensorflow as tf\n')] |
import numpy as np
import logging
from collections import Counter
import pandas as pd
import jieba
import shelve
import gensim
# Show gensim/word2vec progress messages: INFO-level logging on the root logger.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
# model = gensim.models.word2vec.Word2Vec.load('F:\\YX\\word2vec\\word2vec\\word2vec_wx')
# Computes the transition probability p(wk|wi) of a word with respect to every word in the model.
def predict_proba(oword, iword):
    """Log-probability-style score of ``oword`` given ``iword``, read from
    the module-level word2vec ``model`` (uses its syn1 weights and the
    vocabulary entry's code/point attributes)."""
    # Embedding vector of the conditioning (input) word.
    input_vec = model[iword]
    # Vocabulary entry of the output word, carrying its code/point path.
    vocab_entry = model.wv.vocab[oword]
    path_weights = model.trainables.syn1[vocab_entry.point].T
    scores = np.dot(input_vec, path_weights)
    # Accumulate the log-sigmoid terms along the output word's path.
    return -sum(np.logaddexp(0, -scores) + vocab_entry.code * scores)
# The product of every word's transition probability given wi is p(content|wi);
# a larger p(content|wi) means the content is more probable conditioned on seeing wi,
# so sorting all words by p(content|wi) in descending order puts the most important
# words first — those should be treated as the keywords of this text.
def keywords(s):
    """Rank the tokens of ``s`` by their summed predict_proba score over
    the sequence, most important first."""
    # Keep only the tokens the trained model actually knows about.
    known = [w for w in s if w in model]
    scores = {w: sum(predict_proba(u, w) for u in known) for w in known}
    return Counter(scores).most_common()
def output_word(w1):
    """Extract the top-10 keywords of text ``w1`` (jieba tokenization +
    word2vec scoring) and persist them to the 'json/word2vec.shlve' shelf."""
    # word2vec scoring does not require stop-word removal beforehand.
    ranked = pd.Series(keywords(jieba.cut(w1)))
    # Keep only the word (first tuple element) of the n best-scored entries.
    top_n = 10
    word2vec_keys = []
    for position, entry in enumerate(ranked):
        if position >= top_n:
            break
        word2vec_keys.append(entry[0])
    shelve_file = shelve.open('json/word2vec.shlve')
    shelve_file['word2vec_keys'] = word2vec_keys
    shelve_file.close()
| [
"logging.basicConfig",
"jieba.cut",
"numpy.logaddexp",
"collections.Counter",
"numpy.dot",
"shelve.open"
] | [((134, 229), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO)\n", (153, 229), False, 'import logging\n'), ((535, 561), 'numpy.dot', 'np.dot', (['iword_vec', 'oword_l'], {}), '(iword_vec, oword_l)\n', (541, 561), True, 'import numpy as np\n'), ((1187, 1221), 'shelve.open', 'shelve.open', (['"""json/word2vec.shlve"""'], {}), "('json/word2vec.shlve')\n", (1198, 1221), False, 'import shelve\n'), ((921, 932), 'collections.Counter', 'Counter', (['ws'], {}), '(ws)\n', (928, 932), False, 'from collections import Counter\n'), ((1019, 1032), 'jieba.cut', 'jieba.cut', (['w1'], {}), '(w1)\n', (1028, 1032), False, 'import jieba\n'), ((577, 598), 'numpy.logaddexp', 'np.logaddexp', (['(0)', '(-dot)'], {}), '(0, -dot)\n', (589, 598), True, 'import numpy as np\n')] |
# Import base tools
import os
## Note, for mac osx compatability import something from shapely.geometry before importing fiona or geopandas
## https://github.com/Toblerity/Shapely/issues/553 * Import shapely before rasterio or fioana
from shapely import geometry
import rasterio
import random
from cw_tiler import main
from cw_tiler import utils
from cw_tiler import vector_utils
from cw_nets.Ternaus_tools import tn_tools
import numpy as np
import os
import random
import torch
import json
import logging
import time
import io
from tqdm import tqdm
# Setting Certificate Location for Ubuntu/Mac OS locations (Rasterio looks for certs in centos locations)
os.environ['CURL_CA_BUNDLE']='/etc/ssl/certs/ca-certificates.crt'
# Module-level logger; functions below fall back to this when no logger is passed in.
logger = logging.getLogger(__name__)
def get_processing_details(rasterPath, smallExample=False, dstkwargs=None):
    """Derive the analysis bounds and output raster profile for a source image.

    Opens ``rasterPath``, computes a UTM CRS from the image's WGS84 bounds,
    and builds a single-band uint8 GeoTIFF profile from the image's UTM VRT
    profile.

    Parameters
    ----------
    rasterPath : str
        Path (or VSI/S3 URI) of the source raster.
    smallExample : bool, optional
        When True, shrink the returned bounds to a small box (1000 m buffer
        around the centroid) for quick tests.
    dstkwargs : dict, optional
        Extra creation options merged into the output profile; defaults to a
        tiled, LZW-compressed, 512x512-blocked layout.  (Fix: this was a
        mutable default argument; it is now created fresh per call.)

    Returns
    -------
    (shapely geometry, dict)
        Bounds of the area to process (in UTM coordinates) and the rasterio
        profile to use for output files.
    """
    if dstkwargs is None:
        # Fresh dict per call avoids the shared-mutable-default pitfall.
        dstkwargs = {"nodata": 0,
                     "interleave": "pixel",
                     "tiled": True,
                     "blockxsize": 512,
                     "blockysize": 512,
                     "compress": "LZW"}

    with rasterio.open(rasterPath) as src:
        # Get Lat, Lon bounds of the Raster (src)
        wgs_bounds = utils.get_wgs84_bounds(src)
        # Use Lat, Lon location of Image to get UTM Zone / UTM projection
        utm_crs = utils.calculate_UTM_crs(wgs_bounds)
        # Calculate Raster bounds in UTM coordinates
        utm_bounds = utils.get_utm_bounds(src, utm_crs)
        vrt_profile = utils.get_utm_vrt_profile(src,
                                                 crs=utm_crs,
                                                 )
    dst_profile = vrt_profile
    # Single-band uint8 GeoTIFF; caller-supplied options take precedence.
    dst_profile.update({'count': 1,
                        'dtype': rasterio.uint8,
                        'driver': "GTiff",
                        })
    dst_profile.update(dstkwargs)

    rasterBounds = geometry.box(*utm_bounds)
    if smallExample:
        rasterBounds = geometry.box(*rasterBounds.centroid.buffer(1000).bounds)

    return rasterBounds, dst_profile
def generate_cells_list_dict(rasterBounds, cell_size_meters, stride_size_meters, tile_size_pixels, quad_space=True):
    """Build the analysis grid (cell bounding boxes) covering ``rasterBounds``.

    Parameters
    ----------
    rasterBounds : shapely geometry
        Area to cover; only its ``.bounds`` are used.
    cell_size_meters : float
        Edge length of each analysis cell.
    stride_size_meters : float
        Step between neighbouring cells (cells overlap when smaller than
        the cell size).
    tile_size_pixels : int
        Unused here; kept for backward compatibility with existing callers.
    quad_space : bool, optional
        Forwarded to ``main.calculate_analysis_grid``.  Fix: this was
        previously hard-coded to ``True``, silently ignoring the argument.

    Returns
    -------
    Mapping of grid id -> list of cell bounding boxes, as produced by
    ``main.calculate_analysis_grid``.
    """
    return main.calculate_analysis_grid(rasterBounds.bounds,
                                         stride_size_meters=stride_size_meters,
                                         cell_size_meters=cell_size_meters,
                                         quad_space=quad_space)
def createRasterMask(rasterPath,
                     cells_list_dict,
                     dataLocation,
                     outputName,
                     dst_profile,
                     modelPath,
                     tile_size_pixels,
                     logger=None):
    """Run the TernausNet model over every grid cell of a raster and write
    per-grid mask / contour / count GeoTIFFs.

    Requires a CUDA-capable GPU (tiles are moved to the GPU via .cuda()).

    Parameters
    ----------
    rasterPath : str
        Source raster to tile and run inference on.
    cells_list_dict : dict
        Mapping of grid id -> list of (ll_x, ll_y, ur_x, ur_y) cell bounds
        in the CRS of ``dst_profile``.
    dataLocation : str
        Directory where the output GeoTIFFs are written.
    outputName : str
        Base output file name; its '.tif' suffix is rewritten with
        per-grid '<id>_mask/_contour/_count.tif' suffixes.
    dst_profile : dict
        rasterio profile used for every output file (must contain 'crs').
    modelPath : str
        Path to the serialized model loaded by tn_tools.get_model.
    tile_size_pixels : int
        Pixel size of each inference tile.
    logger : logging.Logger, optional
        Logger used for write failures; defaults to this module's logger.

    Returns
    -------
    list of dict
        One {'mask': path, 'contour': path, 'count': path} dict per grid id.
    """
    logger = logger or logging.getLogger(__name__)
    mask_dict_list = []
    model = tn_tools.get_model(modelPath)
    # NOTE(review): these three assignments are dead code — they are
    # overwritten with per-grid names at the top of the loop below.
    outputTifMask = os.path.join(dataLocation, outputName.replace('.tif', '_mask.tif'))
    outputTifCountour = os.path.join(dataLocation, outputName.replace('.tif', '_contour.tif'))
    outputTifCount = os.path.join(dataLocation, outputName.replace('.tif', '_count.tif'))
    # define Image_transform for Tile
    img_transform = tn_tools.get_img_transform()
    # Open Raster File
    with rasterio.open(rasterPath) as src:
        for cells_list_id, cells_list in cells_list_dict.items():
            # Per-grid output paths, suffixed with the grid id.
            outputTifMask = os.path.join(dataLocation, outputName.replace('.tif', '{}_mask.tif'.format(cells_list_id)))
            outputTifCountour = os.path.join(dataLocation, outputName.replace('.tif', '{}_contour.tif'.format(cells_list_id)))
            outputTifCount = os.path.join(dataLocation, outputName.replace('.tif', '{}_count.tif'.format(cells_list_id)))
            # Open Results TIF
            with rasterio.open(outputTifMask,
                          'w',
                          **dst_profile) as dst, \
                rasterio.open(outputTifCountour,
                              'w',
                              **dst_profile) as dst_countour, \
                rasterio.open(outputTifCount,
                              'w',
                              **dst_profile) as dst_count:
                # NOTE(review): src_profile is never used below.
                src_profile = src.profile
                # NOTE(review): message typo ("interating") left as-is; also
                # assumes grid id 0 exists in cells_list_dict.
                print("start interating through {} cells".format(len(cells_list_dict[0])))
                for cell_selection in tqdm(cells_list):
                    # Break up cell into four gorners
                    ll_x, ll_y, ur_x, ur_y = cell_selection
                    # Get Tile from bounding box
                    tile, mask, window, window_transform = main.tile_utm(src, ll_x, ll_y, ur_x, ur_y, indexes=None, tilesize=tile_size_pixels, nodata=None, alpha=None,
                             dst_crs=dst_profile['crs'])
                    img = tn_tools.reform_tile(tile)
                    # Pad to the model's expected size; pads are undone on write.
                    img, pads = tn_tools.pad(img)
                    # Normalize to [0, 1], apply transforms, move to GPU, add batch dim.
                    input_img = torch.unsqueeze(img_transform(img / 255).cuda(), dim=0)
                    predictDict = tn_tools.predict(model, input_img, pads)
                    # Returns predictDict = {'mask': mask, # Polygon Results for detection of buildings
                    #                        'contour': contour,  # Contour results for detecting edge of buildings
                    #                        'seed': seed, # Mix of Contour and Mask for used by watershed function
                    #                        'labels': labels # Result of watershed function
                    #}
                    try:
                        dst.write(tn_tools.unpad(predictDict['mask'], pads).astype(np.uint8), window=window, indexes=1)
                        # NOTE(review): the contour file is written from the
                        # 'seed' layer, not 'contour' — confirm this is intended.
                        dst_countour.write(tn_tools.unpad(predictDict['seed'], pads).astype(np.uint8), window=window, indexes=1)
                        # Count raster: 1 everywhere a tile covered, used later
                        # to average overlapping tiles.
                        dst_count.write(np.ones(predictDict['labels'].shape).astype(np.uint8), window=window, indexes=1)
                    except (SystemExit, KeyboardInterrupt):
                        raise
                    except Exception:
                        # Best-effort: log the failed window and keep going.
                        logger.error("Failed To write tile:")
                        logger.error("Failed window: {}".format(window))
                        logger.error("Failed cell_section: {}".format(cell_selection))
            resultDict = {'mask': outputTifMask,
                          'contour': outputTifCountour,
                          'count': outputTifCount}
            mask_dict_list.append(resultDict)
    return mask_dict_list
def process_results_mask(mask_dict_list, outputNameTiff, delete_tmp=True):
    """Merge the per-grid mask/count rasters into one binary mask GeoTIFF.

    For every block window of the output, the per-grid masks are summed,
    divided by the coverage count, and thresholded to 0/1.

    Parameters
    ----------
    mask_dict_list : list of dict
        {'mask': path, 'contour': path, 'count': path} dicts as returned
        by createRasterMask.
    outputNameTiff : str
        Path of the merged output GeoTIFF (profile copied from the first
        mask raster).
    delete_tmp : bool, optional
        Currently unused; kept for backward compatibility.

    Returns
    -------
    dict
        {'mask': outputNameTiff}
    """
    src_mask_list = []
    src_countour_list = []
    src_count_list = []
    try:
        for resultDict in tqdm(mask_dict_list):
            src_mask_list.append(rasterio.open(resultDict['mask']))
            src_countour_list.append(rasterio.open(resultDict['contour']))
            src_count_list.append(rasterio.open(resultDict['count']))
        src_mask_profile = src_mask_list[0].profile
        with rasterio.open(outputNameTiff,
                          'w',
                          **src_mask_profile) as dst:
            windows = [window for ij, window in dst.block_windows()]
            for window in tqdm(windows):
                # Sum each grid's contribution for this window.
                firstCell = True
                for src_mask, src_contour, src_count in zip(src_mask_list, src_countour_list, src_count_list):
                    if firstCell:
                        data_mask = src_mask.read(window=window)
                        data_count = src_count.read(window=window)
                        firstCell = False
                    else:
                        data_mask += src_mask.read(window=window)
                        data_count += src_count.read(window=window)
                # Average by coverage count, then binarize.
                data_mask = (data_mask / data_count).astype(np.uint8)
                data_mask = (data_mask >= 1.0).astype(np.uint8)
                dst.write(data_mask, window=window)
    finally:
        # Fix: the source datasets were previously never closed (file
        # handle leak, one per input raster).
        for src in src_mask_list + src_countour_list + src_count_list:
            src.close()
    return {'mask': outputNameTiff}
def polygonize_results_mask(maskDict):
    """Vectorize the binary mask raster into GeoJSON-style features
    reprojected to EPSG:4326.

    Returns (features, source rasterio profile).
    """
    features = []
    with rasterio.open(maskDict['mask']) as src:
        src_profile = src.profile
        band = src.read(1)
        # Only polygonize pixels flagged as foreground.
        foreground = band > 0
        shape_iter = rasterio.features.shapes(band, mask=foreground, transform=src.transform)
        for _idx, (geom, val) in tqdm(enumerate(shape_iter)):
            wgs84_geom = rasterio.warp.transform_geom(src.crs, 'EPSG:4326', geom, precision=6)
            features.append({"type": "Feature",
                             'properties': {'raster_val': val},
                             'geometry': wgs84_geom})
    return features, src_profile
def write_results_tojson(results, dst_name):
    """Wrap ``results`` in a GeoJSON FeatureCollection and write it to
    the file ``dst_name``."""
    feature_collection = {
        'type': 'FeatureCollection',
        'features': list(results)}
    with open(dst_name, 'w') as out_file:
        json.dump(feature_collection, out_file)
| [
"logging.getLogger",
"cw_nets.Ternaus_tools.tn_tools.get_model",
"cw_nets.Ternaus_tools.tn_tools.predict",
"cw_nets.Ternaus_tools.tn_tools.unpad",
"cw_nets.Ternaus_tools.tn_tools.get_img_transform",
"shapely.geometry.box",
"cw_tiler.main.calculate_analysis_grid",
"cw_tiler.utils.calculate_UTM_crs",
... | [((735, 762), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (752, 762), False, 'import logging\n'), ((2042, 2067), 'shapely.geometry.box', 'geometry.box', (['*utm_bounds'], {}), '(*utm_bounds)\n', (2054, 2067), False, 'from shapely import geometry\n'), ((2357, 2502), 'cw_tiler.main.calculate_analysis_grid', 'main.calculate_analysis_grid', (['rasterBounds.bounds'], {'stride_size_meters': 'stride_size_meters', 'cell_size_meters': 'cell_size_meters', 'quad_space': '(True)'}), '(rasterBounds.bounds, stride_size_meters=\n stride_size_meters, cell_size_meters=cell_size_meters, quad_space=True)\n', (2385, 2502), False, 'from cw_tiler import main\n'), ((3070, 3099), 'cw_nets.Ternaus_tools.tn_tools.get_model', 'tn_tools.get_model', (['modelPath'], {}), '(modelPath)\n', (3088, 3099), False, 'from cw_nets.Ternaus_tools import tn_tools\n'), ((3433, 3461), 'cw_nets.Ternaus_tools.tn_tools.get_img_transform', 'tn_tools.get_img_transform', ([], {}), '()\n', (3459, 3461), False, 'from cw_nets.Ternaus_tools import tn_tools\n'), ((7289, 7309), 'tqdm.tqdm', 'tqdm', (['mask_dict_list'], {}), '(mask_dict_list)\n', (7293, 7309), False, 'from tqdm import tqdm\n'), ((1184, 1209), 'rasterio.open', 'rasterio.open', (['rasterPath'], {}), '(rasterPath)\n', (1197, 1209), False, 'import rasterio\n'), ((1290, 1317), 'cw_tiler.utils.get_wgs84_bounds', 'utils.get_wgs84_bounds', (['src'], {}), '(src)\n', (1312, 1317), False, 'from cw_tiler import utils\n'), ((1410, 1445), 'cw_tiler.utils.calculate_UTM_crs', 'utils.calculate_UTM_crs', (['wgs_bounds'], {}), '(wgs_bounds)\n', (1433, 1445), False, 'from cw_tiler import utils\n'), ((1522, 1556), 'cw_tiler.utils.get_utm_bounds', 'utils.get_utm_bounds', (['src', 'utm_crs'], {}), '(src, utm_crs)\n', (1542, 1556), False, 'from cw_tiler import utils\n'), ((1580, 1623), 'cw_tiler.utils.get_utm_vrt_profile', 'utils.get_utm_vrt_profile', (['src'], {'crs': 'utm_crs'}), '(src, crs=utm_crs)\n', (1605, 1623), False, 'from 
cw_tiler import utils\n'), ((2995, 3022), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3012, 3022), False, 'import logging\n'), ((3494, 3519), 'rasterio.open', 'rasterio.open', (['rasterPath'], {}), '(rasterPath)\n', (3507, 3519), False, 'import rasterio\n'), ((7623, 7677), 'rasterio.open', 'rasterio.open', (['outputNameTiff', '"""w"""'], {}), "(outputNameTiff, 'w', **src_mask_profile)\n", (7636, 7677), False, 'import rasterio\n'), ((7873, 7886), 'tqdm.tqdm', 'tqdm', (['windows'], {}), '(windows)\n', (7877, 7886), False, 'from tqdm import tqdm\n'), ((8783, 8814), 'rasterio.open', 'rasterio.open', (["maskDict['mask']"], {}), "(maskDict['mask'])\n", (8796, 8814), False, 'import rasterio\n'), ((9545, 9571), 'json.dump', 'json.dump', (['collection', 'dst'], {}), '(collection, dst)\n', (9554, 9571), False, 'import json\n'), ((7358, 7391), 'rasterio.open', 'rasterio.open', (["resultDict['mask']"], {}), "(resultDict['mask'])\n", (7371, 7391), False, 'import rasterio\n'), ((7426, 7462), 'rasterio.open', 'rasterio.open', (["resultDict['contour']"], {}), "(resultDict['contour'])\n", (7439, 7462), False, 'import rasterio\n'), ((7495, 7529), 'rasterio.open', 'rasterio.open', (["resultDict['count']"], {}), "(resultDict['count'])\n", (7508, 7529), False, 'import rasterio\n'), ((9041, 9110), 'rasterio.warp.transform_geom', 'rasterio.warp.transform_geom', (['src.crs', '"""EPSG:4326"""', 'geom'], {'precision': '(6)'}), "(src.crs, 'EPSG:4326', geom, precision=6)\n", (9069, 9110), False, 'import rasterio\n'), ((4014, 4062), 'rasterio.open', 'rasterio.open', (['outputTifMask', '"""w"""'], {}), "(outputTifMask, 'w', **dst_profile)\n", (4027, 4062), False, 'import rasterio\n'), ((4159, 4211), 'rasterio.open', 'rasterio.open', (['outputTifCountour', '"""w"""'], {}), "(outputTifCountour, 'w', **dst_profile)\n", (4172, 4211), False, 'import rasterio\n'), ((4317, 4366), 'rasterio.open', 'rasterio.open', (['outputTifCount', '"""w"""'], {}), "(outputTifCount, 
'w', **dst_profile)\n", (4330, 4366), False, 'import rasterio\n'), ((4624, 4640), 'tqdm.tqdm', 'tqdm', (['cells_list'], {}), '(cells_list)\n', (4628, 4640), False, 'from tqdm import tqdm\n'), ((8951, 9018), 'rasterio.features.shapes', 'rasterio.features.shapes', (['image'], {'mask': 'mask', 'transform': 'src.transform'}), '(image, mask=mask, transform=src.transform)\n', (8975, 9018), False, 'import rasterio\n'), ((4890, 5031), 'cw_tiler.main.tile_utm', 'main.tile_utm', (['src', 'll_x', 'll_y', 'ur_x', 'ur_y'], {'indexes': 'None', 'tilesize': 'tile_size_pixels', 'nodata': 'None', 'alpha': 'None', 'dst_crs': "dst_profile['crs']"}), "(src, ll_x, ll_y, ur_x, ur_y, indexes=None, tilesize=\n tile_size_pixels, nodata=None, alpha=None, dst_crs=dst_profile['crs'])\n", (4903, 5031), False, 'from cw_tiler import main\n'), ((5092, 5118), 'cw_nets.Ternaus_tools.tn_tools.reform_tile', 'tn_tools.reform_tile', (['tile'], {}), '(tile)\n', (5112, 5118), False, 'from cw_nets.Ternaus_tools import tn_tools\n'), ((5151, 5168), 'cw_nets.Ternaus_tools.tn_tools.pad', 'tn_tools.pad', (['img'], {}), '(img)\n', (5163, 5168), False, 'from cw_nets.Ternaus_tools import tn_tools\n'), ((5293, 5333), 'cw_nets.Ternaus_tools.tn_tools.predict', 'tn_tools.predict', (['model', 'input_img', 'pads'], {}), '(model, input_img, pads)\n', (5309, 5333), False, 'from cw_nets.Ternaus_tools import tn_tools\n'), ((5801, 5842), 'cw_nets.Ternaus_tools.tn_tools.unpad', 'tn_tools.unpad', (["predictDict['mask']", 'pads'], {}), "(predictDict['mask'], pads)\n", (5815, 5842), False, 'from cw_nets.Ternaus_tools import tn_tools\n'), ((5930, 5971), 'cw_nets.Ternaus_tools.tn_tools.unpad', 'tn_tools.unpad', (["predictDict['seed']", 'pads'], {}), "(predictDict['seed'], pads)\n", (5944, 5971), False, 'from cw_nets.Ternaus_tools import tn_tools\n'), ((6056, 6092), 'numpy.ones', 'np.ones', (["predictDict['labels'].shape"], {}), "(predictDict['labels'].shape)\n", (6063, 6092), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 9 13:27:16 2019
@author: <NAME> and <NAME>
"""
import os
import numpy as np
from osgeo import gdal
#datagen = ImageDataGenerator()
#TASK TO DO.
#THERE ARE TWO IMAGES TO LOAD HERE. 1 IS THE MAIN SAT IMAGE AND THE OTHER IS THE WATER IMAGE.
def load_data(batch_size=1, is_testing=False):
data_type = "train" if not is_testing else "test"
data_main='C:\\Users\\user\\Desktop\\Projects\\ImageTileFD\\data_roads\\'
data_sat='C:\\Users\\user\\Desktop\\Projects\\ImageTileFD\\data_roads\\Data\\'
data_water='C:\\Users\\user\\Desktop\\Projects\\ImageTileFD\\data_roads\\Labels\\'
images_water = os.listdir(data_water)
images_sat = os.listdir(data_sat)
args = np.intersect1d(images_water, images_sat)
batch_images = np.random.choice(args, size=batch_size)
sat_data = []
water_data = []
for img_path in batch_images:
sat_img = gdal.Open(data_sat+img_path).ReadAsArray()
water_img=gdal.Open(data_water+img_path).ReadAsArray()
water_img[water_img!=water_img]= 0
water_img[water_img>0] = 1
sat_img = np.einsum('ijk->jki', sat_img)
sat_img = (sat_img - sat_img.min()) / (sat_img.max() - sat_img.min())
pad = np.zeros((256,256,3))
pad_w = np.zeros((256,256))
pad[:220,:220,:]=sat_img
pad_w[:220,:220]=water_img
# sat_img = (np.zeros(256,256,3)[:220,:220]=sat_img)
sat_data.append(pad)
water_data.append(pad_w)
water_data = np.array(water_data)
water_data = np.expand_dims(water_data, axis=-1)
sat_data = np.array(sat_data)
return water_data,sat_data
def load_batch(batch_size=1, is_testing=False):
data_type = "train" if not is_testing else "test"
data_main='C:\\Users\\user\\Desktop\\Projects\\ImageTileFD\\data_roads\\'
data_sat='C:\\Users\\user\\Desktop\\Projects\\ImageTileFD\\data_roads\\Data\\'
data_water='C:\\Users\\user\\Desktop\\Projects\\ImageTileFD\\data_roads\\Labels\\'
images_water = os.listdir(data_water)
images_sat = os.listdir(data_sat)
args = np.intersect1d(images_water, images_sat)
#batch_images = np.random.choice(os.listdir(data_sat), size=batch_size)
n_batches = int(len(args) / batch_size)
for i in range(n_batches-1):
batch_images = args[i*batch_size:(i+1)*batch_size]
sat_data = []
water_data = []
for img_path in batch_images:
# print(data_sat+img_path
sat_img = gdal.Open(data_sat+img_path).ReadAsArray()
water_img=gdal.Open(data_water+img_path).ReadAsArray()
water_img[water_img!=water_img]= 0
water_img[water_img>0] = 1
sat_img = np.einsum('ijk->jki', sat_img)
sat_img = (sat_img - sat_img.min()) / (sat_img.max() - sat_img.min())
pad = np.zeros((256,256,3))
pad_w = np.zeros((256,256))
pad[:220,:220,:]=sat_img
pad_w[:220,:220]=water_img
# sat_img = (np.zeros(256,256,3)[:220,:220]=sat_img)
sat_data.append(pad)
water_data.append(pad_w)
water_data = np.array(water_data)
water_data = np.expand_dims(water_data, axis=-1)
sat_data = np.array(sat_data)
yield water_data,sat_data
##print(load_data(batch_size=10))
#image_generator=load_batch(batch_size=500)
#water_data, sat_data=next(image_generator)
## | [
"osgeo.gdal.Open",
"numpy.intersect1d",
"os.listdir",
"numpy.random.choice",
"numpy.array",
"numpy.zeros",
"numpy.einsum",
"numpy.expand_dims"
] | [((677, 699), 'os.listdir', 'os.listdir', (['data_water'], {}), '(data_water)\n', (687, 699), False, 'import os\n'), ((718, 738), 'os.listdir', 'os.listdir', (['data_sat'], {}), '(data_sat)\n', (728, 738), False, 'import os\n'), ((757, 797), 'numpy.intersect1d', 'np.intersect1d', (['images_water', 'images_sat'], {}), '(images_water, images_sat)\n', (771, 797), True, 'import numpy as np\n'), ((824, 863), 'numpy.random.choice', 'np.random.choice', (['args'], {'size': 'batch_size'}), '(args, size=batch_size)\n', (840, 863), True, 'import numpy as np\n'), ((1574, 1594), 'numpy.array', 'np.array', (['water_data'], {}), '(water_data)\n', (1582, 1594), True, 'import numpy as np\n'), ((1613, 1648), 'numpy.expand_dims', 'np.expand_dims', (['water_data'], {'axis': '(-1)'}), '(water_data, axis=-1)\n', (1627, 1648), True, 'import numpy as np\n'), ((1665, 1683), 'numpy.array', 'np.array', (['sat_data'], {}), '(sat_data)\n', (1673, 1683), True, 'import numpy as np\n'), ((2112, 2134), 'os.listdir', 'os.listdir', (['data_water'], {}), '(data_water)\n', (2122, 2134), False, 'import os\n'), ((2153, 2173), 'os.listdir', 'os.listdir', (['data_sat'], {}), '(data_sat)\n', (2163, 2173), False, 'import os\n'), ((2192, 2232), 'numpy.intersect1d', 'np.intersect1d', (['images_water', 'images_sat'], {}), '(images_water, images_sat)\n', (2206, 2232), True, 'import numpy as np\n'), ((1166, 1196), 'numpy.einsum', 'np.einsum', (['"""ijk->jki"""', 'sat_img'], {}), "('ijk->jki', sat_img)\n", (1175, 1196), True, 'import numpy as np\n'), ((1298, 1321), 'numpy.zeros', 'np.zeros', (['(256, 256, 3)'], {}), '((256, 256, 3))\n', (1306, 1321), True, 'import numpy as np\n'), ((1337, 1357), 'numpy.zeros', 'np.zeros', (['(256, 256)'], {}), '((256, 256))\n', (1345, 1357), True, 'import numpy as np\n'), ((3268, 3288), 'numpy.array', 'np.array', (['water_data'], {}), '(water_data)\n', (3276, 3288), True, 'import numpy as np\n'), ((3311, 3346), 'numpy.expand_dims', 'np.expand_dims', (['water_data'], {'axis': 
'(-1)'}), '(water_data, axis=-1)\n', (3325, 3346), True, 'import numpy as np\n'), ((3377, 3395), 'numpy.array', 'np.array', (['sat_data'], {}), '(sat_data)\n', (3385, 3395), True, 'import numpy as np\n'), ((2821, 2851), 'numpy.einsum', 'np.einsum', (['"""ijk->jki"""', 'sat_img'], {}), "('ijk->jki', sat_img)\n", (2830, 2851), True, 'import numpy as np\n'), ((2954, 2977), 'numpy.zeros', 'np.zeros', (['(256, 256, 3)'], {}), '((256, 256, 3))\n', (2962, 2977), True, 'import numpy as np\n'), ((2997, 3017), 'numpy.zeros', 'np.zeros', (['(256, 256)'], {}), '((256, 256))\n', (3005, 3017), True, 'import numpy as np\n'), ((960, 990), 'osgeo.gdal.Open', 'gdal.Open', (['(data_sat + img_path)'], {}), '(data_sat + img_path)\n', (969, 990), False, 'from osgeo import gdal\n'), ((1022, 1054), 'osgeo.gdal.Open', 'gdal.Open', (['(data_water + img_path)'], {}), '(data_water + img_path)\n', (1031, 1054), False, 'from osgeo import gdal\n'), ((2599, 2629), 'osgeo.gdal.Open', 'gdal.Open', (['(data_sat + img_path)'], {}), '(data_sat + img_path)\n', (2608, 2629), False, 'from osgeo import gdal\n'), ((2665, 2697), 'osgeo.gdal.Open', 'gdal.Open', (['(data_water + img_path)'], {}), '(data_water + img_path)\n', (2674, 2697), False, 'from osgeo import gdal\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 13 10:27:00 2017
@author: ben
"""
import numpy as np
class ATL06_pair:
def __init__(self, D6=None, pair_data=None):
if D6 is not None:
#initializes based on input D6, assumed to contain one pair
# 2a. Set pair_data x and y
self.x=np.mean(D6.x_atc) # mean of the pair, nan if not both defined
self.y=np.mean(D6.y_atc)
self.dh_dx=D6.dh_fit_dx
self.dh_dx.shape=[1,2]
self.dh_dy=np.mean(D6.dh_fit_dy)
self.delta_time=np.mean(D6.delta_time)
self.segment_id=np.mean(D6.segment_id)
self.cycle=np.mean(D6.cycle_number)
self.h=D6.h_li
self.h.shape=[1,2]
self.valid=np.zeros(1, dtype='bool')
elif pair_data is not None:
# initializes based on a list of pairs, to produce a structure with numpy arrays for fields
for field in ('x','y','dh_dx','dh_dy','delta_time','segment_id','cycle','h','valid'):
setattr(self, field, np.c_[[getattr(this_pair,field).ravel() for this_pair in pair_data]])
else:
#initializes an empty structure
for field in ('x','y','dh_dx','dh_dy','delta_time','segment_id','cycle','h','valid'):
setattr(self, field, np.NaN)
def __getitem__(self, key):
temp06=ATL06_pair()
for field in ('x','y','dh_dx','dh_dy','delta_time','segment_id','cycle','h','valid'):
temp_field=getattr(self, field)
if len(temp_field.shape)>1 and temp_field.shape[1] > 1:
setattr(temp06, temp_field[key,:])
else:
setattr(temp06, temp_field[key])
return temp06 | [
"numpy.mean",
"numpy.zeros"
] | [((327, 344), 'numpy.mean', 'np.mean', (['D6.x_atc'], {}), '(D6.x_atc)\n', (334, 344), True, 'import numpy as np\n'), ((409, 426), 'numpy.mean', 'np.mean', (['D6.y_atc'], {}), '(D6.y_atc)\n', (416, 426), True, 'import numpy as np\n'), ((521, 542), 'numpy.mean', 'np.mean', (['D6.dh_fit_dy'], {}), '(D6.dh_fit_dy)\n', (528, 542), True, 'import numpy as np\n'), ((571, 593), 'numpy.mean', 'np.mean', (['D6.delta_time'], {}), '(D6.delta_time)\n', (578, 593), True, 'import numpy as np\n'), ((622, 644), 'numpy.mean', 'np.mean', (['D6.segment_id'], {}), '(D6.segment_id)\n', (629, 644), True, 'import numpy as np\n'), ((668, 692), 'numpy.mean', 'np.mean', (['D6.cycle_number'], {}), '(D6.cycle_number)\n', (675, 692), True, 'import numpy as np\n'), ((774, 799), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': '"""bool"""'}), "(1, dtype='bool')\n", (782, 799), True, 'import numpy as np\n')] |
import time
import torch
import torch.nn as nn
import torch.utils as utils
from torch.autograd import Variable
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
# import matplotlib
# matplotlib.use('agg')
import matplotlib.pyplot as plt
# Set Hyperparameters
# noise_type = 'gaussian_add' # Possible values: 'gaussian_add', 'noise_salt_pepper', 'noise_masking' or None
noise_types = ['gaussian_add', 'noise_salt_pepper', 'noise_masking', 'None']
# finetune = False # see below..
num_epochs_autoencoder = 2 # 10
num_epochs_classifier = 7 # 30
batch_size = 128
learning_rate = 0.001
LAYER_DIMS = [16, 8, 8]
# Check if we can use CUDA
cuda_available = torch.cuda.is_available()
# Define image transformations & Initialize datasets
mnist_transforms = transforms.Compose([transforms.ToTensor()])
mnist_train = dset.MNIST('./data', train=True, transform=mnist_transforms, download=True)
mnist_test = dset.MNIST('./data', train=False, transform=mnist_transforms, download=True)
# For reproducibility
torch.manual_seed(123)
np.random.seed(123)
# Data loaders
train_loader = torch.utils.data.DataLoader(dataset=mnist_train,
batch_size=batch_size,
shuffle=True,
num_workers=2,
drop_last=True)
testloader = torch.utils.data.DataLoader(dataset=mnist_test,
batch_size=batch_size,
shuffle=True,
num_workers=2,
drop_last=True)
# # Choose 5000 examples for transfer learning
# mask = np.random.randint(0, 60000, 5000)
# finetune_loader = torch.utils.data.DataLoader(dataset=mnist_train,
# batch_size=batch_size,
# shuffle=False,
# sampler=SubsetRandomSampler(np.where(mask)[0]),
# num_workers=2)
# Create Encoder and Decoder that subclasses nn.Module
class Encoder(nn.Module):
"""Convnet Encoder"""
def __init__(self):
super(Encoder, self).__init__()
# 28 x 28 -> 14 x 14
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=LAYER_DIMS[0], kernel_size=(3, 3), padding=1),
nn.ReLU(),
nn.BatchNorm2d(num_features=LAYER_DIMS[0]),
nn.MaxPool2d(kernel_size=(2, 2), stride=2)
)
# 14 x 14 -> 7 x 7
self.layer2 = nn.Sequential(
nn.Conv2d(in_channels=LAYER_DIMS[0], out_channels=LAYER_DIMS[1], kernel_size=(3, 3), padding=1),
nn.ReLU(),
nn.BatchNorm2d(num_features=LAYER_DIMS[1]),
nn.MaxPool2d(kernel_size=(2, 2), stride=2)
)
# 7 x 7 -> 4 x 4
self.layer3 = nn.Sequential(
nn.Conv2d(in_channels=LAYER_DIMS[1], out_channels=LAYER_DIMS[2], kernel_size=(3, 3), padding=2),
nn.ReLU(),
nn.BatchNorm2d(num_features=LAYER_DIMS[2]),
nn.MaxPool2d(kernel_size=(2, 2), stride=2)
)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
return out
class Decoder(nn.Module):
"""Convnet Decoder"""
def __init__(self):
super(Decoder, self).__init__()
# 4 x 4 -> 7 x 7
self.layer1 = nn.Sequential(
nn.ConvTranspose2d(in_channels=LAYER_DIMS[2], out_channels=LAYER_DIMS[1],
kernel_size=(3, 3), stride=2, padding=1, output_padding=0),
nn.ReLU(),
nn.BatchNorm2d(LAYER_DIMS[1]),
)
# 7 x 7 -> 14 x 14
self.layer2 = nn.Sequential(
nn.ConvTranspose2d(in_channels=LAYER_DIMS[1], out_channels=LAYER_DIMS[0],
kernel_size=(3, 3), stride=2, padding=1, output_padding=1),
nn.ReLU(),
nn.BatchNorm2d(LAYER_DIMS[0]),
)
# 14 x 14 -> 28 x 28
self.layer3 = nn.Sequential(
nn.ConvTranspose2d(in_channels=LAYER_DIMS[0], out_channels=1,
kernel_size=(3, 3), stride=2, padding=1, output_padding=1),
nn.Sigmoid(),
)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
return out
# Create a Classifer for the Encoder features
class Classifier(nn.Module):
"""Convnet Classifier"""
def __init__(self):
super(Classifier, self).__init__()
self.classifier = nn.Sequential(
nn.Conv2d(in_channels=LAYER_DIMS[2], out_channels=10, kernel_size=(4, 4), padding=0),
)
def forward(self, x):
out = self.classifier(x).squeeze()
return out
def noise_additive_gaussian(imgs, sigma=.5):
"""
Adds additive gaussian noise to images for the training of a DAE
Args:
imgs: A batch of images
sigma: Standard deviation of the gaussian noise
Returns:
imgs_n: The noisy images
"""
#######################################################################
# #
# Apply additive Gaussian noise to the images #
# #
#######################################################################
# src: https://discuss.pytorch.org/t/writing-a-simple-gaussian-noise-layer-in-pytorch/4694
mean = 0.0
noise = Variable(imgs.data.new(imgs.size()).normal_(mean, sigma))
imgs_n = imgs + noise
#######################################################################
# END OF YOUR CODE #
#######################################################################
return imgs_n
def noise_salt_pepper(imgs, noise_rate=0.5):
"""
Adds salt&pepper noise to images for the training of a DAE
Args:
imgs: A batch of images
noise_rate: Controls the amount of noise (higher=more noise)
Returns:
imgs_n: The noisy images
"""
#######################################################################
# #
# Apply Salt&Pepper noise to the images #
# #
#######################################################################
imgs_clone = imgs.clone().view(-1, 1)
num_feature = imgs_clone.size(0)
mn = imgs_clone.min()
mx = imgs_clone.max()
indices = np.random.randint(0, num_feature, int(num_feature * noise_rate))
for elem in indices:
if np.random.random() < 0.5:
imgs_clone[elem] = mn
else:
imgs_clone[elem] = mx
imgs_n = imgs_clone.view(imgs.size())
#######################################################################
# END OF YOUR CODE #
#######################################################################
return imgs_n
def noise_masking(imgs, drop_rate=0.5, tile_size=7):
"""
Randomly sets tiles of images to zero for the training of a DAE
Args:
imgs: A batch of images
drop_rate: Controls the amount of tile dropping (higher=more noise)
tile_size: The size of the tiles to be dropped in pixels
Returns:
imgs_n: The noisy images
"""
#######################################################################
# #
# Apply masking to the images #
# #
#######################################################################
imgs_clone = imgs.clone()
lenx = imgs_clone.size(2)
leny = imgs_clone.size(3)
for i in range(imgs_clone.size(0)):
for idx in range(0, lenx, tile_size):
for idy in range(0, leny, tile_size):
if np.random.random() < drop_rate:
for j in range(idx, idx + tile_size):
for k in range(idy, idy + tile_size):
imgs_clone[i, 0, j, k] = 0
imgs_n = imgs_clone
#######################################################################
# END OF YOUR CODE #
#######################################################################
return imgs_n
# Task 1: evaluate learnt model on each denoising task
# => run entire program for each noise_type
# => Save a png for each encoded + decoded noise
# => Calculate accuracy of learnt model using the classifier but without finetuning (don't change learnt model)
# Task 2: what is impact of finetuning vs fixed feature representations and how does it change with dataset size?
# => run everything once without finetuning and once with finetuning
# => run with 3 different transfer dataset sizes: 5000, 2500, 1000
for finetune in [False, True]:
# print('finetune:', finetune, '-->')
for transfer_dataset_size in [5000, 2500, 1000]:
# print('transfer_dataset_size:', transfer_dataset_size, '-->')
# Choose transfer_dataset_size examples for transfer learning
mask = np.random.randint(0, 60000, transfer_dataset_size)
finetune_loader = torch.utils.data.DataLoader(dataset=mnist_train,
batch_size=batch_size,
shuffle=False,
sampler=SubsetRandomSampler(np.where(mask)[0]),
num_workers=2)
for noise_type in noise_types:
# print('run for noise type:', noise_type, '-->')
encoder = Encoder()
decoder = Decoder()
if cuda_available:
encoder = encoder.cuda()
decoder = decoder.cuda()
# Define Loss and Optimizer for DAE training
parameters = list(encoder.parameters()) + list(decoder.parameters())
loss_func = nn.MSELoss()
optimizer = torch.optim.Adam(parameters, lr=learning_rate)
# Get noise function to be applied to images
if noise_type is 'gaussian_add':
image_fn = noise_additive_gaussian
elif noise_type is 'noise_salt_pepper':
image_fn = noise_salt_pepper
elif noise_type is 'noise_masking':
image_fn = noise_masking
else:
# Default is no noise (standard AE)
image_fn = lambda x: x
# print('--------------------------------------------------------------')
# print('---------------------- Training DAE --------------------------')
# print('--------------------------------------------------------------')
# Train the Autoencoder
for epoch in range(num_epochs_autoencoder):
losses = []
start = time.time()
for batch_index, (images, _) in enumerate(train_loader):
if cuda_available:
images = images.cuda()
images = Variable(images)
image_noised = image_fn(images)
# Training Step
optimizer.zero_grad()
output = encoder(image_noised)
output = decoder(output)
loss = loss_func(output, images)
loss.backward()
optimizer.step()
losses.append(loss.data[0])
#if batch_index % 50 == 0:
# print('Epoch: {}, Iter: {:3d}, Loss: {:.4f}'.format(epoch, batch_index, loss.data[0]))
end = time.time()
# print('Epoch: {}, Average Loss: {:.4f}, Time: {:.4f}'.format(epoch, np.mean(losses), end - start))
# Set encoder and decoder in evaluation mode to use running means and averages for Batchnorm
encoder.eval()
decoder.eval()
if transfer_dataset_size == 5000 and not finetune and noise_type is not 'None':
# save a plot only once for each noise type...
# Get a batch of test images
test_imgs, test_labels = next(iter(testloader))
if cuda_available:
test_imgs, test_labels = test_imgs.cuda(), test_labels.cuda()
test_imgs, test_labels = Variable(test_imgs), Variable(test_labels)
test_imgs_noised = image_fn(test_imgs)
output = encoder(test_imgs_noised)
output = decoder(output)
# Visualize in and output of the Autoencoder
fig_out = plt.figure('out', figsize=(10, 10))
fig_in = plt.figure('in', figsize=(10, 10))
for ind, (img_out, img_in) in enumerate(zip(output, test_imgs_noised)):
if ind > 15:
break
plt.figure('out')
fig_out.add_subplot(4, 4, ind + 1)
plt.imshow(img_out.data.cpu().numpy().reshape(28, 28), cmap='gray')
plt.axis('off')
plt.figure('in')
fig_in.add_subplot(4, 4, ind + 1)
plt.imshow(img_in.data.cpu().numpy().reshape(28, 28), cmap='gray')
plt.axis('off')
fig_in.savefig(noise_type + '-encoded.png')
fig_out.savefig(noise_type + '-decoded.png')
#plt.show()
# print('--------------------------------------------------------------')
# print('------------------- Transfer Learning ------------------------')
# print('--------------------------------------------------------------')
#######################################################################
# #
# Prepare everything for transfer learning: #
# - Build the classifier #
# - Define the optimizer #
# - Define the loss function #
# Note: The setup might be different for finetuning or fixed features #
# (see variable finetune!) #
# #
#######################################################################
clf = Classifier()
if cuda_available:
clf = clf.cuda()
if finetune:
parameters = list(encoder.parameters()) + list(clf.parameters())
encoder.train()
else:
parameters = clf.parameters()
optimizer = torch.optim.SGD(parameters, lr=learning_rate, momentum=0.9)
loss_func = nn.CrossEntropyLoss()
#######################################################################
# END OF YOUR CODE #
#######################################################################
# Train the Classifier
best_test_accuracy = 0
for epoch in range(num_epochs_classifier):
losses = []
start = time.time()
for batch_index, (images, labels) in enumerate(finetune_loader):
if cuda_available:
images, labels = images.cuda(), labels.cuda()
images, labels = Variable(images), Variable(labels)
# Training Step
optimizer.zero_grad()
output = encoder(images)
output = clf(output)
loss = loss_func(output, labels)
loss.backward()
optimizer.step()
losses.append(loss.data[0])
end = time.time()
# print('Epoch: {}, Average Loss: {:.4f}, Time: {:.4f}'.format(epoch, np.mean(losses), end - start))
#######################################################################
# #
# Evaluate the classifier on the test set by computing the accuracy #
# of the classifier #
# #
#######################################################################
clf.eval()
if finetune:
encoder.eval()
batch_accuracies = []
for batch_index, (images, labels) in enumerate(testloader):
if cuda_available:
images, labels = images.cuda(), labels.cuda()
images, labels = Variable(images), Variable(labels)
output = encoder(images)
prediction = clf(output)
prediction = torch.max(prediction, 1)[1]
correct = prediction.long().eq(labels).sum().data[0]
batch_size = labels.size(0)
batch_accuracies.append(correct * (100.0 / batch_size))
accuracy = np.array(batch_accuracies).mean()
if accuracy > best_test_accuracy:
best_test_accuracy = accuracy
#######################################################################
# END OF YOUR CODE #
#######################################################################
# print('Epoch: {}, Test Acc: {:.4f}'.format(epoch, accuracy))
# print('--------------------------------------------------------------')
clf.train()
if finetune:
encoder.train()
# print('run for noise type:', noise_type, '<-- [best test accuracy achieved:', best_test_accuracy, ']')
# print('transfer_dataset_size:', transfer_dataset_size, '<--')
# print('finetune:', finetune, '<--')
| [
"torch.nn.ReLU",
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.nn.MSELoss",
"numpy.array",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"torch.nn.Sigmoid",
"numpy.random.random",
"numpy.where",
"numpy.random.seed",
"matplotlib.pyplot.axis",
"torchvision.transforms.ToTensor",
"torc... | [((761, 786), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (784, 786), False, 'import torch\n'), ((918, 993), 'torchvision.datasets.MNIST', 'dset.MNIST', (['"""./data"""'], {'train': '(True)', 'transform': 'mnist_transforms', 'download': '(True)'}), "('./data', train=True, transform=mnist_transforms, download=True)\n", (928, 993), True, 'import torchvision.datasets as dset\n'), ((1007, 1083), 'torchvision.datasets.MNIST', 'dset.MNIST', (['"""./data"""'], {'train': '(False)', 'transform': 'mnist_transforms', 'download': '(True)'}), "('./data', train=False, transform=mnist_transforms, download=True)\n", (1017, 1083), True, 'import torchvision.datasets as dset\n'), ((1107, 1129), 'torch.manual_seed', 'torch.manual_seed', (['(123)'], {}), '(123)\n', (1124, 1129), False, 'import torch\n'), ((1130, 1149), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (1144, 1149), True, 'import numpy as np\n'), ((1181, 1301), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'mnist_train', 'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(2)', 'drop_last': '(True)'}), '(dataset=mnist_train, batch_size=batch_size,\n shuffle=True, num_workers=2, drop_last=True)\n', (1208, 1301), False, 'import torch\n'), ((1483, 1602), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'mnist_test', 'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(2)', 'drop_last': '(True)'}), '(dataset=mnist_test, batch_size=batch_size,\n shuffle=True, num_workers=2, drop_last=True)\n', (1510, 1602), False, 'import torch\n'), ((880, 901), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (899, 901), True, 'import torchvision.transforms as transforms\n'), ((9727, 9777), 'numpy.random.randint', 'np.random.randint', (['(0)', '(60000)', 'transfer_dataset_size'], {}), '(0, 60000, transfer_dataset_size)\n', (9744, 9777), True, 'import numpy as np\n'), 
((2468, 2555), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': 'LAYER_DIMS[0]', 'kernel_size': '(3, 3)', 'padding': '(1)'}), '(in_channels=1, out_channels=LAYER_DIMS[0], kernel_size=(3, 3),\n padding=1)\n', (2477, 2555), True, 'import torch.nn as nn\n'), ((2565, 2574), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2572, 2574), True, 'import torch.nn as nn\n'), ((2588, 2630), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'LAYER_DIMS[0]'}), '(num_features=LAYER_DIMS[0])\n', (2602, 2630), True, 'import torch.nn as nn\n'), ((2644, 2686), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2, 2)', 'stride': '(2)'}), '(kernel_size=(2, 2), stride=2)\n', (2656, 2686), True, 'import torch.nn as nn\n'), ((2773, 2872), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'LAYER_DIMS[0]', 'out_channels': 'LAYER_DIMS[1]', 'kernel_size': '(3, 3)', 'padding': '(1)'}), '(in_channels=LAYER_DIMS[0], out_channels=LAYER_DIMS[1],\n kernel_size=(3, 3), padding=1)\n', (2782, 2872), True, 'import torch.nn as nn\n'), ((2882, 2891), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2889, 2891), True, 'import torch.nn as nn\n'), ((2905, 2947), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': 'LAYER_DIMS[1]'}), '(num_features=LAYER_DIMS[1])\n', (2919, 2947), True, 'import torch.nn as nn\n'), ((2961, 3003), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2, 2)', 'stride': '(2)'}), '(kernel_size=(2, 2), stride=2)\n', (2973, 3003), True, 'import torch.nn as nn\n'), ((3088, 3187), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'LAYER_DIMS[1]', 'out_channels': 'LAYER_DIMS[2]', 'kernel_size': '(3, 3)', 'padding': '(2)'}), '(in_channels=LAYER_DIMS[1], out_channels=LAYER_DIMS[2],\n kernel_size=(3, 3), padding=2)\n', (3097, 3187), True, 'import torch.nn as nn\n'), ((3197, 3206), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3204, 3206), True, 'import torch.nn as nn\n'), ((3220, 3262), 'torch.nn.BatchNorm2d', 
'nn.BatchNorm2d', ([], {'num_features': 'LAYER_DIMS[2]'}), '(num_features=LAYER_DIMS[2])\n', (3234, 3262), True, 'import torch.nn as nn\n'), ((3276, 3318), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2, 2)', 'stride': '(2)'}), '(kernel_size=(2, 2), stride=2)\n', (3288, 3318), True, 'import torch.nn as nn\n'), ((3659, 3795), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': 'LAYER_DIMS[2]', 'out_channels': 'LAYER_DIMS[1]', 'kernel_size': '(3, 3)', 'stride': '(2)', 'padding': '(1)', 'output_padding': '(0)'}), '(in_channels=LAYER_DIMS[2], out_channels=LAYER_DIMS[1],\n kernel_size=(3, 3), stride=2, padding=1, output_padding=0)\n', (3677, 3795), True, 'import torch.nn as nn\n'), ((3836, 3845), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3843, 3845), True, 'import torch.nn as nn\n'), ((3859, 3888), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['LAYER_DIMS[1]'], {}), '(LAYER_DIMS[1])\n', (3873, 3888), True, 'import torch.nn as nn\n'), ((3976, 4112), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': 'LAYER_DIMS[1]', 'out_channels': 'LAYER_DIMS[0]', 'kernel_size': '(3, 3)', 'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)'}), '(in_channels=LAYER_DIMS[1], out_channels=LAYER_DIMS[0],\n kernel_size=(3, 3), stride=2, padding=1, output_padding=1)\n', (3994, 4112), True, 'import torch.nn as nn\n'), ((4153, 4162), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4160, 4162), True, 'import torch.nn as nn\n'), ((4176, 4205), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['LAYER_DIMS[0]'], {}), '(LAYER_DIMS[0])\n', (4190, 4205), True, 'import torch.nn as nn\n'), ((4295, 4420), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', ([], {'in_channels': 'LAYER_DIMS[0]', 'out_channels': '(1)', 'kernel_size': '(3, 3)', 'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)'}), '(in_channels=LAYER_DIMS[0], out_channels=1, kernel_size=(\n 3, 3), stride=2, padding=1, output_padding=1)\n', (4313, 4420), True, 'import torch.nn as 
nn\n'), ((4460, 4472), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4470, 4472), True, 'import torch.nn as nn\n'), ((4848, 4936), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'LAYER_DIMS[2]', 'out_channels': '(10)', 'kernel_size': '(4, 4)', 'padding': '(0)'}), '(in_channels=LAYER_DIMS[2], out_channels=10, kernel_size=(4, 4),\n padding=0)\n', (4857, 4936), True, 'import torch.nn as nn\n'), ((7060, 7078), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7076, 7078), True, 'import numpy as np\n'), ((10580, 10592), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (10590, 10592), True, 'import torch.nn as nn\n'), ((10617, 10663), 'torch.optim.Adam', 'torch.optim.Adam', (['parameters'], {'lr': 'learning_rate'}), '(parameters, lr=learning_rate)\n', (10633, 10663), False, 'import torch\n'), ((15551, 15610), 'torch.optim.SGD', 'torch.optim.SGD', (['parameters'], {'lr': 'learning_rate', 'momentum': '(0.9)'}), '(parameters, lr=learning_rate, momentum=0.9)\n', (15566, 15610), False, 'import torch\n'), ((15635, 15656), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (15654, 15656), True, 'import torch.nn as nn\n'), ((11517, 11528), 'time.time', 'time.time', ([], {}), '()\n', (11526, 11528), False, 'import time\n'), ((12317, 12328), 'time.time', 'time.time', ([], {}), '()\n', (12326, 12328), False, 'import time\n'), ((13308, 13343), 'matplotlib.pyplot.figure', 'plt.figure', (['"""out"""'], {'figsize': '(10, 10)'}), "('out', figsize=(10, 10))\n", (13318, 13343), True, 'import matplotlib.pyplot as plt\n'), ((13369, 13403), 'matplotlib.pyplot.figure', 'plt.figure', (['"""in"""'], {'figsize': '(10, 10)'}), "('in', figsize=(10, 10))\n", (13379, 13403), True, 'import matplotlib.pyplot as plt\n'), ((16088, 16099), 'time.time', 'time.time', ([], {}), '()\n', (16097, 16099), False, 'import time\n'), ((16724, 16735), 'time.time', 'time.time', ([], {}), '()\n', (16733, 16735), False, 'import time\n'), ((8454, 8472), 
'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8470, 8472), True, 'import numpy as np\n'), ((11717, 11733), 'torch.autograd.Variable', 'Variable', (['images'], {}), '(images)\n', (11725, 11733), False, 'from torch.autograd import Variable\n'), ((13029, 13048), 'torch.autograd.Variable', 'Variable', (['test_imgs'], {}), '(test_imgs)\n', (13037, 13048), False, 'from torch.autograd import Variable\n'), ((13050, 13071), 'torch.autograd.Variable', 'Variable', (['test_labels'], {}), '(test_labels)\n', (13058, 13071), False, 'from torch.autograd import Variable\n'), ((13575, 13592), 'matplotlib.pyplot.figure', 'plt.figure', (['"""out"""'], {}), "('out')\n", (13585, 13592), True, 'import matplotlib.pyplot as plt\n'), ((13756, 13771), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (13764, 13771), True, 'import matplotlib.pyplot as plt\n'), ((13792, 13808), 'matplotlib.pyplot.figure', 'plt.figure', (['"""in"""'], {}), "('in')\n", (13802, 13808), True, 'import matplotlib.pyplot as plt\n'), ((13970, 13985), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (13978, 13985), True, 'import matplotlib.pyplot as plt\n'), ((10057, 10071), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (10065, 10071), True, 'import numpy as np\n'), ((16327, 16343), 'torch.autograd.Variable', 'Variable', (['images'], {}), '(images)\n', (16335, 16343), False, 'from torch.autograd import Variable\n'), ((16345, 16361), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (16353, 16361), False, 'from torch.autograd import Variable\n'), ((17734, 17750), 'torch.autograd.Variable', 'Variable', (['images'], {}), '(images)\n', (17742, 17750), False, 'from torch.autograd import Variable\n'), ((17752, 17768), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (17760, 17768), False, 'from torch.autograd import Variable\n'), ((17894, 17918), 'torch.max', 'torch.max', (['prediction', '(1)'], {}), '(prediction, 
1)\n', (17903, 17918), False, 'import torch\n'), ((18148, 18174), 'numpy.array', 'np.array', (['batch_accuracies'], {}), '(batch_accuracies)\n', (18156, 18174), True, 'import numpy as np\n')] |
# ----------------------------------------------------------------------------
#
# MantaFlow fluid solver framework
# Copyright 2019-2020 <NAME>, <NAME>
#
# This program is free software, distributed under the terms of the
# Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Training (PRE versions)
#
# ----------------------------------------------------------------------------
import os, sys, glob, pickle, argparse, logging
# console logger; a FileHandler into the run directory is added later (unless --nolog)
log = logging.getLogger()
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)
import numpy as np
# default parameters
params = {
    'tdata': './burgers-fdt-corr-set/sim_000000',
}
parser = argparse.ArgumentParser(description='Parse parameters', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-o', '--opath', default='/tmp/tf_test_model', help='output path')
parser.add_argument('--tmp', default=None, help='temporary directory; will override the system tmp')
parser.add_argument('--nolog', action='store_true', help='turn off logging')
parser.add_argument('--nopng', action='store_true', help='turn off saving PNGs')
parser.add_argument('--nostats', action='store_true', help='turn off stats')
parser.add_argument('--notrain', action='store_true', help='turn off training')
parser.add_argument('--novdata', action='store_true', help='turn off use of validation data')
parser.add_argument('--nogpu', action='store_true', help='turn off GPU')
parser.add_argument('--nozerocen', action='store_true', help='normalize data without zero-centered')
parser.add_argument('--augment', action='store_true', help='apply data augmentation')
parser.add_argument('--nsigma', default=1.0, type=float, help='normalize (or standardize) data with this sigma value')
parser.add_argument('--val', default=0.2, type=float, help='validation data (split) size')
parser.add_argument('--bsize', dest='batch_size', default=32, type=int, help='batch size')
parser.add_argument('--epochs', default=1000, type=int, help='number of epoch')
parser.add_argument('--seed', default=None, type=int, help='seed for random number generator')
parser.add_argument('--steps', dest='steps_per_epoch', default=None, type=int, help='how many steps (i.e., batches) per epoch')
parser.add_argument('--lr', default=1e-3, type=float, help='start learning rate')
parser.add_argument('--model', default='mars_moon', help='model name')
parser.add_argument('--inftr', default='scandium', help='input feature')
parser.add_argument('-k', '--keep', default=None, help='keep old model if exists')
parser.add_argument('tdata', action='store', nargs='+', help='npz files for training data')
pargs = parser.parse_args()
params.update(vars(pargs))
# must be set BEFORE tensorflow is imported below, otherwise TF still grabs the GPUs
if params['nogpu']: os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import tensorflow as tf
from tensorflow import keras
if not params['nogpu']:
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU; to solve the problem of "Could not create cudnn handle: CUDNN_STATUS_ALLOC_FAILED"
    sess = tf.compat.v1.Session(config=config)
    tf.compat.v1.keras.backend.set_session(sess)
# seed both RNGs for reproducibility (falls back to 0 when --seed is not given)
np.random.seed(0 if params['seed'] is None else params['seed'])
tf.compat.v1.set_random_seed(0 if params['seed'] is None else params['seed'])
# all output artifacts live under --opath: model, checkpoint, plots, logs, stats
paths = {}
paths['mdl'] = params['opath'] + '/model.h5'
paths['mdp'] = params['opath'] + '/model.png'
paths['mck'] = params['opath'] + '/model.ckh5'
paths['tsb'] = params['opath'] + '/logs'
paths['log'] = params['opath'] + '/run.log'
paths['png'] = params['opath'] + '/stats-png'
paths['pdf'] = params['opath'] + '/stats.pdf'
paths['pkl'] = params['opath'] + '/stats.pickle'
# move any pre-existing output directory out of the way instead of overwriting:
# either rename it in place (--keep PREFIX) or stash it into the tmp directory
if params['keep'] is not None:
    os.path.exists(params['opath']) and os.rename(params['opath'], params['keep']+'-'+params['opath'])
else:
    import shutil, tempfile, datetime
    tkey = datetime.datetime.now().strftime('%Y%m%d%H%M')
    tmp = tempfile.gettempdir() if pargs.tmp is None else pargs.tmp
    os.path.exists(params['opath']) and shutil.move(params['opath'], tmp+'/{}-{}'.format(tkey, params['opath'].replace('/', '-')))
os.path.exists(params['opath']) or os.makedirs(params['opath'])
def read_grid(path, rtbnd=-1, dtype='float32'):
    """Load one simulation grid from an .npz archive.

    Returns (head, grid) where head holds the grid dimensions
    ('dimX'/'dimY' are the stored extents plus rtbnd, 'dimZ' is always 1)
    and grid is the array reshaped to (dimZ, dimY, dimX[, C]).

    rtbnd: manually correct a potential shape mismatch (the reshape below
    assumes rtbnd == -1, i.e. one boundary row/column is cropped off).
    NOTE(review): the dtype argument is currently unused — data is returned
    with whatever dtype the archive holds.
    """
    if not path.endswith('.npz'):
        print('Filetype is not supported.')
        exit(0)
    raw = np.load(path)['arr_0']
    head = {
        'dimX': raw.shape[-2] + rtbnd,
        'dimY': raw.shape[-3] + rtbnd,
        'dimZ': 1,
    }
    if raw.shape[-1] > 1:
        # multi-channel grid: crop the boundary, then pad a zero third channel
        cropped = raw[:-1, :-1, :].reshape((head['dimZ'], head['dimY'], head['dimX'], raw.shape[-1]))
        pad = np.zeros(shape=(head['dimZ'], head['dimY'], head['dimX'], 1))
        grid = np.concatenate((cropped, pad), axis=-1)
    else:
        grid = raw.reshape((head['dimZ'], head['dimY'], head['dimX']))
    return head, grid
from scipy import stats
import tf_data as dmani
# collect frame files from every --tdata directory, sorted per directory so
# frame order within a simulation is preserved
files = {
    # inputs
    'vel': [ f for ffs in (sorted(glob.glob('{}/velo_0*.npz'.format(i))) for i in params['tdata']) for f in ffs ],
    'frc': [ f for ffs in (sorted(glob.glob('{}/forc_0*.npz'.format(i))) for i in params['tdata']) for f in ffs ],
    # outputs
    'Vco': [ f for ffs in (sorted(glob.glob('{}/corr_0*.npz'.format(i))) for i in params['tdata']) for f in ffs ],
}
# every input frame must have a matching correction (label) frame
assert all([ (len(files[i])==len(files['Vco'])) for i in files ]), 'Some data files are missing'
# filled in by the data_* loader and by dmani.dataStats further below
data_stats = {}
def lr_schedule(epoch, current_lr):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs
        current_lr (float): the learning rate currently in use
    # Returns
        lr (float32): learning rate
    """
    # one-shot decay factors, applied only in the exact epoch listed
    decay_at = {81: 1e-1, 121: 1e-1, 161: 1e-1, 181: 0.5}
    return current_lr * decay_at.get(epoch, 1.0)
def getPrevFile(curr_i, offset, filekey, files):
    """Return the file `offset` frames away from index curr_i, but only if it
    belongs to the same simulation directory; otherwise scan from the
    requested offset towards curr_i and return the first same-directory hit
    (j == 0, i.e. curr_i itself, always matches).
    """
    flist = files[filekey]
    same_dir = os.path.dirname(flist[curr_i])
    cand = max([0, curr_i + offset])
    if os.path.dirname(flist[cand]) == same_dir:
        return flist[cand]
    for j in range(offset, 1, 1):
        if os.path.dirname(flist[curr_i + j]) == same_dir:
            return flist[curr_i + j]
    return flist[curr_i]
def data_scandium():
    """Build the 'scandium' feature set: per frame, the input stacks the
    velocity (2ch) and force (2ch) fields, the label is the 2-channel
    correction field. The last axis is reverted for phiflow's index order.
    Returns (head, {'inputs': ..., 'labels': ...}) with frames on axis 0.
    """
    data_stats['nG'] = 1
    data_stats['augment_flipX'] = [1.0,1.0, 1.0,1.0, 1.0,1.0] # v(n),u(n), fv(n),fu(n), vcorr_v vcorr_u
    head, _ = read_grid(files['Vco'][0], -1, 'float32')
    frame_shape = (1, head['dimY'], head['dimX'], 2)
    def load_uv(path):
        # first two channels, indices reverted for phiflow
        return read_grid(path, -1, 'float32')[1][...,0:2][...,::-1].reshape(frame_shape)
    def ainput(i):
        print('{}/{}'.format(i+1, len(files['vel'])), end='\r', flush=True)
        return np.concatenate((load_uv(files['vel'][i]), load_uv(files['frc'][i])), axis=3)
    inputs = [ ainput(i) for i,_ in enumerate(files['Vco']) ]
    labels = [ load_uv(afile) for afile in files['Vco'] ]
    return head, {
        'inputs': np.concatenate(tuple(inputs), axis=0),
        'labels': np.concatenate(tuple(labels), axis=0),
    }
# def data_cobalt():
# data_stats['nG'] = 3
# data_stats['augment_flipX'] = [1.0,-1.0, 1.0,-1.0, 1.0,-1.0, 1.0,-1.0] # v(n-2),u(n-2), v(n-1),u(n-1), v(n),u(n), vcorr_v vcorr_u
# head, _ = read_grid(files['Vco'][0], -1, 'float32')
# def ainput(i):
# print('{}/{}'.format(i+1, len(files['vel'])), end='\r', flush=True)
# return [
# read_grid(getPrevFile(i, -j, 'vel', files), -1, 'float32')[1][...,0:2][...,::-1].reshape(1, head['dimY'], head['dimX'], 2) for j in range(2, -1, -1) # revert indices! for phiflow
# ]
# def inputs(): return [ np.concatenate(tuple(ainput(i)), axis=3) for i,_ in enumerate(files['Vco']) ]
# def labels(): return [ read_grid(afile, -1, 'float32')[1][...,0:2][...,::-1].reshape((1, head['dimY'], head['dimX'], 2)) for afile in files['Vco'] ] # revert indices! for phiflow
# data = {
# 'inputs': np.concatenate(tuple(inputs()), axis=0),
# 'labels': np.concatenate(tuple(labels()), axis=0),
# }
# return head, data
def augment_random_flip_sample(features, label):
    """Data augmentation: with probability 0.5, flip a (features, label) pair
    left-right and multiply the channels by data_stats['augment_flipX'] to
    correct the signs of the flipped vector components."""
    stacked = tf.concat([features, label], axis=-1)
    coin = tf.random.uniform([]) > 0.5
    picked = tf.cond(coin,
                     lambda: tf.image.flip_left_right(stacked)*data_stats['augment_flipX'],
                     lambda: stacked)
    # last two channels are the label, the rest are the features
    return picked[..., 0:-2], picked[..., -2:]
print('\nLoading data ...', flush=True)
# NOTE: eval-based dispatch on the --inftr CLI argument (e.g. data_scandium)
head, data = eval('data_{}()'.format(params['inftr']))
nX, nY, nF = head['dimX'], head['dimY'], data['inputs'].shape[-1]
data_stats.update({ 'feature': params['inftr'], 'nX': nX, 'nY': nY, 'nF': nF })
print('\n... Done', flush=True)
# optionally mirror log output into the run directory
[ params['nolog'] or log.addHandler(logging.FileHandler(paths['log'])) ]
log.info(params)
log.info(paths)
log.info('{} -> {}'.format(data['inputs'].shape, data['labels'].shape))
# shuffle once, then split the validation set off the tail; fancy indexing
# ([perm]) copies, so the raw 'data' arrays stay unnormalized below
perm = np.arange(data['labels'].shape[0])
np.random.shuffle(perm)
val_size = int(params['val']*data['labels'].shape[0]) if not params['novdata'] else data['labels'].shape[0]
train_inputs = data['inputs'][perm][:-val_size] if not params['novdata'] else data['inputs'][perm]
train_labels = data['labels'][perm][:-val_size] if not params['novdata'] else data['labels'][perm]
valid_inputs = data['inputs'][perm][-val_size:] if not params['novdata'] else data['inputs'][perm]
valid_labels = data['labels'][perm][-val_size:] if not params['novdata'] else data['labels'][perm]
log.info('training: {} -> {}'.format(train_inputs.shape, train_labels.shape))
log.info('validation: {} -> {}'.format(valid_inputs.shape, valid_labels.shape))
print('\nNormalizing data ...', flush=True)
# statistics are computed on the training split only, then applied to both
data_stats.update(dmani.dataStats(idata=train_inputs, odata=train_labels))
data_stats.update({'nozerocen': params['nozerocen'], 'nsigma': params['nsigma']})
log.info(data_stats)
if not params['nostats']:
    with open(paths['pkl'], 'wb') as f: pickle.dump(data_stats, f)
# in-place standardization of both splits using the training-set statistics
dmani.standardize(idata=train_inputs, odata=train_labels, dstats=data_stats, sigma_range=params['nsigma'], zero_centered=(not params['nozerocen']))
dmani.standardize(idata=valid_inputs, odata=valid_labels, dstats=data_stats, sigma_range=params['nsigma'], zero_centered=(not params['nozerocen']))
log.info(stats.describe(train_inputs, axis=None))
log.info(stats.describe(train_labels, axis=None))
log.info(stats.describe(valid_inputs, axis=None))
log.info(stats.describe(valid_labels, axis=None))
print('... Done', flush=True)
# data statistics
if not params['nostats']:
    print('\nPlotting data statistics ...', flush=True, end='')
    import matplotlib
    matplotlib.use('Agg')  # headless backend: we only ever write files
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    from matplotlib.backends.backend_pdf import PdfPages
    params['nopng'] or os.path.exists(paths['png']) or os.makedirs(paths['png'])
    with PdfPages(paths['pdf']) as pdf:
        def _hist_page(values, title, png_path):
            # One log-scaled histogram page with scipy summary stats overlaid:
            # always appended to the PDF, also saved as PNG unless --nopng.
            # (Deduplicates the three identical plotting loops of the original.)
            ss = stats.describe(values)
            plt.figure()
            plt.hist(values, bins=100, log=True)
            plt.gca().text(0.5, 1.0, ss, wrap=True, horizontalalignment='center', verticalalignment='top', transform=plt.gca().transAxes, fontsize=8)
            plt.title(title)
            plt.savefig(pdf, format='pdf')
            params['nopng'] or plt.savefig(png_path)
            plt.close('all')
        # raw (unnormalized) inputs/labels, one page per channel
        for i in data:
            for j in range(data[i].shape[-1]):
                _hist_page(data[i][...,j].reshape(-1),
                           'Histogram of {}[{}]'.format(i, j),
                           '{}/{}_{}.png'.format(os.path.normpath(paths['png']), i, j))
        # normalized training tensors, one page per channel
        for j in range(train_inputs.shape[-1]):
            _hist_page(train_inputs[...,j].reshape(-1),
                       'Histogram of input_train[{}] (normalized)'.format(j),
                       '{}/input_train_{}_normalized.png'.format(os.path.normpath(paths['png']), j))
        for j in range(train_labels.shape[-1]):
            _hist_page(train_labels[...,j].reshape(-1),
                       'Histogram of label_train[{}] (normalized)'.format(j),
                       '{}/label_train_{}_normalized.png'.format(os.path.normpath(paths['png']), j))
    print(' Done', flush=True)
def model_mercury():
    """Plain three-layer CNN baseline mapping (nY, nX, nF) -> (nY, nX, 2)."""
    with tf.name_scope('model_mercury') as scope:
        stack = [
            keras.layers.Input(shape=(nY, nX, nF)),
            keras.layers.Conv2D(filters=32, kernel_size=5, padding='same', activation=tf.nn.relu),
            keras.layers.Conv2D(filters=64, kernel_size=5, padding='same', activation=tf.nn.relu),
            keras.layers.Conv2D(filters=2, kernel_size=5, padding='same', activation=None), # u, v
        ]
        return keras.Sequential(stack)
def model_mars_moon():
    """ResNet-style fully convolutional model: a 32-feature stem followed by
    five identical residual blocks (conv-LeakyReLU-conv + identity skip) and
    a final 2-channel (u, v) output convolution.

    Returns an uncompiled keras Model mapping (nY, nX, nF) -> (nY, nX, 2).
    """
    with tf.name_scope('model_mars_moon') as scope:
        l_input = keras.layers.Input(shape=(nY, nX, nF))
        # stem
        x = keras.layers.Conv2D(filters=32, kernel_size=5, padding='same')(l_input)
        x = keras.layers.LeakyReLU()(x)
        # five residual blocks; this loop replaces the five hand-unrolled,
        # byte-identical blocks of the original code (same layers, same order)
        for _ in range(5):
            y = keras.layers.Conv2D(filters=32, kernel_size=5, padding='same')(x)
            y = keras.layers.LeakyReLU()(y)
            y = keras.layers.Conv2D(filters=32, kernel_size=5, padding='same')(y)
            x = keras.layers.LeakyReLU()(keras.layers.add([x, y]))
        l_output = keras.layers.Conv2D(filters=2, kernel_size=5, padding='same')(x)
        return keras.models.Model(inputs=l_input, outputs=l_output)
def model_jupiter_moon():
    """Residual CNN with three stages (32 -> 64 -> 32 filters), two residual
    blocks per stage; a 1x1 convolution projects the skip path of the first
    block of a stage when the channel count changes.

    Returns an uncompiled keras Model mapping (nY, nX, nF) -> (nY, nX, 2).
    """
    with tf.name_scope('model_jupiter_moon') as scope:
        l_input = keras.layers.Input(shape=(nY, nX, nF))
        block = [keras.layers.Conv2D(filters=32, kernel_size=5, padding='same', activation=tf.nn.relu)(l_input)]
        def _res_stage(nfilters, project):
            # two residual blocks at 'nfilters' channels; deduplicates the
            # three near-identical loops of the original code
            for i in range(2):
                l_conv0 = keras.layers.Conv2D(filters=nfilters, kernel_size=5, padding='same', activation=tf.nn.relu)(block[-1])
                l_conv1 = keras.layers.Conv2D(filters=nfilters, kernel_size=3, padding='same')(l_conv0)
                if project and i==0: block[-1] = keras.layers.Conv2D(filters=nfilters, kernel_size=1)(block[-1])
                l_skipc = keras.layers.add([block[-1], l_conv1])
                block.append(keras.layers.LeakyReLU()(l_skipc))
        _res_stage(32, project=False)  # stage 1: stays at 32 filters, no projection
        _res_stage(64, project=True)   # stage 2: widen to 64
        _res_stage(32, project=True)   # stage 3: narrow back to 32
        l_output = keras.layers.Conv2D(filters=2, kernel_size=5, padding='same', activation=None)(block[-1])
        return keras.models.Model(inputs=l_input, outputs=l_output)
from tensorflow.python.eager import context
class TrainValTensorBoard(keras.callbacks.TensorBoard):
    # TensorBoard callback that writes training and validation summaries into
    # separate sub-directories of one log_dir, so both curves appear together
    # in a single TensorBoard run.
    def __init__(self, log_dir='./logs', **kwargs):
        # training summaries go to <log_dir>/training, validation ones to
        # <log_dir>/validation (written by our own second writer)
        self.val_log_dir = os.path.join(log_dir, 'validation')
        training_log_dir = os.path.join(log_dir, 'training')
        super(TrainValTensorBoard, self).__init__(training_log_dir, **kwargs)
    def set_model(self, model):
        # the summary-writer API differs between eager and graph mode
        if context.executing_eagerly():
            self.val_writer = tf.contrib.summary.create_file_writer(self.val_log_dir)
        else:
            self.val_writer = tf.compat.v1.summary.FileWriter(self.val_log_dir)
        super(TrainValTensorBoard, self).set_model(model)
    def _write_custom_summaries(self, step, logs=None):
        logs = logs or {}
        # split out the 'val_*' metrics and log them (prefix stripped) through
        # the validation writer
        val_logs = {k.replace('val_', ''): v for k, v in logs.items() if 'val_' in k}
        if context.executing_eagerly():
            with self.val_writer.as_default(), tf.contrib.summary.always_record_summaries():
                for name, value in val_logs.items():
                    tf.contrib.summary.scalar(name, value.item(), step=step)
        else:
            for name, value in val_logs.items():
                summary = tf.compat.v1.Summary()
                summary_value = summary.value.add()
                summary_value.simple_value = value.item()
                summary_value.tag = name
                self.val_writer.add_summary(summary, step)
            self.val_writer.flush()
        # the remaining (training) metrics go through the stock writer
        logs = {k: v for k, v in logs.items() if not 'val_' in k}
        super(TrainValTensorBoard, self)._write_custom_summaries(step, logs)
    def on_train_end(self, logs=None):
        super(TrainValTensorBoard, self).on_train_end(logs)
        self.val_writer.close()
class XTensorBoard(TrainValTensorBoard):
    # TrainValTensorBoard that additionally records the current learning rate
    # ("lr") with every epoch's metrics.
    def lr_getter(self):
        # read the optimizer's current learning-rate tensor as a float32 scalar
        return np.float32(keras.backend.eval(self.model.optimizer.lr))
    def on_epoch_end(self, episode, logs=None):
        # FIX: the original used a mutable default argument (logs={}) and then
        # mutated it via logs.update(...), sharing state across calls; use a
        # None sentinel and a fresh dict instead.
        logs = {} if logs is None else logs
        logs.update({"lr": self.lr_getter()})
        super(XTensorBoard, self).on_epoch_end(episode, logs)
if not params['notrain']:
    log.info('tensorflow-{} ({}, {}); keras-{} ({})'.format(tf.__version__, tf.sysconfig.get_include(), tf.sysconfig.get_lib(), keras.__version__, keras.__path__))
    # training pipeline: optional flip augmentation, full-set shuffle, repeat forever
    dataset = tf.data.Dataset.from_tensor_slices((train_inputs, train_labels))
    if params['augment']: dataset = dataset.map(augment_random_flip_sample)
    dataset = dataset.shuffle(buffer_size=train_labels.shape[0]).batch(params['batch_size']).repeat()
    # validation pipeline: batches of ~1/10 of the validation set (validation_steps=10 below)
    valset = tf.data.Dataset.from_tensor_slices((valid_inputs, valid_labels)).batch(round(val_size*0.1)).repeat()
    # NOTE: eval-based dispatch on the --model CLI argument (e.g. model_mars_moon)
    model = eval('model_{}()'.format(params['model']))
    opt = keras.optimizers.Adam(lr=lr_schedule(epoch=0, current_lr=params['lr']))
    model.compile(loss='mse', optimizer=opt, metrics=['mse', 'mae'])
    model.summary(print_fn=log.info)
    keras.utils.plot_model(model, to_file=paths['mdp'], show_shapes=True)
    mycallbacks = [
        XTensorBoard(log_dir=paths['tsb'], histogram_freq=10, batch_size=params['batch_size']),
        keras.callbacks.ModelCheckpoint(paths['mck'], monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1),
        keras.callbacks.LearningRateScheduler(lr_schedule),
        # keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6),
        # keras.callbacks.EarlyStopping(monitor='val_loss', patience=10),
    ]
    model.fit(
        dataset,
        epochs=params['epochs'],
        steps_per_epoch=params['steps_per_epoch'] if params['steps_per_epoch'] else round(train_labels.shape[0]/params['batch_size']),
        validation_data=valset, validation_steps=10,
        callbacks=mycallbacks)
    log.info(model.evaluate(valid_inputs, valid_labels))
    log.info(stats.describe(model.predict(valid_inputs), axis=None))
    model.save(paths['mdl'])
# reload the saved model as a sanity check that the .h5 file round-trips
lmodel = keras.models.load_model(paths['mdl'])
lmodel.summary(print_fn=log.info)
log.info(stats.describe(lmodel.predict(valid_inputs), axis=None))
| [
"logging.getLogger",
"logging.StreamHandler",
"matplotlib.pyplot.hist",
"tensorflow.keras.callbacks.LearningRateScheduler",
"tensorflow.keras.utils.plot_model",
"tensorflow.keras.models.load_model",
"tensorflow.compat.v1.set_random_seed",
"tensorflow.compat.v1.Session",
"numpy.arange",
"os.path.ex... | [((463, 482), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (480, 482), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((665, 781), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse parameters"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Parse parameters', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (688, 781), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((3758, 3821), 'numpy.random.seed', 'np.random.seed', (["(0 if params['seed'] is None else params['seed'])"], {}), "(0 if params['seed'] is None else params['seed'])\n", (3772, 3821), True, 'import numpy as np\n'), ((3822, 3899), 'tensorflow.compat.v1.set_random_seed', 'tf.compat.v1.set_random_seed', (["(0 if params['seed'] is None else params['seed'])"], {}), "(0 if params['seed'] is None else params['seed'])\n", (3850, 3899), True, 'import tensorflow as tf\n'), ((10083, 10117), 'numpy.arange', 'np.arange', (["data['labels'].shape[0]"], {}), "(data['labels'].shape[0])\n", (10092, 10117), True, 'import numpy as np\n'), ((10118, 10141), 'numpy.random.shuffle', 'np.random.shuffle', (['perm'], {}), '(perm)\n', (10135, 10141), True, 'import numpy as np\n'), ((11123, 11272), 'tf_data.standardize', 'dmani.standardize', ([], {'idata': 'train_inputs', 'odata': 'train_labels', 'dstats': 'data_stats', 'sigma_range': "params['nsigma']", 'zero_centered': "(not params['nozerocen'])"}), "(idata=train_inputs, odata=train_labels, dstats=data_stats,\n sigma_range=params['nsigma'], zero_centered=not params['nozerocen'])\n", (11140, 11272), True, 'import tf_data as dmani\n'), ((11271, 11420), 'tf_data.standardize', 'dmani.standardize', ([], {'idata': 'valid_inputs', 'odata': 'valid_labels', 'dstats': 'data_stats', 'sigma_range': "params['nsigma']", 'zero_centered': "(not params['nozerocen'])"}), "(idata=valid_inputs, odata=valid_labels, dstats=data_stats,\n 
sigma_range=params['nsigma'], zero_centered=not params['nozerocen'])\n", (11288, 11420), True, 'import tf_data as dmani\n'), ((22168, 22205), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (["paths['mdl']"], {}), "(paths['mdl'])\n", (22191, 22205), False, 'from tensorflow import keras\n'), ((498, 521), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (519, 521), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((3460, 3486), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (3484, 3486), True, 'import tensorflow as tf\n'), ((3672, 3707), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (3692, 3707), True, 'import tensorflow as tf\n'), ((3712, 3756), 'tensorflow.compat.v1.keras.backend.set_session', 'tf.compat.v1.keras.backend.set_session', (['sess'], {}), '(sess)\n', (3750, 3756), True, 'import tensorflow as tf\n'), ((4720, 4751), 'os.path.exists', 'os.path.exists', (["params['opath']"], {}), "(params['opath'])\n", (4734, 4751), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((4755, 4783), 'os.makedirs', 'os.makedirs', (["params['opath']"], {}), "(params['opath'])\n", (4766, 4783), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((9371, 9408), 'tensorflow.concat', 'tf.concat', (['[features, label]'], {'axis': '(-1)'}), '([features, label], axis=-1)\n', (9380, 9408), True, 'import tensorflow as tf\n'), ((10869, 10924), 'tf_data.dataStats', 'dmani.dataStats', ([], {'idata': 'train_inputs', 'odata': 'train_labels'}), '(idata=train_inputs, odata=train_labels)\n', (10884, 10924), True, 'import tf_data as dmani\n'), ((11429, 11468), 'scipy.stats.describe', 'stats.describe', (['train_inputs'], {'axis': 'None'}), '(train_inputs, axis=None)\n', (11443, 11468), False, 'from scipy import stats\n'), ((11479, 11518), 'scipy.stats.describe', 'stats.describe', (['train_labels'], {'axis': 'None'}), '(train_labels, 
axis=None)\n', (11493, 11518), False, 'from scipy import stats\n'), ((11529, 11568), 'scipy.stats.describe', 'stats.describe', (['valid_inputs'], {'axis': 'None'}), '(valid_inputs, axis=None)\n', (11543, 11568), False, 'from scipy import stats\n'), ((11579, 11618), 'scipy.stats.describe', 'stats.describe', (['valid_labels'], {'axis': 'None'}), '(valid_labels, axis=None)\n', (11593, 11618), False, 'from scipy import stats\n'), ((11786, 11807), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (11800, 11807), False, 'import matplotlib\n'), ((20501, 20565), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(train_inputs, train_labels)'], {}), '((train_inputs, train_labels))\n', (20535, 20565), True, 'import tensorflow as tf\n'), ((21108, 21177), 'tensorflow.keras.utils.plot_model', 'keras.utils.plot_model', (['model'], {'to_file': "paths['mdp']", 'show_shapes': '(True)'}), "(model, to_file=paths['mdp'], show_shapes=True)\n", (21130, 21177), False, 'from tensorflow import keras\n'), ((4319, 4350), 'os.path.exists', 'os.path.exists', (["params['opath']"], {}), "(params['opath'])\n", (4333, 4350), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((4355, 4421), 'os.rename', 'os.rename', (["params['opath']", "(params['keep'] + '-' + params['opath'])"], {}), "(params['opath'], params['keep'] + '-' + params['opath'])\n", (4364, 4421), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((4530, 4551), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (4549, 4551), False, 'import shutil, tempfile, datetime\n'), ((4592, 4623), 'os.path.exists', 'os.path.exists', (["params['opath']"], {}), "(params['opath'])\n", (4606, 4623), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((6831, 6870), 'os.path.dirname', 'os.path.dirname', (['files[filekey][prev_i]'], {}), '(files[filekey][prev_i])\n', (6846, 6870), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((6872, 6911), 
'os.path.dirname', 'os.path.dirname', (['files[filekey][curr_i]'], {}), '(files[filekey][curr_i])\n', (6887, 6911), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((9423, 9444), 'tensorflow.random.uniform', 'tf.random.uniform', (['[]'], {}), '([])\n', (9440, 9444), True, 'import tensorflow as tf\n'), ((11095, 11121), 'pickle.dump', 'pickle.dump', (['data_stats', 'f'], {}), '(data_stats, f)\n', (11106, 11121), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((11956, 11984), 'os.path.exists', 'os.path.exists', (["paths['png']"], {}), "(paths['png'])\n", (11970, 11984), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((11988, 12013), 'os.makedirs', 'os.makedirs', (["paths['png']"], {}), "(paths['png'])\n", (11999, 12013), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((12023, 12045), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (["paths['pdf']"], {}), "(paths['pdf'])\n", (12031, 12045), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((14006, 14036), 'tensorflow.name_scope', 'tf.name_scope', (['"""model_mercury"""'], {}), "('model_mercury')\n", (14019, 14036), True, 'import tensorflow as tf\n'), ((14476, 14508), 'tensorflow.name_scope', 'tf.name_scope', (['"""model_mars_moon"""'], {}), "('model_mars_moon')\n", (14489, 14508), True, 'import tensorflow as tf\n'), ((14537, 14575), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(nY, nX, nF)'}), '(shape=(nY, nX, nF))\n', (14555, 14575), False, 'from tensorflow import keras\n'), ((14969, 15005), 'tensorflow.keras.layers.add', 'keras.layers.add', (['[block_0, l_conv2]'], {}), '([block_0, l_conv2])\n', (14985, 15005), False, 'from tensorflow import keras\n'), ((15309, 15345), 'tensorflow.keras.layers.add', 'keras.layers.add', (['[block_1, l_conv4]'], {}), '([block_1, l_conv4])\n', (15325, 15345), False, 'from tensorflow import keras\n'), ((15649, 15685), 'tensorflow.keras.layers.add', 'keras.layers.add', (['[block_2, 
l_conv6]'], {}), '([block_2, l_conv6])\n', (15665, 15685), False, 'from tensorflow import keras\n'), ((15989, 16025), 'tensorflow.keras.layers.add', 'keras.layers.add', (['[block_3, l_conv8]'], {}), '([block_3, l_conv8])\n', (16005, 16025), False, 'from tensorflow import keras\n'), ((16329, 16365), 'tensorflow.keras.layers.add', 'keras.layers.add', (['[block_4, l_convA]'], {}), '([block_4, l_convA])\n', (16345, 16365), False, 'from tensorflow import keras\n'), ((16526, 16578), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'l_input', 'outputs': 'l_output'}), '(inputs=l_input, outputs=l_output)\n', (16544, 16578), False, 'from tensorflow import keras\n'), ((16615, 16650), 'tensorflow.name_scope', 'tf.name_scope', (['"""model_jupiter_moon"""'], {}), "('model_jupiter_moon')\n", (16628, 16650), True, 'import tensorflow as tf\n'), ((16679, 16717), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(nY, nX, nF)'}), '(shape=(nY, nX, nF))\n', (16697, 16717), False, 'from tensorflow import keras\n'), ((18215, 18267), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'l_input', 'outputs': 'l_output'}), '(inputs=l_input, outputs=l_output)\n', (18233, 18267), False, 'from tensorflow import keras\n'), ((18449, 18484), 'os.path.join', 'os.path.join', (['log_dir', '"""validation"""'], {}), "(log_dir, 'validation')\n", (18461, 18484), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((18512, 18545), 'os.path.join', 'os.path.join', (['log_dir', '"""training"""'], {}), "(log_dir, 'training')\n", (18524, 18545), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((18668, 18695), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (18693, 18695), False, 'from tensorflow.python.eager import context\n'), ((19116, 19143), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (19141, 19143), False, 'from 
tensorflow.python.eager import context\n'), ((21303, 21453), 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', (["paths['mck']"], {'monitor': '"""val_loss"""', 'verbose': '(0)', 'save_best_only': '(False)', 'save_weights_only': '(False)', 'mode': '"""auto"""', 'period': '(1)'}), "(paths['mck'], monitor='val_loss', verbose=0,\n save_best_only=False, save_weights_only=False, mode='auto', period=1)\n", (21334, 21453), False, 'from tensorflow import keras\n'), ((21459, 21509), 'tensorflow.keras.callbacks.LearningRateScheduler', 'keras.callbacks.LearningRateScheduler', (['lr_schedule'], {}), '(lr_schedule)\n', (21496, 21509), False, 'from tensorflow import keras\n'), ((4473, 4496), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4494, 4496), False, 'import shutil, tempfile, datetime\n'), ((5024, 5037), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (5031, 5037), True, 'import numpy as np\n'), ((9933, 9966), 'logging.FileHandler', 'logging.FileHandler', (["paths['log']"], {}), "(paths['log'])\n", (9952, 9966), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((12809, 12827), 'scipy.stats.describe', 'stats.describe', (['dd'], {}), '(dd)\n', (12823, 12827), False, 'from scipy import stats\n'), ((12840, 12852), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12850, 12852), True, 'import matplotlib.pyplot as plt\n'), ((12865, 12897), 'matplotlib.pyplot.hist', 'plt.hist', (['dd'], {'bins': '(100)', 'log': '(True)'}), '(dd, bins=100, log=True)\n', (12873, 12897), True, 'import matplotlib.pyplot as plt\n'), ((13137, 13167), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pdf'], {'format': '"""pdf"""'}), "(pdf, format='pdf')\n", (13148, 13167), True, 'import matplotlib.pyplot as plt\n'), ((13301, 13317), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (13310, 13317), True, 'import matplotlib.pyplot as plt\n'), ((13433, 13451), 'scipy.stats.describe', 'stats.describe', 
(['dd'], {}), '(dd)\n', (13447, 13451), False, 'from scipy import stats\n'), ((13464, 13476), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13474, 13476), True, 'import matplotlib.pyplot as plt\n'), ((13489, 13521), 'matplotlib.pyplot.hist', 'plt.hist', (['dd'], {'bins': '(100)', 'log': '(True)'}), '(dd, bins=100, log=True)\n', (13497, 13521), True, 'import matplotlib.pyplot as plt\n'), ((13761, 13791), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pdf'], {'format': '"""pdf"""'}), "(pdf, format='pdf')\n", (13772, 13791), True, 'import matplotlib.pyplot as plt\n'), ((13925, 13941), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (13934, 13941), True, 'import matplotlib.pyplot as plt\n'), ((14594, 14656), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'padding': '"""same"""'}), "(filters=32, kernel_size=5, padding='same')\n", (14613, 14656), False, 'from tensorflow import keras\n'), ((14684, 14708), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (14706, 14708), False, 'from tensorflow import keras\n'), ((14737, 14799), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'padding': '"""same"""'}), "(filters=32, kernel_size=5, padding='same')\n", (14756, 14799), False, 'from tensorflow import keras\n'), ((14827, 14851), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (14849, 14851), False, 'from tensorflow import keras\n'), ((14879, 14941), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'padding': '"""same"""'}), "(filters=32, kernel_size=5, padding='same')\n", (14898, 14941), False, 'from tensorflow import keras\n'), ((15024, 15048), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (15046, 15048), False, 'from tensorflow import keras\n'), ((15077, 15139), 
'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'padding': '"""same"""'}), "(filters=32, kernel_size=5, padding='same')\n", (15096, 15139), False, 'from tensorflow import keras\n'), ((15167, 15191), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (15189, 15191), False, 'from tensorflow import keras\n'), ((15219, 15281), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'padding': '"""same"""'}), "(filters=32, kernel_size=5, padding='same')\n", (15238, 15281), False, 'from tensorflow import keras\n'), ((15364, 15388), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (15386, 15388), False, 'from tensorflow import keras\n'), ((15417, 15479), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'padding': '"""same"""'}), "(filters=32, kernel_size=5, padding='same')\n", (15436, 15479), False, 'from tensorflow import keras\n'), ((15507, 15531), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (15529, 15531), False, 'from tensorflow import keras\n'), ((15559, 15621), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'padding': '"""same"""'}), "(filters=32, kernel_size=5, padding='same')\n", (15578, 15621), False, 'from tensorflow import keras\n'), ((15704, 15728), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (15726, 15728), False, 'from tensorflow import keras\n'), ((15757, 15819), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'padding': '"""same"""'}), "(filters=32, kernel_size=5, padding='same')\n", (15776, 15819), False, 'from tensorflow import keras\n'), ((15847, 15871), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (15869, 15871), 
False, 'from tensorflow import keras\n'), ((15899, 15961), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'padding': '"""same"""'}), "(filters=32, kernel_size=5, padding='same')\n", (15918, 15961), False, 'from tensorflow import keras\n'), ((16044, 16068), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (16066, 16068), False, 'from tensorflow import keras\n'), ((16097, 16159), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'padding': '"""same"""'}), "(filters=32, kernel_size=5, padding='same')\n", (16116, 16159), False, 'from tensorflow import keras\n'), ((16187, 16211), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (16209, 16211), False, 'from tensorflow import keras\n'), ((16239, 16301), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'padding': '"""same"""'}), "(filters=32, kernel_size=5, padding='same')\n", (16258, 16301), False, 'from tensorflow import keras\n'), ((16384, 16408), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (16406, 16408), False, 'from tensorflow import keras\n'), ((16438, 16499), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(2)', 'kernel_size': '(5)', 'padding': '"""same"""'}), "(filters=2, kernel_size=5, padding='same')\n", (16457, 16499), False, 'from tensorflow import keras\n'), ((17094, 17132), 'tensorflow.keras.layers.add', 'keras.layers.add', (['[block[-1], l_conv1]'], {}), '([block[-1], l_conv1])\n', (17110, 17132), False, 'from tensorflow import keras\n'), ((17544, 17582), 'tensorflow.keras.layers.add', 'keras.layers.add', (['[block[-1], l_conv1]'], {}), '([block[-1], l_conv1])\n', (17560, 17582), False, 'from tensorflow import keras\n'), ((17994, 18032), 'tensorflow.keras.layers.add', 'keras.layers.add', (['[block[-1], l_conv1]'], {}), 
'([block[-1], l_conv1])\n', (18010, 18032), False, 'from tensorflow import keras\n'), ((18110, 18188), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(2)', 'kernel_size': '(5)', 'padding': '"""same"""', 'activation': 'None'}), "(filters=2, kernel_size=5, padding='same', activation=None)\n", (18129, 18188), False, 'from tensorflow import keras\n'), ((18727, 18782), 'tensorflow.contrib.summary.create_file_writer', 'tf.contrib.summary.create_file_writer', (['self.val_log_dir'], {}), '(self.val_log_dir)\n', (18764, 18782), True, 'import tensorflow as tf\n'), ((18827, 18876), 'tensorflow.compat.v1.summary.FileWriter', 'tf.compat.v1.summary.FileWriter', (['self.val_log_dir'], {}), '(self.val_log_dir)\n', (18858, 18876), True, 'import tensorflow as tf\n'), ((20092, 20135), 'tensorflow.keras.backend.eval', 'keras.backend.eval', (['self.model.optimizer.lr'], {}), '(self.model.optimizer.lr)\n', (20110, 20135), False, 'from tensorflow import keras\n'), ((20398, 20424), 'tensorflow.sysconfig.get_include', 'tf.sysconfig.get_include', ([], {}), '()\n', (20422, 20424), True, 'import tensorflow as tf\n'), ((20426, 20448), 'tensorflow.sysconfig.get_lib', 'tf.sysconfig.get_lib', ([], {}), '()\n', (20446, 20448), True, 'import tensorflow as tf\n'), ((7015, 7058), 'os.path.dirname', 'os.path.dirname', (['files[filekey][curr_i + j]'], {}), '(files[filekey][curr_i + j])\n', (7030, 7058), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((7058, 7097), 'os.path.dirname', 'os.path.dirname', (['files[filekey][curr_i]'], {}), '(files[filekey][curr_i])\n', (7073, 7097), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((9490, 9523), 'tensorflow.image.flip_left_right', 'tf.image.flip_left_right', (['asample'], {}), '(asample)\n', (9514, 9523), True, 'import tensorflow as tf\n'), ((12193, 12211), 'scipy.stats.describe', 'stats.describe', (['dd'], {}), '(dd)\n', (12207, 12211), False, 'from scipy import stats\n'), ((12228, 12240), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12238, 12240), True, 'import matplotlib.pyplot as plt\n'), ((12257, 12289), 'matplotlib.pyplot.hist', 'plt.hist', (['dd'], {'bins': '(100)', 'log': '(True)'}), '(dd, bins=100, log=True)\n', (12265, 12289), True, 'import matplotlib.pyplot as plt\n'), ((12522, 12552), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pdf'], {'format': '"""pdf"""'}), "(pdf, format='pdf')\n", (12533, 12552), True, 'import matplotlib.pyplot as plt\n'), ((12677, 12693), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (12686, 12693), True, 'import matplotlib.pyplot as plt\n'), ((14093, 14131), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(nY, nX, nF)'}), '(shape=(nY, nX, nF))\n', (14111, 14131), False, 'from tensorflow import keras\n'), ((14145, 14235), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(filters=32, kernel_size=5, padding='same', activation=\n tf.nn.relu)\n", (14164, 14235), False, 'from tensorflow import keras\n'), ((14244, 14334), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(5)', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(filters=64, kernel_size=5, padding='same', activation=\n tf.nn.relu)\n", (14263, 14334), False, 'from tensorflow import keras\n'), ((14343, 14421), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(2)', 'kernel_size': '(5)', 'padding': '"""same"""', 'activation': 'None'}), "(filters=2, kernel_size=5, padding='same', activation=None)\n", (14362, 14421), False, 'from tensorflow import keras\n'), ((16735, 16825), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(filters=32, kernel_size=5, padding='same', activation=\n tf.nn.relu)\n", 
(16754, 16825), False, 'from tensorflow import keras\n'), ((16881, 16971), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(filters=32, kernel_size=5, padding='same', activation=\n tf.nn.relu)\n", (16900, 16971), False, 'from tensorflow import keras\n'), ((17000, 17062), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=32, kernel_size=3, padding='same')\n", (17019, 17062), False, 'from tensorflow import keras\n'), ((17240, 17330), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(5)', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(filters=64, kernel_size=5, padding='same', activation=\n tf.nn.relu)\n", (17259, 17330), False, 'from tensorflow import keras\n'), ((17359, 17421), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=64, kernel_size=3, padding='same')\n", (17378, 17421), False, 'from tensorflow import keras\n'), ((17690, 17780), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5)', 'padding': '"""same"""', 'activation': 'tf.nn.relu'}), "(filters=32, kernel_size=5, padding='same', activation=\n tf.nn.relu)\n", (17709, 17780), False, 'from tensorflow import keras\n'), ((17809, 17871), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'padding': '"""same"""'}), "(filters=32, kernel_size=3, padding='same')\n", (17828, 17871), False, 'from tensorflow import keras\n'), ((19192, 19236), 'tensorflow.contrib.summary.always_record_summaries', 'tf.contrib.summary.always_record_summaries', ([], {}), '()\n', (19234, 19236), True, 'import tensorflow as tf\n'), ((19457, 19479), 'tensorflow.compat.v1.Summary', 
'tf.compat.v1.Summary', ([], {}), '()\n', (19477, 19479), True, 'import tensorflow as tf\n'), ((5380, 5441), 'numpy.zeros', 'np.zeros', ([], {'shape': "(head['dimZ'], head['dimY'], head['dimX'], 1)"}), "(shape=(head['dimZ'], head['dimY'], head['dimX'], 1))\n", (5388, 5441), True, 'import numpy as np\n'), ((12910, 12919), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12917, 12919), True, 'import matplotlib.pyplot as plt\n'), ((13534, 13543), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13541, 13543), True, 'import matplotlib.pyplot as plt\n'), ((17155, 17179), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (17177, 17179), False, 'from tensorflow import keras\n'), ((17464, 17510), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(1)'}), '(filters=64, kernel_size=1)\n', (17483, 17510), False, 'from tensorflow import keras\n'), ((17605, 17629), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (17627, 17629), False, 'from tensorflow import keras\n'), ((17914, 17960), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(1)'}), '(filters=32, kernel_size=1)\n', (17933, 17960), False, 'from tensorflow import keras\n'), ((18055, 18079), 'tensorflow.keras.layers.LeakyReLU', 'keras.layers.LeakyReLU', ([], {}), '()\n', (18077, 18079), False, 'from tensorflow import keras\n'), ((20758, 20822), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(valid_inputs, valid_labels)'], {}), '((valid_inputs, valid_labels))\n', (20792, 20822), True, 'import tensorflow as tf\n'), ((12306, 12315), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12313, 12315), True, 'import matplotlib.pyplot as plt\n'), ((13015, 13024), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13022, 13024), True, 'import matplotlib.pyplot as plt\n'), ((13253, 13283), 'os.path.normpath', 
'os.path.normpath', (["paths['png']"], {}), "(paths['png'])\n", (13269, 13283), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((13639, 13648), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13646, 13648), True, 'import matplotlib.pyplot as plt\n'), ((13877, 13907), 'os.path.normpath', 'os.path.normpath', (["paths['png']"], {}), "(paths['png'])\n", (13893, 13907), False, 'import os, sys, glob, pickle, argparse, logging\n'), ((12411, 12420), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12418, 12420), True, 'import matplotlib.pyplot as plt\n'), ((12622, 12652), 'os.path.normpath', 'os.path.normpath', (["paths['png']"], {}), "(paths['png'])\n", (12638, 12652), False, 'import os, sys, glob, pickle, argparse, logging\n')] |
import tensorflow as tf
import gym
from tqdm import tqdm
import numpy as np
import random
from src.agent.agent import BaseAgent
from src.agent.k_step_planning_agent_with_retrain import KStepPlanningAgent
class DDPGAgent(BaseAgent):
    """Deep Deterministic Policy Gradient (DDPG) agent.

    Trains a deterministic actor alongside a Q-value critic from off-policy
    replay-buffer samples, with slowly-updated target networks for both.
    During early exploration, actions are a mix of actor output plus noise
    and (with probability ``use_k_step``) a k-step planning agent's action.
    """

    def __init__(self, sess, action_type, actor, critic, gamma, env, replay_buffer, noise=None,
                 exploration_episodes=10000, max_episodes=10000, max_steps_episode=10000,
                 warmup_steps=5000, mini_batch=32, eval_episodes=10, eval_periods=100,
                 env_render=False, summary_dir=None):
        """
        Args:
            sess: TensorFlow session shared by the actor/critic networks.
            action_type: 'Continuous' or 'Discrete'; selects action handling.
            actor: actor (deterministic policy) network.
            critic: critic (Q-function) network.
            gamma: discount factor for bootstrapped targets.
            env: gym-style environment.
            replay_buffer: experience replay buffer.
            noise: optional exploration-noise generator (e.g. Ornstein-Uhlenbeck).
            exploration_episodes: number of episodes during which noise is added.
            max_episodes: total training episodes.
            max_steps_episode: step cap per episode.
            warmup_steps: replay-buffer size below which random actions are taken
                and no learning updates are performed.
            mini_batch: batch size sampled from the replay buffer per update.
            eval_episodes: episodes averaged per evaluation.
            eval_periods: evaluate every this many training episodes.
            env_render: render the environment during training if True.
            summary_dir: TensorBoard summary directory (consumed by BaseAgent).
        """
        super(DDPGAgent, self).__init__(
            sess, env, replay_buffer, noise=noise,
            exploration_episodes=exploration_episodes, max_episodes=max_episodes,
            max_steps_episode=max_steps_episode, warmup_steps=warmup_steps,
            mini_batch=mini_batch, eval_episodes=eval_episodes,
            eval_periods=eval_periods, env_render=env_render, summary_dir=summary_dir)
        self.action_type = action_type
        self.actor = actor
        self.critic = critic
        self.gamma = gamma
        self.planning_agent = KStepPlanningAgent(env)

    def train(self, use_k_step=0.1, load=False):
        """Run the DDPG training loop.

        Args:
            use_k_step: probability of taking the planning agent's action
                instead of the noisy actor action during exploration episodes.
            load: if True, restore model weights from the default checkpoint
                before training.
        """
        # Initialize target network weights to match the online networks.
        self.actor.update_target_network()
        self.critic.update_target_network()
        max_reward = -99999
        if load:
            self.load()
        for cur_episode in tqdm(range(self.max_episodes)):
            # Periodic evaluation (also runs at episode 0 as a baseline).
            if cur_episode % self.eval_periods == 0:
                self.evaluate(cur_episode)

            state = self.env.reset()
            episode_reward = 0
            episode_ave_max_q = 0

            for cur_step in range(self.max_steps_episode):
                if self.env_render:
                    self.env.render()

                # Action selection: purely random until the buffer is warmed
                # up, then actor output (with exploration noise / planning
                # during the exploration phase).
                if self.replay_buffer.size() < self.warmup_steps:
                    action = self.env.action_space.sample()
                else:
                    if self.action_type == 'Continuous':
                        if cur_episode < self.exploration_episodes and self.noise is not None:
                            if random.random() < use_k_step:
                                action, _ = self.planning_agent.step(state)
                            else:
                                action = np.clip(self.actor.predict(np.expand_dims(state, 0))[0]
                                                 + self.noise.generate(cur_episode), -1, 1)
                        else:
                            action = self.actor.predict(np.expand_dims(state, 0))[0]
                    else:
                        action = self.noise.generate(
                            self.actor.predict(np.expand_dims(state, 0))[0, 0], cur_episode)

                next_state, reward, terminal, info = self.env.step(action)

                # Feed the transition to the planning agent's model-learning
                # buffers: (state, action) -> (next_state, reward).
                new_observations_actions = np.concatenate((np.array(state), np.array(action)))
                new_target = np.concatenate((np.array(next_state), np.array([reward])))
                self.planning_agent.extra_observations_actions.append(new_observations_actions)
                self.planning_agent.extra_targets.append(new_target)

                self.replay_buffer.add(state, action, reward, terminal, next_state)

                # Learn only once enough experience has accumulated.
                if self.replay_buffer.size() > self.warmup_steps:
                    state_batch, action_batch, reward_batch, terminal_batch, next_state_batch = \
                        self.replay_buffer.sample_batch(self.mini_batch)

                    # Bootstrapped targets: y = r + (1 - done) * gamma * Q'(s', mu'(s')).
                    target_q = self.critic.predict_target(
                        next_state_batch, self.actor.predict_target(next_state_batch))
                    y_i = np.reshape(reward_batch, (self.mini_batch, 1)) \
                        + (1 - np.reshape(terminal_batch, (self.mini_batch, 1)).astype(float)) \
                        * self.gamma * np.reshape(target_q, (self.mini_batch, 1))

                    # Update the critic given the targets.
                    if self.action_type == 'Discrete':
                        action_batch = np.reshape(action_batch, [self.mini_batch, 1])
                    predicted_q_value, _ = self.critic.train(state_batch, action_batch, y_i)
                    episode_ave_max_q += np.amax(predicted_q_value)

                    # Update the actor with the deterministic policy gradient.
                    # (Both action types used identical code; collapsed from a
                    # duplicated if/else.)
                    a_outs = self.actor.predict(state_batch)
                    a_grads = self.critic.action_gradients(state_batch, a_outs)
                    self.actor.train(state_batch, a_grads[0])

                    # Soft-update target networks.
                    self.actor.update_target_network()
                    self.critic.update_target_network()

                state = next_state
                episode_reward += reward

                if terminal or cur_step == self.max_steps_episode - 1:
                    # Guard against division by zero when the episode
                    # terminates on the very first step (cur_step == 0).
                    steps = float(max(cur_step, 1))
                    train_episode_summary = tf.Summary()
                    train_episode_summary.value.add(simple_value=episode_reward, tag="train/episode_reward")
                    train_episode_summary.value.add(simple_value=episode_ave_max_q / steps, tag="train/episode_ave_max_q")
                    self.writer.add_summary(train_episode_summary, cur_episode)
                    self.writer.flush()
                    # Checkpoint whenever a new best episode reward is reached.
                    if episode_reward > max_reward:
                        max_reward = episode_reward
                        self.save()
                    print('Reward: %.2i' % int(episode_reward), ' | Episode', cur_episode,
                          '| Qmax: %.4f' % (episode_ave_max_q / steps))
                    break

    def evaluate(self, cur_episode):
        """Run ``self.eval_episodes`` greedy episodes and log the mean reward.

        Args:
            cur_episode: training episode index used as the summary step.
        """
        total_episode_reward = 0
        for eval_i in range(self.eval_episodes):
            state = self.env.reset()
            terminal = False
            while not terminal:
                # Greedy (noise-free) action from the actor.
                if self.action_type == 'Continuous':
                    action = self.actor.predict(np.expand_dims(state, 0))[0]
                else:
                    action = self.actor.predict(np.expand_dims(state, 0))[0, 0]
                state, reward, terminal, info = self.env.step(action)
                total_episode_reward += reward
        ave_episode_reward = total_episode_reward / float(self.eval_episodes)
        print("\nAverage reward {}\n".format(ave_episode_reward))

        # Add average reward to TensorBoard.
        eval_episode_summary = tf.Summary()
        eval_episode_summary.value.add(simple_value=ave_episode_reward, tag="eval/reward")
        self.writer.add_summary(eval_episode_summary, cur_episode)

    def save(self):
        """Save the session's variables to the default checkpoint path."""
        saver = tf.train.Saver()
        return saver.save(self.sess, "./tmp/model/model.ckpt")

    def load(self, path="./tmp/model/model.ckpt"):
        """Restore the session's variables from ``path``.

        Bug fix: the original restored into an undefined global ``sess``
        (NameError); it must use ``self.sess``.
        """
        saver = tf.train.Saver()
        saver.restore(self.sess, path)
| [
"tensorflow.Summary",
"numpy.reshape",
"tensorflow.train.Saver",
"src.agent.k_step_planning_agent_with_retrain.KStepPlanningAgent",
"numpy.array",
"numpy.expand_dims",
"random.random",
"numpy.amax"
] | [((1226, 1249), 'src.agent.k_step_planning_agent_with_retrain.KStepPlanningAgent', 'KStepPlanningAgent', (['env'], {}), '(env)\n', (1244, 1249), False, 'from src.agent.k_step_planning_agent_with_retrain import KStepPlanningAgent\n'), ((6981, 6993), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (6991, 6993), True, 'import tensorflow as tf\n'), ((7194, 7210), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7208, 7210), True, 'import tensorflow as tf\n'), ((7354, 7370), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7368, 7370), True, 'import tensorflow as tf\n'), ((4516, 4542), 'numpy.amax', 'np.amax', (['predicted_q_value'], {}), '(predicted_q_value)\n', (4523, 4542), True, 'import numpy as np\n'), ((5479, 5491), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (5489, 5491), True, 'import tensorflow as tf\n'), ((3039, 3054), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (3047, 3054), True, 'import numpy as np\n'), ((3056, 3072), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (3064, 3072), True, 'import numpy as np\n'), ((3122, 3142), 'numpy.array', 'np.array', (['next_state'], {}), '(next_state)\n', (3130, 3142), True, 'import numpy as np\n'), ((3144, 3162), 'numpy.array', 'np.array', (['[reward]'], {}), '([reward])\n', (3152, 3162), True, 'import numpy as np\n'), ((3946, 3992), 'numpy.reshape', 'np.reshape', (['reward_batch', '(self.mini_batch, 1)'], {}), '(reward_batch, (self.mini_batch, 1))\n', (3956, 3992), True, 'import numpy as np\n'), ((4334, 4380), 'numpy.reshape', 'np.reshape', (['action_batch', '[self.mini_batch, 1]'], {}), '(action_batch, [self.mini_batch, 1])\n', (4344, 4380), True, 'import numpy as np\n'), ((4138, 4180), 'numpy.reshape', 'np.reshape', (['target_q', '(self.mini_batch, 1)'], {}), '(target_q, (self.mini_batch, 1))\n', (4148, 4180), True, 'import numpy as np\n'), ((6519, 6543), 'numpy.expand_dims', 'np.expand_dims', (['state', '(0)'], {}), '(state, 0)\n', (6533, 6543), 
True, 'import numpy as np\n'), ((6618, 6642), 'numpy.expand_dims', 'np.expand_dims', (['state', '(0)'], {}), '(state, 0)\n', (6632, 6642), True, 'import numpy as np\n'), ((2362, 2377), 'random.random', 'random.random', ([], {}), '()\n', (2375, 2377), False, 'import random\n'), ((2730, 2754), 'numpy.expand_dims', 'np.expand_dims', (['state', '(0)'], {}), '(state, 0)\n', (2744, 2754), True, 'import numpy as np\n'), ((2858, 2882), 'numpy.expand_dims', 'np.expand_dims', (['state', '(0)'], {}), '(state, 0)\n', (2872, 2882), True, 'import numpy as np\n'), ((4030, 4078), 'numpy.reshape', 'np.reshape', (['terminal_batch', '(self.mini_batch, 1)'], {}), '(terminal_batch, (self.mini_batch, 1))\n', (4040, 4078), True, 'import numpy as np\n'), ((2570, 2594), 'numpy.expand_dims', 'np.expand_dims', (['state', '(0)'], {}), '(state, 0)\n', (2584, 2594), True, 'import numpy as np\n')] |
import os
import pickle
from logging import getLogger
import numpy as np
import pandas as pd
import cv2
import torch
from tqdm import tqdm
from scipy.interpolate import InterpolatedUnivariateSpline
from car_motion_attack.model_scnn import SCNNOpenPilot
from car_motion_attack.model_ultrafast import UltraFastOpenPilot
from car_motion_attack.model_polylanenet import PolyLaneNetOpenPilot
from car_motion_attack.config import MODEL_IMG_HEIGHT, MODEL_IMG_WIDTH
from car_motion_attack.car_motion import CarMotion
from car_motion_attack.load_sensor_data import load_sensor_data
from car_motion_attack.utils import AdamOpt, yuv2rgb, rgb2yuv
from car_motion_attack.replay_bicycle import ReplayBicycle
#from car_motion_attack.loss import compute_path_pinv, loss_func
from car_motion_attack.config import (DTYPE, PIXELS_PER_METER, SKY_HEIGHT, IMG_INPUT_SHAPE,
IMG_INPUT_MASK_SHAPE, RNN_INPUT_SHAPE,
MODEL_DESIRE_INPUT_SHAPE, MODEL_OUTPUT_SHAPE,
YUV_MIN, YUV_MAX
)
N_PREDICTIONS = 192
logger = getLogger(None)
class CarMotionAttack:
def __init__(
self,
list_bgr_img,
df_sensors,
global_bev_mask,
base_color,
roi_mat,
n_epoch=10000,
learning_rate_patch=1.0e-2,
learning_rate_color=1.0e-3,
scale=1,
result_dir='./result/',
perturbable_area_ratio=10,
is_attack_to_rigth=True,
left_lane_pos=4,
right_lane_pos=36,
src_corners=None,
target_deviation=0.5,
l2_weight=0.01,
target='laneatt',
ext_mat=None,
):
self.list_bgr_img = list_bgr_img
self.n_frames = len(list_bgr_img)
self.df_sensors = df_sensors
self.result_dir = result_dir
self.perturbable_area_ratio = perturbable_area_ratio
self.base_color = base_color
self.roi_mat = roi_mat
self.is_attack_to_rigth = is_attack_to_rigth
self.left_lane_pos = left_lane_pos
self.right_lane_pos = right_lane_pos
self.target_deviation = target_deviation
self.l2_weight = l2_weight
self.scale = scale
self.last_epoch = None
self.global_bev_mask = global_bev_mask
self.car_motion = CarMotion(
self.list_bgr_img,
self.df_sensors,
self.global_bev_mask,
self.roi_mat,
left_lane_pos=left_lane_pos,
right_lane_pos=right_lane_pos,
scale=scale,
src_corners=src_corners,
ext_mat=ext_mat
)
self.global_bev_purtabation = (
np.random.random(
(self.global_bev_mask.shape[0], self.global_bev_mask.shape[1], 6),
).astype(DTYPE)
* 1.0e-8
)
self.masked_global_bev_purtabation = self.global_bev_purtabation.copy()
self.global_base_color = np.array(
[base_color, 0, 0], dtype=DTYPE
) # np.zeros(3, dtype=DTYPE)
if target == 'scnn':
self.model = SCNNOpenPilot(ext_mat, mtx_bev2camera=self.car_motion.mtx_bev2camera)
elif target == 'laneatt':
from car_motion_attack.model_laneatt import LaneATTOpenPilot
self.model = LaneATTOpenPilot(ext_mat, mtx_bev2camera=self.car_motion.mtx_bev2camera)
elif target == 'ultrafast':
self.model = UltraFastOpenPilot(ext_mat, mtx_bev2camera=self.car_motion.mtx_bev2camera)
elif target == 'polylanenet':
self.model = PolyLaneNetOpenPilot(ext_mat, mtx_bev2camera=self.car_motion.mtx_bev2camera)
elif target == 'nan':
self.model = None
else:
raise Exception(f'Invalid target: {target}')
self.n_epoch = n_epoch + 1
self.learning_rate_patch = learning_rate_patch
self.learning_rate_color = learning_rate_color
#self._create_tf_variables()
    def run(
        self,
        lateral_shift=4,
        starting_meters=60,
        starting_steering_angle=True,
        starting_patch_dir=None,
        starting_patch_epoch=None,
        trajectory_update=True
    ):
        """Optimize the adversarial road patch for up to ``self.n_epoch`` epochs.

        Each epoch: render the current patch into every camera frame, run the
        lane model on each frame, optionally re-simulate the vehicle trajectory
        from the model outputs, then update the patch with signed input
        gradients through an Adam optimizer plus an L2 shrinkage term. The
        perturbation is periodically re-masked to keep only the top
        ``perturbable_area_ratio`` percent of pixels.

        Args:
            lateral_shift: initial lateral offset for the car-motion mask setup.
            starting_meters: longitudinal position (meters) where the patch starts.
            starting_steering_angle: forwarded to the trajectory update.
            starting_patch_dir: if set, resume from patch/color arrays saved
                under this path prefix at epoch ``starting_patch_epoch``.
            starting_patch_epoch: epoch suffix of the resume checkpoint.
            trajectory_update: when True, re-run the model per frame and update
                the simulated trajectory; when False only noise is applied.

        Side effects:
            Saves patch/color/model-output arrays under ``self.result_dir``
            every 50 epochs, sets ``self.last_epoch``, and stops early once
            the simulated lateral deviation exceeds ``self.target_deviation``.
        """
        logger.debug("enter")

        # initialize car model
        self.car_motion.setup_masks(
            lateral_shift=lateral_shift, starting_meters=starting_meters
        )

        # If no base color was given, estimate it as the mean road color
        # (Y channel) inside the first frame's BEV mask.
        if self.base_color is None:
            bev_img = self.car_motion.list_transform[0].bev_image
            bev_mask = self.car_motion.list_frame_mask[0].bev_mask
            self.base_color = rgb2yuv(np.array([[bev_img[bev_mask > 0].mean(axis=0).astype(int)]* 2] * 2))[0, 0, 0]

            self.global_base_color = np.array(
                [self.base_color, 0, 0], dtype=DTYPE
            )  # np.zeros(3, dtype=DTYPE)
        # ops
        # Optionally resume from a previously saved patch checkpoint.
        if starting_patch_dir is not None:
            self.global_bev_purtabation = np.load(
                starting_patch_dir + f"_global_patch_{starting_patch_epoch}.npy"
            )
            self.masked_global_bev_purtabation = np.load(
                starting_patch_dir + f"_global_masked_patch_{starting_patch_epoch}.npy"
            )

            self.global_base_color = np.load(
                starting_patch_dir + f"_global_base_color_{starting_patch_epoch}.npy"
            )

        adam_patch = AdamOpt(
            yuv2rgb(self.global_bev_purtabation).shape, lr=self.learning_rate_patch
        )
        # Base color broadcast to the 6-channel YUV patch layout (4x Y, 1x U, 1x V).
        color_6ch = np.array([self.global_base_color[0]] * 4 + [self.global_base_color[1]] + [self.global_base_color[2]])

        #self.sess.run(
        #    self.ops_base_color_update,
        #    feed_dict={self.yuv_color: self.global_base_color},
        #)
        # optimization iteration
        for epoch in tqdm(range(self.n_epoch)):
        #for epoch in tqdm(range(80)):
            logger.debug("start {}".format(epoch))

            logger.debug("calc model ouput")
            #model_img_inputs = self.car_motion.calc_model_inputs_rgb()

            logger.debug("apply global purtabation to each frame")
            # Render the masked perturbation (plus base color) into RGB and
            # project it into every camera frame.
            patch_yuv = self.masked_global_bev_purtabation + color_6ch
            patch_rgb = yuv2rgb(patch_yuv).clip(0, 255)
            #list_patches = self.car_motion.conv_patch2camera(patch_rgb)
            list_attacked_input = self.car_motion.calc_attacked_model_inputs_rgb(patch_rgb)

            logger.debug("update car trajectory")
            model_attack_outputs = []#np.vstack(self.sess.run(self.list_ops_predicts))
            #model_seg_pred = []
            model_output = np.ones(1760)
            if trajectory_update:
                for i in range(self.n_frames):
                    # NOTE(review): prediction failures are silently ignored;
                    # the previous frame's output is reused in that case.
                    try:
                        model_output = self.model.predict(list_attacked_input[i])
                    except:
                        pass
                    model_attack_outputs.append(model_output)
                    #model_seg_pred.append(self.model.seg_pred)
                model_attack_outputs = np.vstack(model_attack_outputs)

                self.car_motion.update_trajectory(
                    model_attack_outputs, start_steering_angle=starting_steering_angle, add_noise=True
                )
            else:
                model_attack_outputs = None
                self.car_motion.apply_noise()
            ### for debug
            #with open(self.result_dir + 'model_attack_input.pkl', 'wb') as f:
            #    pickle.dump(list_attacked_input, f, -1)
            #with open(self.result_dir + 'model_attack_outputs.pkl', 'wb') as f:
            #    pickle.dump(model_attack_outputs, f, -1)
            #with open(self.result_dir + 'model_seg_pred.pkl', 'wb') as f:
            #    pickle.dump(model_seg_pred, f, -1)

            #if self.car_motion.list_desired_steering_angle[0] < -20:
            #    break
            #import pdb;pdb.set_trace()
            logger.debug("calc gradients")
            list_var_grad = [self.model.get_input_gradient(list_attacked_input[i])
                             for i in range(self.n_frames)]
            list_var_grad = np.stack(list_var_grad)
            #np.save('list_var_grad', list_var_grad)
            logger.debug("conv gradients -> patch")

            logger.debug("agg patch grads")
            patch_grad = self._agg_gradients(list_var_grad)
            # Signed-gradient step (FGSM-style), scaled back to pixel range.
            patch_grad = np.sign(patch_grad) * 255
            #np.save('patch_grad', patch_grad)
            logger.debug("update global purtabation")

            patch_rgb = (patch_rgb - adam_patch.update(patch_grad / 255) * 255).clip(0, 255)
            # Learning-rate decay per epoch.
            adam_patch.lr *= 0.99
            #patch_rgb = (patch_rgb - patch_grad * self.learning_rate_patch).clip(0, 255)
            patch_yuv = rgb2yuv(patch_rgb)

            # Recover the perturbation in YUV and apply L2 shrinkage.
            perturb_yuv = patch_yuv - color_6ch
            perturb_yuv -= self.learning_rate_patch * 2 * self.l2_weight * perturb_yuv

            self.global_bev_purtabation = perturb_yuv #np.where(np.isnan(perturb_yuv), self.global_bev_purtabation, perturb_yuv)
            # Zero the last two channels (only Y/U/V carriers above are kept).
            self.global_bev_purtabation[:, :, 4:] = 0

            # Every 10 epochs, re-select the perturbable area: keep only the
            # top `perturbable_area_ratio` percent of pixels by magnitude.
            if (epoch) % min(10, self.n_epoch - 1) == 0:
                patch_diff = self.global_bev_purtabation.clip(0, None).sum(axis=2)
                patch_diff += np.random.random(patch_diff.shape) * 1.0e-8 # tie break

                threshold = np.percentile(patch_diff, 100 - self.perturbable_area_ratio)
                mask_bev_purtabation = patch_diff > threshold

                self.masked_global_bev_purtabation = self.global_bev_purtabation.copy()
                self.masked_global_bev_purtabation[~mask_bev_purtabation] = 0.
            else:
                self.masked_global_bev_purtabation = self.global_bev_purtabation.copy()

            # Clamp so base color + perturbation stays within the valid YUV range.
            self.masked_global_bev_purtabation = self.masked_global_bev_purtabation.clip(
                0, 0.83122042#- self.base_color
            )
            # Periodic checkpoint of patch state and model outputs.
            if (epoch) % min(50, self.n_epoch - 1) == 0 and epoch > 0:
                np.save(
                    self.result_dir + f"_global_patch_{epoch}",
                    self.global_bev_purtabation,
                )
                np.save(
                    self.result_dir + f"_global_masked_patch_{epoch}",
                    self.masked_global_bev_purtabation,
                )
                np.save(
                    self.result_dir + f"_global_base_color_{epoch}",
                    self.global_base_color,
                )
                #np.save(
                #    self.result_dir + f"model_img_inputs_{epoch}",
                #    np.stack(list_attacked_input),
                #)
                #model_imgs = np.vstack(self.sess.run(self.list_ops_model_img))
                np.save(
                    self.result_dir + f"model_outputs_{epoch}", model_attack_outputs
                )
                #np.save(self.result_dir + f"model_img_inputs_{epoch}", model_imgs)
                #logger.info(
                #    f"save epoch: {epoch + 1}, total_lat: {self.car_motion.list_total_lateral_shift} desired: {self.car_motion.list_desired_steering_angle}"
                #)
            if trajectory_update:
                #if (
                #    (self.is_attack_to_rigth and self.car_motion.list_lateral_shift_openpilot[-1] < - self.target_deviation) or
                #    ((not self.is_attack_to_rigth)
                #     and self.car_motion.list_lateral_shift_openpilot[-1] > self.target_deviation)
                #):
                # Early stop once the simulated deviation reaches the target.
                if np.abs(self.car_motion.list_lateral_shift_openpilot).max() > self.target_deviation:
                    logger.info(
                        f"Reached target deviation: {epoch + 1}, total_lat: {self.car_motion.list_lateral_shift_openpilot[-1]}"
                    )
                    self.last_epoch = epoch
                    break
            self.last_epoch = epoch
        logger.debug("exit")
    def replay(
        self,
        epoch,
        lateral_shift=4,
        starting_meters=60,
        starting_steering_angle=None,
        trajectory_update=True
    ):
        """Replay a saved patch checkpoint and record model inputs/outputs.

        Loads the patch, masked patch, and base color saved by :meth:`run`
        at ``epoch``, renders the patch into each frame, runs the lane model
        per frame (driving the trajectory simulation when
        ``trajectory_update`` is True), and pickles inputs, outputs, and lane
        predictions under ``<result_dir>/replay/``.

        Args:
            epoch: checkpoint epoch suffix to load.
            lateral_shift: initial lateral offset for mask setup.
            starting_meters: longitudinal start position (meters) of the patch.
            starting_steering_angle: forwarded to the trajectory generator.
            trajectory_update: when False, frames are processed without
                updating the simulated trajectory.

        Side effects:
            Writes six pickle files into ``<result_dir>/replay/`` (the
            directory must already exist) and sets ``self.last_epoch``.
        """
        logger.debug("enter")
        output_dir = self.result_dir + '/replay/'
        # initialize car model
        self.car_motion.setup_masks(
            lateral_shift=lateral_shift, starting_meters=starting_meters
        )
        self.global_bev_purtabation = np.load(
            self.result_dir + f"_global_patch_{epoch}.npy"
        )
        self.masked_global_bev_purtabation = np.load(
            self.result_dir + f"_global_masked_patch_{epoch}.npy"
        )
        self.global_base_color = np.load(
            self.result_dir + f"_global_base_color_{epoch}.npy"
        )
        # Base color broadcast to the 6-channel YUV patch layout (4x Y, 1x U, 1x V).
        color_6ch = np.array([self.global_base_color[0]] * 4 + [self.global_base_color[1]] + [self.global_base_color[2]])

        patch_yuv = self.masked_global_bev_purtabation + color_6ch
        patch_rgb = yuv2rgb(patch_yuv).clip(0, 255)

        model_image_inputs = []
        model_outputs = []
        model_lane_pred = []

        def pred_generator():
            # Yields one model output per frame, while appending the attacked
            # input, output, and lane-line predictions to the closure lists.
            model_output = np.zeros(MODEL_OUTPUT_SHAPE[1:])
            for i in range(self.n_frames):
                patch_model = self.car_motion.calc_attacked_model_inputs_rgb_each(
                    i, patch_rgb
                )
                # NOTE(review): prediction failures are silently ignored; the
                # previous frame's output is reused in that case.
                try:
                    model_output = self.model.predict(patch_model)
                except:
                    pass
                model_image_inputs.append(patch_model)
                model_outputs.append(model_output)
                model_lane_pred.append([self.model.left_line, self.model.right_line])
                yield model_output
        if trajectory_update:
            self.car_motion.update_trajectory_gen(
                pred_generator(), start_steering_angle=starting_steering_angle # with limit
            )
        else:
            # Drain the generator for its side effects only.
            for _ in pred_generator():
                pass

        ### for debug
        with open(output_dir + 'model_attack_input.pkl', 'wb') as f:
            pickle.dump(model_image_inputs, f, -1)
        with open(output_dir + 'model_attack_outputs.pkl', 'wb') as f:
            pickle.dump(model_outputs, f, -1)
        with open(output_dir + 'model_lane_pred.pkl', 'wb') as f:
            pickle.dump(model_lane_pred, f, -1)

        with open(output_dir + 'global_patch.pkl', 'wb') as f:
            pickle.dump(self.global_bev_purtabation, f, -1)
        with open(output_dir + 'global_masked_patch.pkl', 'wb') as f:
            pickle.dump(self.masked_global_bev_purtabation, f, -1)
        with open(output_dir + 'global_base_color.pkl', 'wb') as f:
            pickle.dump(self.global_base_color, f, -1)

        self.last_epoch = epoch
        logger.debug("exit")
def calc_metric(
self,
epoch,
target_frame,
lateral_shift=4,
starting_meters=60,
starting_steering_angle=None,
trajectory_update=True
):
logger.debug("enter")
output_dir = self.result_dir + '/replay/'
# initialize car model
self.car_motion.setup_masks(
lateral_shift=lateral_shift, starting_meters=starting_meters
)
self.global_bev_purtabation = np.load(
self.result_dir + f"_global_patch_{epoch}.npy"
)
self.masked_global_bev_purtabation = np.load(
self.result_dir + f"_global_masked_patch_{epoch}.npy"
)
self.global_base_color = np.load(
self.result_dir + f"_global_base_color_{epoch}.npy"
)
color_6ch = np.array([self.global_base_color[0]] * 4 + [self.global_base_color[1]] + [self.global_base_color[2]])
patch_yuv = self.masked_global_bev_purtabation + color_6ch
patch_rgb = yuv2rgb(patch_yuv).clip(0, 255)
model_image_inputs = []
model_outputs = []
model_lane_pred = []
def pred_generator():
model_output = np.zeros(MODEL_OUTPUT_SHAPE[1:])
gt_pos = self.df_sensors[['longitude_shift', 'lateral_shift']].values
for i in range(self.n_frames):
if i <= target_frame:
patch_model = self.car_motion.calc_attacked_model_inputs_rgb_each(
i, patch_rgb
)
try:
model_output = self.model.predict(patch_model)
except:
pass
model_image_inputs.append(patch_model)
else:
# centered vehicle
pos = np.array(gt_pos) - [gt_pos[i, 0], self.car_motion.total_lateral_shift]
# apply yaw
_cos = np.cos(self.car_motion.yaw)
_sin = - np.sin(self.car_motion.yaw)
mat_rotate = np.array([[_cos, -_sin], [_sin, _cos]])
pos = np.dot(pos, mat_rotate.T)
# interpolate
cs_path = InterpolatedUnivariateSpline(pos[:, 0], pos[:, 1], k=3, ext=3)
# draw
# 0.0585227139336881
x = np.arange(N_PREDICTIONS) - 12
y = cs_path(x) - 0.05# + (self.car_motion.toal_lateral_shift)# - lateral_pos[i])
path_start = 0
left_start = N_PREDICTIONS * 2
right_start = N_PREDICTIONS * 2 + N_PREDICTIONS * 2 + 1
model_output = np.ones(1760)
model_output[path_start:path_start + N_PREDICTIONS] = y
model_output[left_start:left_start + N_PREDICTIONS] = y
model_output[right_start:right_start + N_PREDICTIONS] = y
model_outputs.append(model_output)
model_lane_pred.append([self.model.left_line, self.model.right_line])
yield model_output
if trajectory_update:
self.car_motion.update_trajectory_gen(
pred_generator(), start_steering_angle=starting_steering_angle, target_frame=target_frame # with limit
)
else:
for _ in pred_generator():
pass
self.last_epoch = epoch
logger.debug("exit")
def _agg_gradients(self, list_var_grad):
"""
model_mask_areas = np.array(
[m.sum() for m in self.car_motion.get_all_camera_masks()]
)
weights = model_mask_areas / model_mask_areas.sum()
list_patch_grad = self.car_motion.conv_camera2patch(
list_var_grad
) # zero is missing value
for i in range(len(list_patch_grad)):
list_patch_grad[i] *= weights[i]
tmp = np.stack(list_patch_grad)
tmp = np.nanmean(tmp, axis=0)
tmp[np.isnan(tmp)] = 0
"""
list_patch_grad = self.car_motion.conv_camera2patch(
list_var_grad
) # zero is missing value
tmp = np.nanmean(list_patch_grad, axis=0)
tmp[np.isnan(tmp)] = 0
return tmp
| [
"logging.getLogger",
"car_motion_attack.model_polylanenet.PolyLaneNetOpenPilot",
"numpy.array",
"numpy.nanmean",
"numpy.sin",
"numpy.save",
"numpy.arange",
"car_motion_attack.car_motion.CarMotion",
"car_motion_attack.utils.rgb2yuv",
"numpy.random.random",
"car_motion_attack.model_ultrafast.Ultra... | [((1140, 1155), 'logging.getLogger', 'getLogger', (['None'], {}), '(None)\n', (1149, 1155), False, 'from logging import getLogger\n'), ((2360, 2565), 'car_motion_attack.car_motion.CarMotion', 'CarMotion', (['self.list_bgr_img', 'self.df_sensors', 'self.global_bev_mask', 'self.roi_mat'], {'left_lane_pos': 'left_lane_pos', 'right_lane_pos': 'right_lane_pos', 'scale': 'scale', 'src_corners': 'src_corners', 'ext_mat': 'ext_mat'}), '(self.list_bgr_img, self.df_sensors, self.global_bev_mask, self.\n roi_mat, left_lane_pos=left_lane_pos, right_lane_pos=right_lane_pos,\n scale=scale, src_corners=src_corners, ext_mat=ext_mat)\n', (2369, 2565), False, 'from car_motion_attack.car_motion import CarMotion\n'), ((3002, 3043), 'numpy.array', 'np.array', (['[base_color, 0, 0]'], {'dtype': 'DTYPE'}), '([base_color, 0, 0], dtype=DTYPE)\n', (3010, 3043), True, 'import numpy as np\n'), ((5524, 5630), 'numpy.array', 'np.array', (['([self.global_base_color[0]] * 4 + [self.global_base_color[1]] + [self.\n global_base_color[2]])'], {}), '([self.global_base_color[0]] * 4 + [self.global_base_color[1]] + [\n self.global_base_color[2]])\n', (5532, 5630), True, 'import numpy as np\n'), ((12422, 12477), 'numpy.load', 'np.load', (["(self.result_dir + f'_global_patch_{epoch}.npy')"], {}), "(self.result_dir + f'_global_patch_{epoch}.npy')\n", (12429, 12477), True, 'import numpy as np\n'), ((12545, 12607), 'numpy.load', 'np.load', (["(self.result_dir + f'_global_masked_patch_{epoch}.npy')"], {}), "(self.result_dir + f'_global_masked_patch_{epoch}.npy')\n", (12552, 12607), True, 'import numpy as np\n'), ((12663, 12723), 'numpy.load', 'np.load', (["(self.result_dir + f'_global_base_color_{epoch}.npy')"], {}), "(self.result_dir + f'_global_base_color_{epoch}.npy')\n", (12670, 12723), True, 'import numpy as np\n'), ((12767, 12873), 'numpy.array', 'np.array', (['([self.global_base_color[0]] * 4 + [self.global_base_color[1]] + [self.\n 
global_base_color[2]])'], {}), '([self.global_base_color[0]] * 4 + [self.global_base_color[1]] + [\n self.global_base_color[2]])\n', (12775, 12873), True, 'import numpy as np\n'), ((15298, 15353), 'numpy.load', 'np.load', (["(self.result_dir + f'_global_patch_{epoch}.npy')"], {}), "(self.result_dir + f'_global_patch_{epoch}.npy')\n", (15305, 15353), True, 'import numpy as np\n'), ((15421, 15483), 'numpy.load', 'np.load', (["(self.result_dir + f'_global_masked_patch_{epoch}.npy')"], {}), "(self.result_dir + f'_global_masked_patch_{epoch}.npy')\n", (15428, 15483), True, 'import numpy as np\n'), ((15539, 15599), 'numpy.load', 'np.load', (["(self.result_dir + f'_global_base_color_{epoch}.npy')"], {}), "(self.result_dir + f'_global_base_color_{epoch}.npy')\n", (15546, 15599), True, 'import numpy as np\n'), ((15643, 15749), 'numpy.array', 'np.array', (['([self.global_base_color[0]] * 4 + [self.global_base_color[1]] + [self.\n global_base_color[2]])'], {}), '([self.global_base_color[0]] * 4 + [self.global_base_color[1]] + [\n self.global_base_color[2]])\n', (15651, 15749), True, 'import numpy as np\n'), ((19071, 19106), 'numpy.nanmean', 'np.nanmean', (['list_patch_grad'], {'axis': '(0)'}), '(list_patch_grad, axis=0)\n', (19081, 19106), True, 'import numpy as np\n'), ((3149, 3218), 'car_motion_attack.model_scnn.SCNNOpenPilot', 'SCNNOpenPilot', (['ext_mat'], {'mtx_bev2camera': 'self.car_motion.mtx_bev2camera'}), '(ext_mat, mtx_bev2camera=self.car_motion.mtx_bev2camera)\n', (3162, 3218), False, 'from car_motion_attack.model_scnn import SCNNOpenPilot\n'), ((4745, 4791), 'numpy.array', 'np.array', (['[self.base_color, 0, 0]'], {'dtype': 'DTYPE'}), '([self.base_color, 0, 0], dtype=DTYPE)\n', (4753, 4791), True, 'import numpy as np\n'), ((4968, 5041), 'numpy.load', 'np.load', (["(starting_patch_dir + f'_global_patch_{starting_patch_epoch}.npy')"], {}), "(starting_patch_dir + f'_global_patch_{starting_patch_epoch}.npy')\n", (4975, 5041), True, 'import numpy as np\n'), ((5121, 
5206), 'numpy.load', 'np.load', (["(starting_patch_dir + f'_global_masked_patch_{starting_patch_epoch}.npy')"], {}), "(starting_patch_dir + f'_global_masked_patch_{starting_patch_epoch}.npy'\n )\n", (5128, 5206), True, 'import numpy as np\n'), ((5269, 5347), 'numpy.load', 'np.load', (["(starting_patch_dir + f'_global_base_color_{starting_patch_epoch}.npy')"], {}), "(starting_patch_dir + f'_global_base_color_{starting_patch_epoch}.npy')\n", (5276, 5347), True, 'import numpy as np\n'), ((6620, 6633), 'numpy.ones', 'np.ones', (['(1760)'], {}), '(1760)\n', (6627, 6633), True, 'import numpy as np\n'), ((8144, 8167), 'numpy.stack', 'np.stack', (['list_var_grad'], {}), '(list_var_grad)\n', (8152, 8167), True, 'import numpy as np\n'), ((8788, 8806), 'car_motion_attack.utils.rgb2yuv', 'rgb2yuv', (['patch_rgb'], {}), '(patch_rgb)\n', (8795, 8806), False, 'from car_motion_attack.utils import AdamOpt, yuv2rgb, rgb2yuv\n'), ((13136, 13168), 'numpy.zeros', 'np.zeros', (['MODEL_OUTPUT_SHAPE[1:]'], {}), '(MODEL_OUTPUT_SHAPE[1:])\n', (13144, 13168), True, 'import numpy as np\n'), ((14109, 14147), 'pickle.dump', 'pickle.dump', (['model_image_inputs', 'f', '(-1)'], {}), '(model_image_inputs, f, -1)\n', (14120, 14147), False, 'import pickle\n'), ((14231, 14264), 'pickle.dump', 'pickle.dump', (['model_outputs', 'f', '(-1)'], {}), '(model_outputs, f, -1)\n', (14242, 14264), False, 'import pickle\n'), ((14343, 14378), 'pickle.dump', 'pickle.dump', (['model_lane_pred', 'f', '(-1)'], {}), '(model_lane_pred, f, -1)\n', (14354, 14378), False, 'import pickle\n'), ((14455, 14502), 'pickle.dump', 'pickle.dump', (['self.global_bev_purtabation', 'f', '(-1)'], {}), '(self.global_bev_purtabation, f, -1)\n', (14466, 14502), False, 'import pickle\n'), ((14585, 14639), 'pickle.dump', 'pickle.dump', (['self.masked_global_bev_purtabation', 'f', '(-1)'], {}), '(self.masked_global_bev_purtabation, f, -1)\n', (14596, 14639), False, 'import pickle\n'), ((14720, 14762), 'pickle.dump', 'pickle.dump', 
(['self.global_base_color', 'f', '(-1)'], {}), '(self.global_base_color, f, -1)\n', (14731, 14762), False, 'import pickle\n'), ((16012, 16044), 'numpy.zeros', 'np.zeros', (['MODEL_OUTPUT_SHAPE[1:]'], {}), '(MODEL_OUTPUT_SHAPE[1:])\n', (16020, 16044), True, 'import numpy as np\n'), ((19119, 19132), 'numpy.isnan', 'np.isnan', (['tmp'], {}), '(tmp)\n', (19127, 19132), True, 'import numpy as np\n'), ((3351, 3423), 'car_motion_attack.model_laneatt.LaneATTOpenPilot', 'LaneATTOpenPilot', (['ext_mat'], {'mtx_bev2camera': 'self.car_motion.mtx_bev2camera'}), '(ext_mat, mtx_bev2camera=self.car_motion.mtx_bev2camera)\n', (3367, 3423), False, 'from car_motion_attack.model_laneatt import LaneATTOpenPilot\n'), ((5421, 5457), 'car_motion_attack.utils.yuv2rgb', 'yuv2rgb', (['self.global_bev_purtabation'], {}), '(self.global_bev_purtabation)\n', (5428, 5457), False, 'from car_motion_attack.utils import AdamOpt, yuv2rgb, rgb2yuv\n'), ((7044, 7075), 'numpy.vstack', 'np.vstack', (['model_attack_outputs'], {}), '(model_attack_outputs)\n', (7053, 7075), True, 'import numpy as np\n'), ((8405, 8424), 'numpy.sign', 'np.sign', (['patch_grad'], {}), '(patch_grad)\n', (8412, 8424), True, 'import numpy as np\n'), ((9383, 9443), 'numpy.percentile', 'np.percentile', (['patch_diff', '(100 - self.perturbable_area_ratio)'], {}), '(patch_diff, 100 - self.perturbable_area_ratio)\n', (9396, 9443), True, 'import numpy as np\n'), ((10021, 10106), 'numpy.save', 'np.save', (["(self.result_dir + f'_global_patch_{epoch}')", 'self.global_bev_purtabation'], {}), "(self.result_dir + f'_global_patch_{epoch}', self.global_bev_purtabation\n )\n", (10028, 10106), True, 'import numpy as np\n'), ((10177, 10276), 'numpy.save', 'np.save', (["(self.result_dir + f'_global_masked_patch_{epoch}')", 'self.masked_global_bev_purtabation'], {}), "(self.result_dir + f'_global_masked_patch_{epoch}', self.\n masked_global_bev_purtabation)\n", (10184, 10276), True, 'import numpy as np\n'), ((10347, 10432), 'numpy.save', 'np.save', 
(["(self.result_dir + f'_global_base_color_{epoch}')", 'self.global_base_color'], {}), "(self.result_dir + f'_global_base_color_{epoch}', self.global_base_color\n )\n", (10354, 10432), True, 'import numpy as np\n'), ((10749, 10822), 'numpy.save', 'np.save', (["(self.result_dir + f'model_outputs_{epoch}')", 'model_attack_outputs'], {}), "(self.result_dir + f'model_outputs_{epoch}', model_attack_outputs)\n", (10756, 10822), True, 'import numpy as np\n'), ((12958, 12976), 'car_motion_attack.utils.yuv2rgb', 'yuv2rgb', (['patch_yuv'], {}), '(patch_yuv)\n', (12965, 12976), False, 'from car_motion_attack.utils import AdamOpt, yuv2rgb, rgb2yuv\n'), ((15834, 15852), 'car_motion_attack.utils.yuv2rgb', 'yuv2rgb', (['patch_yuv'], {}), '(patch_yuv)\n', (15841, 15852), False, 'from car_motion_attack.utils import AdamOpt, yuv2rgb, rgb2yuv\n'), ((2728, 2816), 'numpy.random.random', 'np.random.random', (['(self.global_bev_mask.shape[0], self.global_bev_mask.shape[1], 6)'], {}), '((self.global_bev_mask.shape[0], self.global_bev_mask.shape\n [1], 6))\n', (2744, 2816), True, 'import numpy as np\n'), ((3485, 3559), 'car_motion_attack.model_ultrafast.UltraFastOpenPilot', 'UltraFastOpenPilot', (['ext_mat'], {'mtx_bev2camera': 'self.car_motion.mtx_bev2camera'}), '(ext_mat, mtx_bev2camera=self.car_motion.mtx_bev2camera)\n', (3503, 3559), False, 'from car_motion_attack.model_ultrafast import UltraFastOpenPilot\n'), ((6223, 6241), 'car_motion_attack.utils.yuv2rgb', 'yuv2rgb', (['patch_yuv'], {}), '(patch_yuv)\n', (6230, 6241), False, 'from car_motion_attack.utils import AdamOpt, yuv2rgb, rgb2yuv\n'), ((9299, 9333), 'numpy.random.random', 'np.random.random', (['patch_diff.shape'], {}), '(patch_diff.shape)\n', (9315, 9333), True, 'import numpy as np\n'), ((16783, 16810), 'numpy.cos', 'np.cos', (['self.car_motion.yaw'], {}), '(self.car_motion.yaw)\n', (16789, 16810), True, 'import numpy as np\n'), ((16901, 16940), 'numpy.array', 'np.array', (['[[_cos, -_sin], [_sin, _cos]]'], {}), '([[_cos, 
-_sin], [_sin, _cos]])\n', (16909, 16940), True, 'import numpy as np\n'), ((16967, 16992), 'numpy.dot', 'np.dot', (['pos', 'mat_rotate.T'], {}), '(pos, mat_rotate.T)\n', (16973, 16992), True, 'import numpy as np\n'), ((17057, 17119), 'scipy.interpolate.InterpolatedUnivariateSpline', 'InterpolatedUnivariateSpline', (['pos[:, 0]', 'pos[:, 1]'], {'k': '(3)', 'ext': '(3)'}), '(pos[:, 0], pos[:, 1], k=3, ext=3)\n', (17085, 17119), False, 'from scipy.interpolate import InterpolatedUnivariateSpline\n'), ((17584, 17597), 'numpy.ones', 'np.ones', (['(1760)'], {}), '(1760)\n', (17591, 17597), True, 'import numpy as np\n'), ((3623, 3699), 'car_motion_attack.model_polylanenet.PolyLaneNetOpenPilot', 'PolyLaneNetOpenPilot', (['ext_mat'], {'mtx_bev2camera': 'self.car_motion.mtx_bev2camera'}), '(ext_mat, mtx_bev2camera=self.car_motion.mtx_bev2camera)\n', (3643, 3699), False, 'from car_motion_attack.model_polylanenet import PolyLaneNetOpenPilot\n'), ((16653, 16669), 'numpy.array', 'np.array', (['gt_pos'], {}), '(gt_pos)\n', (16661, 16669), True, 'import numpy as np\n'), ((16840, 16867), 'numpy.sin', 'np.sin', (['self.car_motion.yaw'], {}), '(self.car_motion.yaw)\n', (16846, 16867), True, 'import numpy as np\n'), ((17254, 17278), 'numpy.arange', 'np.arange', (['N_PREDICTIONS'], {}), '(N_PREDICTIONS)\n', (17263, 17278), True, 'import numpy as np\n'), ((11556, 11608), 'numpy.abs', 'np.abs', (['self.car_motion.list_lateral_shift_openpilot'], {}), '(self.car_motion.list_lateral_shift_openpilot)\n', (11562, 11608), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 28 19:36:17 2016
@author: Xiaoqing
"""
from Layer import Layer
from Weights import Weights
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import numpy as np
import copy
class NeuralNet():
    """Feed-forward neural network trained with hand-rolled backpropagation.

    The input layer is fixed at 784 units and the output layer at 10
    classes; ``n_Hlayer`` hidden layers of ``n_node`` units sit in between.
    ``alpha`` is the learning rate used for every weight update.
    """
    def __init__(self, n_Hlayer, n_node, alpha, activation):
        self.n_Hlayer = n_Hlayer
        self.n_node = n_node
        n_node_in = 784
        self.n_node_out = 10
        self.val_acc = 0
        self.alpha = alpha
        self.activation = activation
        # Build the layer stack: input, hidden layers, then output.
        self.Net = [Layer(0, n_node_in, self.activation)]
        for depth in range(1, n_Hlayer + 1):
            self.Net.append(Layer(depth, self.n_node, self.activation))
        # Sigmoid networks keep a sigmoid output; relu networks use softmax.
        if self.activation == 'sigmoid':
            self.Net.append(Layer(n_Hlayer + 1, self.n_node_out, self.activation))
        elif self.activation == 'relu':
            self.Net.append(Layer(n_Hlayer + 1, self.n_node_out, 'softmax'))
        # One weight bundle between each pair of consecutive layers.
        self.WR = [Weights(self.Net[idx], self.Net[idx + 1])
                   for idx in range(n_Hlayer + 1)]

    def ytrain_preprocess(self, y):
        """One-hot encode the integer labels in *y* as rows of an array."""
        encoded = []
        for label in y:
            one_hot = np.zeros(self.n_node_out)
            one_hot[label] = 1
            encoded.append(one_hot)
        return np.array(encoded)

    def FeedForward(self, sample):
        """Propagate *sample* through every layer; return the output vector."""
        self.Net[0].outputs0(sample)
        for idx in range(1, self.n_Hlayer + 2):
            self.Net[idx].compute_outputs(self.Net[idx - 1], self.WR[idx - 1])
        self.y_pre = self.Net[-1].outputs
        return self.Net[-1].outputs

    def BackPropagation(self, y):
        """Propagate the output error back through every layer."""
        error = y - self.Net[-1].outputs
        self.Net[-1].compute_deltas0(error)
        for layer in reversed(self.Net[:-1]):
            idx = layer.layer_index
            self.Net[idx].compute_deltas(self.WR[idx], self.Net[idx + 1])

    def WeightUpdate(self):
        """Apply one gradient step to every weight bundle."""
        for idx in range(self.n_Hlayer + 1):
            self.WR[idx].weights_update(self.alpha)

    def train_a_sample(self, sample, y):
        """One online-SGD step: forward pass, backward pass, weight update."""
        self.FeedForward(sample)
        self.BackPropagation(y)
        self.WeightUpdate()

    def train(self, X, y, iteration):
        """Train for *iteration* epochs on a 70/30 train/validation split."""
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3)
        y_new = self.ytrain_preprocess(y_train)
        self.Xtest = X_test
        self.ytest = y_test
        self.tra_acc = np.zeros(iteration)
        self.val_acc = np.zeros(iteration)
        for epoch in range(iteration):
            print("iteration: ", epoch)
            for s in range(len(y_train)):
                self.train_a_sample(X_train[s], y_new[s])
            yp_train = self.predict(X_train)
            self.tra_acc[epoch] = accuracy_score(y_train, yp_train)
            print('training accuracy:', self.tra_acc[epoch])
            yp_test = self.predict(X_test)
            self.val_acc[epoch] = accuracy_score(y_test, yp_test)
            print('validation accuracy:', self.val_acc[epoch])

    def predict(self, X):
        """Return the argmax class index for every row of *X*."""
        n_samples, _ = np.shape(X)
        predictions = np.zeros(n_samples)
        for row, sample in enumerate(X):
            predictions[row] = np.argmax(self.FeedForward(sample))
        return predictions
| [
"Weights.Weights",
"sklearn.model_selection.train_test_split",
"numpy.argmax",
"numpy.array",
"numpy.zeros",
"Layer.Layer",
"numpy.shape",
"sklearn.metrics.accuracy_score"
] | [((1344, 1359), 'numpy.array', 'np.array', (['y_new'], {}), '(y_new)\n', (1352, 1359), True, 'import numpy as np\n'), ((2431, 2468), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)'}), '(X, y, test_size=0.3)\n', (2447, 2468), False, 'from sklearn.model_selection import train_test_split\n'), ((2598, 2617), 'numpy.zeros', 'np.zeros', (['iteration'], {}), '(iteration)\n', (2606, 2617), True, 'import numpy as np\n'), ((2641, 2660), 'numpy.zeros', 'np.zeros', (['iteration'], {}), '(iteration)\n', (2649, 2660), True, 'import numpy as np\n'), ((3240, 3251), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (3248, 3251), True, 'import numpy as np\n'), ((3266, 3277), 'numpy.zeros', 'np.zeros', (['m'], {}), '(m)\n', (3274, 3277), True, 'import numpy as np\n'), ((570, 606), 'Layer.Layer', 'Layer', (['(0)', 'n_node_in', 'self.activation'], {}), '(0, n_node_in, self.activation)\n', (575, 606), False, 'from Layer import Layer\n'), ((1247, 1264), 'numpy.zeros', 'np.zeros', (['n_label'], {}), '(n_label)\n', (1255, 1264), True, 'import numpy as np\n'), ((2919, 2952), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_train', 'yp_train'], {}), '(y_train, yp_train)\n', (2933, 2952), False, 'from sklearn.metrics import accuracy_score\n'), ((3091, 3122), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'yp_test'], {}), '(y_test, yp_test)\n', (3105, 3122), False, 'from sklearn.metrics import accuracy_score\n'), ((3442, 3455), 'numpy.argmax', 'np.argmax', (['yp'], {}), '(yp)\n', (3451, 3455), True, 'import numpy as np\n'), ((700, 742), 'Layer.Layer', 'Layer', (['layer', 'self.n_node', 'self.activation'], {}), '(layer, self.n_node, self.activation)\n', (705, 742), False, 'from Layer import Layer\n'), ((815, 868), 'Layer.Layer', 'Layer', (['(n_Hlayer + 1)', 'self.n_node_out', 'self.activation'], {}), '(n_Hlayer + 1, self.n_node_out, self.activation)\n', (820, 868), False, 'from Layer import Layer\n'), ((1055, 1108), 
'Weights.Weights', 'Weights', (['self.Net[layer_ind]', 'self.Net[layer_ind + 1]'], {}), '(self.Net[layer_ind], self.Net[layer_ind + 1])\n', (1062, 1108), False, 'from Weights import Weights\n'), ((937, 984), 'Layer.Layer', 'Layer', (['(n_Hlayer + 1)', 'self.n_node_out', '"""softmax"""'], {}), "(n_Hlayer + 1, self.n_node_out, 'softmax')\n", (942, 984), False, 'from Layer import Layer\n')] |
# ==============================================================================
# MIT License
#
# Copyright 2020 Institute for Automotive Engineering of RWTH Aachen University.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
import numpy as np
# Camera-name mapping for dataset 1_FRLR:
#   right_front -> front
#   right_back  -> right
#   left_front  -> left
#   left_back   -> rear
# One 3x3 matrix per camera (layout matches a pinhole intrinsic matrix:
# fx, skew, cx / 0, fy, cy / 0, 0, 1 — confirm against the calibration source).
_H_FRONT = [[660.228420, 0.452129, 619.723806],
            [0.000000, 661.579398, 356.276106],
            [0.000000, 0.000000, 1.000000]]
_H_REAR = [[674.732226, -1.280735, 640.149919],
           [0.000000, 675.114779, 388.737670],
           [0.000000, 0.000000, 1.000000]]
_H_LEFT = [[664.982627, 1.923222, 635.891510],
           [0.000000, 663.965607, 356.638070],
           [0.000000, 0.000000, 1.000000]]
_H_RIGHT = [[671.771379, 1.406491, 582.080233],
            [0.000000, 669.243052, 373.943015],
            [0.000000, 0.000000, 1.000000]]

# Order matters: front, rear, left, right.
H = [np.array(_H_FRONT), np.array(_H_REAR), np.array(_H_LEFT), np.array(_H_RIGHT)]
| [
"numpy.array"
] | [((1450, 1547), 'numpy.array', 'np.array', (['[[660.22842, 0.452129, 619.723806], [0.0, 661.579398, 356.276106], [0.0, \n 0.0, 1.0]]'], {}), '([[660.22842, 0.452129, 619.723806], [0.0, 661.579398, 356.276106],\n [0.0, 0.0, 1.0]])\n', (1458, 1547), True, 'import numpy as np\n'), ((1599, 1697), 'numpy.array', 'np.array', (['[[674.732226, -1.280735, 640.149919], [0.0, 675.114779, 388.73767], [0.0, \n 0.0, 1.0]]'], {}), '([[674.732226, -1.280735, 640.149919], [0.0, 675.114779, 388.73767],\n [0.0, 0.0, 1.0]])\n', (1607, 1697), True, 'import numpy as np\n'), ((1748, 1844), 'numpy.array', 'np.array', (['[[664.982627, 1.923222, 635.89151], [0.0, 663.965607, 356.63807], [0.0, 0.0,\n 1.0]]'], {}), '([[664.982627, 1.923222, 635.89151], [0.0, 663.965607, 356.63807],\n [0.0, 0.0, 1.0]])\n', (1756, 1844), True, 'import numpy as np\n'), ((1896, 1994), 'numpy.array', 'np.array', (['[[671.771379, 1.406491, 582.080233], [0.0, 669.243052, 373.943015], [0.0, \n 0.0, 1.0]]'], {}), '([[671.771379, 1.406491, 582.080233], [0.0, 669.243052, 373.943015],\n [0.0, 0.0, 1.0]])\n', (1904, 1994), True, 'import numpy as np\n')] |
# Something
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Magic to get the library directory properly
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
from common import moving_average
def graph_seed_scatter(sdlabel,
                       xdata,
                       ydata,
                       ax,
                       mtitle = '',
                       xtitle = '',
                       ytitle = '',
                       color = 'b',
                       xlabel = True):
    r""" Scatter-plot one seed's (x, y) series onto *ax*.

    Sets the axis title and y-label unconditionally; the x-label only when
    *xlabel* is truthy. *sdlabel* becomes the legend label of the series.
    """
    ax.set_title(mtitle)
    ax.set_ylabel(ytitle)
    if xlabel:
        ax.set_xlabel(xtitle)
    ax.scatter(xdata, ydata, color=color, label=sdlabel)
def graph_seed_temperature(sd,
                           ax,
                           color = 'b',
                           xlabel = True):
    r""" Plots the temperature of the seed as measured by kinetic theory.

    Fix: the *xlabel* flag is now forwarded to ``graph_seed_scatter``
    (it was previously accepted but ignored).
    """
    graph_seed_scatter(sd.label, sd.df["timestep"], sd.df["T"], ax,
                       mtitle = "Temperature", xtitle = "Timestep",
                       ytitle = "Temperature (kT)", color = color,
                       xlabel = xlabel)
def graph_seed_pressure(sd,
                        ax,
                        color = 'b',
                        xlabel = True):
    r""" Plots the pressure of the seed as measured by stress tensor.

    Fixes: the *xlabel* flag is now forwarded (previously accepted but
    ignored); the y-axis label typo "Presure" is corrected; and the label
    is a raw string so ``\s`` is no longer an invalid escape sequence.
    """
    graph_seed_scatter(sd.label, sd.df["timestep"], sd.df["P"], ax,
                       mtitle = "Pressure", xtitle = "Timestep",
                       ytitle = r"Pressure (kT/$\sigma^{3}$)",
                       color = color, xlabel = xlabel)
def graph_seed_area(sd,
                    ax,
                    color = 'b',
                    xlabel = 'True'):
    r""" Plots the area of the box in the XY dimension.

    Fixes: the *xlabel* flag is now forwarded (previously accepted but
    ignored), and the y-axis label is a raw string so ``\s`` is no longer
    an invalid escape sequence.
    """
    graph_seed_scatter(sd.label, sd.df["timestep"], sd.df["membrane_area"], ax,
                       mtitle = "Membrane Area", xtitle = "Timestep",
                       ytitle = r"Membrane area ($\sigma^{2}$)",
                       color = color, xlabel = xlabel)
def graph_seed_lifetime_distribution(sdlabel,
                                     df,
                                     axarr,
                                     mtitle = '',
                                     xtitle = '',
                                     ytitle = '',
                                     color = 'b',
                                     xlabel = True):
    r""" Plot histograms of septin attachment lifetimes, one panel per state.

    Each named column of *df* is binned into 10 bins over [0, 200) and drawn
    as a bar chart on its own axis of the 3x2 grid; the unused bottom-right
    axis is switched off.
    """
    panels = (('free', axarr[0][0], "Free"),
              ('near', axarr[0][1], "Near"),
              ('surface', axarr[1][0], "Surface"),
              ('intermediate', axarr[1][1], "Intermediate"),
              ('deep', axarr[2][0], "Deep"))
    bin_mids = None
    for column, axis, title in panels:
        counts, edges = np.histogram(df[column], bins = 10, range = (0, 200))
        if bin_mids is None:
            # Same bins/range everywhere, so mids and width are computed once.
            bin_mids = moving_average(edges)
            bin_width = bin_mids[1] - bin_mids[0]
        axis.bar(bin_mids, counts, bin_width)
        axis.set_title(title)
    for row in axarr:
        for axis in row:
            axis.set_xlabel(xtitle)
            axis.set_ylabel(ytitle)
    axarr[-1][-1].axis('off')
def suq_curve(q, N, A, kc, gamma):
    """Continuum fluctuation spectrum: (N/A) / (kc*q^4 + gamma*q^2)."""
    number_density = N / A
    restoring = kc * q**4 + gamma * q**2
    return number_density / restoring
def graph_seed_membranemodes(sd,
                             ax,
                             color = 'b',
                             xlabel = True):
    r""" Plot the membrane height-mode spectrum and fit continuum moduli.

    Scatters the FFT-based spectrum stored in ``sd.df``, fits ``suq_curve``
    to it with and without a surface-tension term, overlays both fits on a
    log-log axis, prints the fitted parameters, and dumps the raw spectrum
    to ``dumpfile_hoomd.csv``.
    """
    # Drop the NaN padding from the stored spectrum columns.
    x_fft = sd.df['x_fft'].to_numpy()
    su_fft = sd.df['su_fft'].to_numpy()
    x_fft = x_fft[~np.isnan(x_fft)]
    su_fft = su_fft[~np.isnan(su_fft)]
    ## XXX: Remove the direct calculation as it is hard to perform on a non-uniform grid
    #x_direct = sd.df['x_direct'].to_numpy()
    #su_direct = sd.df['su_direct'].to_numpy()
    #x_direct = x_direct[~np.isnan(x_direct)]
    #su_direct = su_direct[~np.isnan(su_direct)]
    # Skip the first entry (index 0) when plotting the spectrum.
    ax.scatter(x_fft[1:], su_fft[1:], color = 'b', marker = '+', linewidth = 1)
    #ax.scatter(x_direct[1:], su_direct[1:], color = 'r', marker = 'o', s = 80, facecolors = 'none')
    # Figure out where cutoff etc are
    qcutoff_mean = np.nanmean(sd.df['uq_2d_fft_qcutoff'].to_numpy())
    area_mean = np.nanmean(sd.df['membrane_area'].to_numpy())
    # Fit window: first mode above the cutoff up to the first mode above q = 1.
    # Figure out where the lower cutoff is
    idx = np.where(np.greater(x_fft, qcutoff_mean))
    idx = np.int32(idx[0][0])
    jdx = np.where(np.greater(x_fft, 1.0))
    jdx = np.int32(jdx[0][0])
    # Generate some guesses for the fit
    nlipids_per_leaflet = sd.lipids.nlipids_per_leaflet
    # Invert suq_curve (with gamma = 0) at the first in-window mode for a kc seed.
    kcguess1 = sd.kT * nlipids_per_leaflet / area_mean / su_fft[idx] / (x_fft[idx]**4)
    # Try to do 2 fits, with and without the surface tension term
    from scipy.optimize import curve_fit
    popt_fft_kc, pcov_fft_kc = curve_fit(lambda q, kc: suq_curve(q, nlipids_per_leaflet, area_mean, kc, 0.0), x_fft[idx:jdx], su_fft[idx:jdx], bounds = ([0.0, np.inf]), p0 = [kcguess1])
    popt_fft_ga, pcov_ga = curve_fit(lambda q, kc, gamma: suq_curve(q, nlipids_per_leaflet, area_mean, kc, gamma), x_fft[idx:jdx], su_fft[idx:jdx], bounds = ([0.0, -np.inf], [np.inf, np.inf]), p0 = [kcguess1, 0.0])
    #popt_direct_kc, pcov_direct_kc = curve_fit(lambda q, kc: suq_curve(q, nlipids_per_leaflet, area_mean, kc, 0.0), x_direct[idx:jdx], su_direct[idx:jdx], bounds = ([0.0, np.inf]), p0 = [kcguess1])
    #popt_direct_ga, pcov_direct_ga = curve_fit(lambda q, kc, gamma: suq_curve(q, nlipids_per_leaflet, area_mean, kc, gamma), x_direct[idx:jdx], su_direct[idx:jdx], bounds = ([0.0, -np.inf], [np.inf, np.inf]), p0 = [kcguess1, 0.0])
    print(f"Simuation fit values:")
    print(f"  kc(guess): {kcguess1}")
    print(f"  ----No gamma----")
    print(f"  FFT kc = {popt_fft_kc[0]}")
    #print(f"  Direct kc = {popt_direct_kc[0]}")
    print(f"  ----With gamma----")
    print(f"  FFT kc, gamma = {popt_fft_ga[0]}, {popt_fft_ga[1]}")
    #print(f"  Direct kc, gamma = {popt_direct_ga[0]}, {popt_direct_ga[1]}")
    # Overlay both fits (dashed: gamma = 0, dotted: free gamma) over the window.
    ax.plot(x_fft[idx:jdx], suq_curve(x_fft[idx:jdx], N = nlipids_per_leaflet, A = area_mean, kc = popt_fft_kc[0], gamma = 0.0), color = 'b', linestyle = '--')
    ax.plot(x_fft[idx:jdx], suq_curve(x_fft[idx:jdx], N = nlipids_per_leaflet, A = area_mean, kc = popt_fft_ga[0], gamma = popt_fft_ga[1]), color = 'b', linestyle = ':')
    #ax.plot(x_direct[idx:jdx], suq_curve(x_direct[idx:jdx], N = nlipids_per_leaflet, A = area_mean, kc = popt_direct_kc[0], gamma = 0.0), color = 'r', linestyle = '--')
    #ax.plot(x_direct[idx:jdx], suq_curve(x_direct[idx:jdx], N = nlipids_per_leaflet, A = area_mean, kc = popt_direct_ga[0], gamma = popt_direct_ga[1]), color = 'r', linestyle = ':')
    #ax.plot(x_fft[idx:jdx], suq_curve(x_fft[idx:jdx], N = nlipids_per_leaflet, A = area_mean, kc = 1.0, gamma = 0.0), color = 'm', linestyle = '--')
    # Plot the vertical line for qcutoff
    ax.axvline(x = qcutoff_mean, ymin = 0, ymax = 1.0, color = 'k', linestyle = '-')
    ax.set_ylim(1e-1, 1e6)
    ax.set_yscale('log')
    ax.set_xscale('log')
    ax.set_title("Membrane Modes")
    if xlabel: ax.set_xlabel(r'q ($\sigma^{-1}$)')
    ax.set_ylabel(r'$ N \langle | u(q) |^{2} \rangle $ ($\sigma^{2}$)')
    # XXX: Get out the data, remove this once LAMMPS is integrated into dragonfruit
    print(f"WARNING: Dumping CSV file of membrane modes, please remove later!")
    with open('dumpfile_hoomd.csv', 'w') as stream:
        # Column 'other' carries two scalars (area, lipids/leaflet); the
        # spectrum columns are concatenated alongside.
        dfs = []
        leafletareadf = pd.DataFrame([area_mean, nlipids_per_leaflet], columns=['other'])
        #valuesdf = sd.df[['x_fft', 'su_fft', 'x_direct', 'su_direct']]
        valuesdf = sd.df[['x_fft', 'su_fft']]
        dfs.append(leafletareadf)
        dfs.append(valuesdf)
        mdf = pd.concat(dfs, axis=1)
        mdf.to_csv(stream, index=False)
def msd_fit(t, n, D):
    """Ideal diffusive mean-squared displacement: 2*n*D*t for n dimensions."""
    return 2*n*D*t
def graph_seed_simplespheres_msd(sdlabel,
                                 deltatau,
                                 df,
                                 ax,
                                 mtitle = '',
                                 xtitle = '',
                                 ytitle = '',
                                 color = 'b',
                                 xlabel = True):
    r""" Plot the simple-sphere MSD and overlay a linear diffusive fit.

    The ``msd_simple`` column of *df* is plotted against real time
    (``timestep * deltatau``); a one-parameter fit of ``2*3*D*t`` extracts
    the 3D diffusion constant, which is printed and drawn as a dashed line.

    Fixes: the duplicate ``ax.set_title(mtitle)`` call was removed, and the
    *color* parameter is now honored (the scatter color was hard-coded 'b').
    """
    timesteps = df['timestep'].to_numpy()*deltatau
    msd = df['msd_simple'].to_numpy()
    ax.scatter(timesteps, msd, color = color, marker = 'o', s = 80, facecolors = 'none')
    # Fit MSD = 2*n*D*t with n = 3 spatial dimensions; D is the only free
    # parameter (initial guess 1.0).
    from scipy.optimize import curve_fit
    popt, pcov = curve_fit(lambda t, D: msd_fit(t, 3, D), timesteps, msd, 1.0)
    ax.plot(timesteps, msd_fit(timesteps, 3, popt[0]), 'r--', linewidth = 1)
    print(f"Diffusion: {popt[0]}")
    ax.set_title(mtitle)
    if xlabel: ax.set_xlabel(xtitle)
    ax.set_ylabel(ytitle)
| [
"numpy.histogram",
"numpy.greater",
"numpy.int32",
"os.path.dirname",
"numpy.isnan",
"pandas.DataFrame",
"common.moving_average",
"pandas.concat"
] | [((2401, 2450), 'numpy.histogram', 'np.histogram', (["df['free']"], {'bins': '(10)', 'range': '(0, 200)'}), "(df['free'], bins=10, range=(0, 200))\n", (2413, 2450), True, 'import numpy as np\n'), ((2487, 2536), 'numpy.histogram', 'np.histogram', (["df['near']"], {'bins': '(10)', 'range': '(0, 200)'}), "(df['near'], bins=10, range=(0, 200))\n", (2499, 2536), True, 'import numpy as np\n'), ((2568, 2620), 'numpy.histogram', 'np.histogram', (["df['surface']"], {'bins': '(10)', 'range': '(0, 200)'}), "(df['surface'], bins=10, range=(0, 200))\n", (2580, 2620), True, 'import numpy as np\n'), ((2652, 2709), 'numpy.histogram', 'np.histogram', (["df['intermediate']"], {'bins': '(10)', 'range': '(0, 200)'}), "(df['intermediate'], bins=10, range=(0, 200))\n", (2664, 2709), True, 'import numpy as np\n'), ((2741, 2790), 'numpy.histogram', 'np.histogram', (["df['deep']"], {'bins': '(10)', 'range': '(0, 200)'}), "(df['deep'], bins=10, range=(0, 200))\n", (2753, 2790), True, 'import numpy as np\n'), ((2810, 2835), 'common.moving_average', 'moving_average', (['bin_edges'], {}), '(bin_edges)\n', (2824, 2835), False, 'from common import moving_average\n'), ((4665, 4684), 'numpy.int32', 'np.int32', (['idx[0][0]'], {}), '(idx[0][0])\n', (4673, 4684), True, 'import numpy as np\n'), ((4738, 4757), 'numpy.int32', 'np.int32', (['jdx[0][0]'], {}), '(jdx[0][0])\n', (4746, 4757), True, 'import numpy as np\n'), ((182, 207), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (197, 207), False, 'import os\n'), ((4622, 4653), 'numpy.greater', 'np.greater', (['x_fft', 'qcutoff_mean'], {}), '(x_fft, qcutoff_mean)\n', (4632, 4653), True, 'import numpy as np\n'), ((4704, 4726), 'numpy.greater', 'np.greater', (['x_fft', '(1.0)'], {}), '(x_fft, 1.0)\n', (4714, 4726), True, 'import numpy as np\n'), ((7743, 7808), 'pandas.DataFrame', 'pd.DataFrame', (['[area_mean, nlipids_per_leaflet]'], {'columns': "['other']"}), "([area_mean, nlipids_per_leaflet], columns=['other'])\n", (7755, 
7808), True, 'import pandas as pd\n'), ((8004, 8026), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(1)'}), '(dfs, axis=1)\n', (8013, 8026), True, 'import pandas as pd\n'), ((3863, 3878), 'numpy.isnan', 'np.isnan', (['x_fft'], {}), '(x_fft)\n', (3871, 3878), True, 'import numpy as np\n'), ((3902, 3918), 'numpy.isnan', 'np.isnan', (['su_fft'], {}), '(su_fft)\n', (3910, 3918), True, 'import numpy as np\n')] |
import numpy as np
import utils.utils as utils
import os
def argsProcessor(argv=None):
    """Parse command-line options for the corner-dataset builder.

    Args:
        argv: Optional list of argument strings to parse. Defaults to
            ``None``, in which case ``sys.argv[1:]`` is parsed — the same
            behavior as before this parameter existed, so existing callers
            are unaffected.

    Returns:
        argparse.Namespace with ``outputDir``, ``inputDir`` and
        ``saveName`` attributes.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--outputDir", help="output Directory of Data")
    parser.add_argument("-i", "--inputDir", help="input Directory of data")
    parser.add_argument("-s", "--saveName", help="fileNameForSaving")
    # parse_args(None) falls back to sys.argv[1:], preserving the old
    # zero-argument call behavior.
    return parser.parse_args(argv)
if __name__ == "__main__":
    args = argsProcessor()
    inputDataDir = args.inputDir
    outputDataDir = args.outputDir
    GT_DIR = inputDataDir + "/gt.csv"
    VALIDATION_PERCENTAGE = .20
    TEST_PERCENTAGE = .10
    Debug = True
    size = (32, 32)
    # Load (image, ground-truth) pairs and shuffle them in unison so the
    # train/validation split below is random while keeping pairs aligned.
    image_list, gt_list, file_name = utils.load_data(inputDataDir, GT_DIR, size=size, debug=Debug, limit=10000)
    image_list, gt_list = utils.unison_shuffled_copies(image_list, gt_list)
    print(len(image_list))
    if Debug:
        print("(Image_list_len, gt_list_len)", (len(image_list), len(gt_list)))
    # Single split index, clamped so there is always at least one training
    # sample. BUG FIX: previously the train slice used max(1, ...) but the
    # validation slice started at the unclamped index (possible overlap for
    # tiny datasets), and validation ended at len(image_list) - 1, silently
    # dropping the last sample.
    split = max(1, int(len(image_list) * (1 - VALIDATION_PERCENTAGE)))
    train_image = image_list[0:split]
    train_gt = gt_list[0:split]
    validate_image = image_list[split:]
    validate_gt = gt_list[split:]
    if Debug:
        print("(Train_Image_len, Train_gt_len)", (len(train_image), len(train_gt)))
        print("(Validate_Image_len, Validate_gt_len)", (len(validate_image), len(validate_gt)))
    np.save(outputDataDir + args.saveName + "trainGtCorners", train_gt)
    np.save(outputDataDir + args.saveName + "trainImagesCorners", train_image)
    np.save(outputDataDir + args.saveName + "validateGTCorners", validate_gt)
    np.save(outputDataDir + args.saveName + "validateImagesCorners", validate_image)
"numpy.save",
"utils.utils.unison_shuffled_copies",
"argparse.ArgumentParser",
"utils.utils.load_data"
] | [((112, 137), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (135, 137), False, 'import argparse\n'), ((688, 762), 'utils.utils.load_data', 'utils.load_data', (['inputDataDir', 'GT_DIR'], {'size': 'size', 'debug': 'Debug', 'limit': '(10000)'}), '(inputDataDir, GT_DIR, size=size, debug=Debug, limit=10000)\n', (703, 762), True, 'import utils.utils as utils\n'), ((789, 838), 'utils.utils.unison_shuffled_copies', 'utils.unison_shuffled_copies', (['image_list', 'gt_list'], {}), '(image_list, gt_list)\n', (817, 838), True, 'import utils.utils as utils\n'), ((1550, 1617), 'numpy.save', 'np.save', (["(outputDataDir + args.saveName + 'trainGtCorners')", 'train_gt'], {}), "(outputDataDir + args.saveName + 'trainGtCorners', train_gt)\n", (1557, 1617), True, 'import numpy as np\n'), ((1622, 1696), 'numpy.save', 'np.save', (["(outputDataDir + args.saveName + 'trainImagesCorners')", 'train_image'], {}), "(outputDataDir + args.saveName + 'trainImagesCorners', train_image)\n", (1629, 1696), True, 'import numpy as np\n'), ((1701, 1774), 'numpy.save', 'np.save', (["(outputDataDir + args.saveName + 'validateGTCorners')", 'validate_gt'], {}), "(outputDataDir + args.saveName + 'validateGTCorners', validate_gt)\n", (1708, 1774), True, 'import numpy as np\n'), ((1779, 1864), 'numpy.save', 'np.save', (["(outputDataDir + args.saveName + 'validateImagesCorners')", 'validate_image'], {}), "(outputDataDir + args.saveName + 'validateImagesCorners', validate_image\n )\n", (1786, 1864), True, 'import numpy as np\n')] |
# Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Various connectors.
"""
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from torch import nn
from torch.distributions.distribution import Distribution
from texar.torch.core import get_activation_fn
from texar.torch.hyperparams import HParams
from texar.torch.modules.connectors.connector_base import ConnectorBase
from texar.torch.utils import nest
from texar.torch.utils import utils
from texar.torch.utils.types import MaybeTuple
# Public API of this module.
__all__ = [
    "ConstantConnector",
    "ForwardConnector",
    "MLPTransformConnector",
    "ReparameterizedStochasticConnector",
    "StochasticConnector",
    # "ConcatConnector"
]
# Type aliases shared by the connectors below.
# TensorStruct: a tensor container — list, dict, or (possibly nested) tuple
# of tensors.
TensorStruct = Union[List[torch.Tensor],
                     Dict[Any, torch.Tensor],
                     MaybeTuple[torch.Tensor]]
# OutputSize: per-example output size spec, excluding the batch dimension.
OutputSize = MaybeTuple[Union[int, torch.Size]]
# Callables applied to tensors inside the MLP transform.
ActivationFn = Callable[[torch.Tensor], torch.Tensor]
LinearLayer = Callable[[torch.Tensor], torch.Tensor]
def _assert_same_size(outputs: TensorStruct,
                      output_size: OutputSize):
    r"""Check if outputs match output_size

    Args:
        outputs: A tensor or a (nested) tuple of tensors
        output_size: Can be an ``int``, a ``torch.Size``, or a (nested)
            tuple of ``int`` or ``torch.Size``.

    Raises:
        ValueError: If any output's per-example size differs from the
            corresponding entry of :attr:`output_size`.
    """
    flat_output_size = nest.flatten(output_size)
    flat_output = nest.flatten(outputs)
    for (output, size) in zip(flat_output, flat_output_size):
        # ``output[0]`` drops the batch dimension, since ``output_size``
        # excludes the batch dimension by convention.
        if isinstance(size, torch.Size):
            # BUG FIX: the two adjacent string literals previously joined
            # without a space, producing "...does not matchthe required...".
            if output[0].size() != size:
                raise ValueError("The output size does not match "
                                 "the required output_size")
        elif output[0].size()[-1] != size:
            raise ValueError(
                "The output size does not match the required output_size")
def _get_sizes(sizes: List[Any]) -> List[int]:
r"""
Args:
sizes: A list of ``int`` or ``torch.Size``. If each element is of type
``torch.Size``, the size is computed by taking the product of the
shape.
Returns:
A list of sizes with ``torch.Size`` replaced by product of its
individual dimensions
"""
if isinstance(sizes[0], torch.Size):
size_list = [np.prod(shape) for shape in sizes]
else:
size_list = sizes
return size_list
def _sum_output_size(output_size: OutputSize) -> int:
    r"""Return the total number of scalar outputs in :attr:`output_size`.

    Args:
        output_size: Can be an ``int``, a ``torch.Size``, or a (nested)
            tuple of ``int`` or ``torch.Size``.
    """
    # Flatten the (possibly nested) spec, convert each entry to a plain
    # integer size, and sum.
    return sum(_get_sizes(nest.flatten(output_size)))
def _mlp_transform(inputs: TensorStruct,
                   output_size: OutputSize,
                   linear_layer: Optional[LinearLayer] = None,
                   activation_fn: Optional[ActivationFn] = None) -> Any:
    r"""Transforms inputs through a fully-connected layer that creates
    the output with specified size.
    Args:
        inputs: A Tensor of shape `[batch_size, d1, ..., dn]`, or a (nested)
            tuple of such elements. The dimensions `d1, ..., dn` will be flatten
            and transformed by a dense layer.
        output_size: Can be an ``int``, a ``torch.Size``, or a (nested)
            tuple of ``int`` or ``torch.Size``.
        linear_layer: Optional linear transformation applied to the flattened
            and concatenated inputs, before :attr:`activation_fn`.
        activation_fn: Activation function applied to the output.
    :returns:
        If :attr:`output_size` is an ``int`` or a ``torch.Size``,
        returns a tensor of shape ``[batch_size, *, output_size]``.
        If :attr:`output_size` is a tuple of ``int`` or ``torch.Size``,
        returns a tuple having the same structure as :attr:`output_size`,
        where each element has the same size as defined in :attr:`output_size`.
    """
    # Flatten inputs: reshape every tensor to 2-D (keeping only the last
    # dimension) and concatenate along the feature axis.
    flat_input = nest.flatten(inputs)
    flat_input = [x.view(-1, x.size(-1)) for x in flat_input]
    concat_input = torch.cat(flat_input, 1)
    # Get output dimension per entry; ``torch.Size`` entries contribute the
    # product of their dimensions (see ``_get_sizes``).
    flat_output_size = nest.flatten(output_size)
    size_list = _get_sizes(flat_output_size)
    fc_output = concat_input
    if linear_layer is not None:
        fc_output = linear_layer(fc_output)
    if activation_fn is not None:
        fc_output = activation_fn(fc_output)
    # Split the transformed tensor back into one chunk per output entry,
    # restoring multi-dimensional per-example shapes where requested.
    flat_output = torch.split(fc_output, size_list, dim=1)
    flat_output = list(flat_output)
    if isinstance(flat_output_size[0], torch.Size):
        flat_output = [torch.reshape(output, (-1,) + shape) for output, shape
                       in zip(flat_output, flat_output_size)]
    # Repack the flat chunks into the same (nested) structure as
    # ``output_size``.
    output = nest.pack_sequence_as(structure=output_size,
                                  flat_sequence=flat_output)
    return output
class ConstantConnector(ConnectorBase):
    r"""Produces a constant tensor or (nested) tuple of constant tensors.
    Given a batch size, emits tensor(s) whose structure matches
    :attr:`output_size` and whose every entry equals the scalar configured
    by the ``"value"`` hyperparameter.
    Args:
        output_size: Size of output **excluding** the batch dimension. Can
            be an ``int``, a tuple of ``int``, a ``torch.Size``, or a tuple
            of ``torch.Size``. For example, to produce output of decoder
            state size, set :python:`output_size=decoder.state_size`.
            A tuple ``(1, 2, 3)`` yields the output structure
            ``([batch_size * 1], [batch_size * 2], [batch_size * 3])``,
            while ``torch.Size([1, 2, 3])`` yields a single tensor of shape
            ``[batch_size, 1, 2, 3]``.
        hparams (dict, optional): Hyperparameters. Missing
            hyperparameter will be set to default values. See
            :meth:`default_hparams` for the hyperparameter structure and
            default values.
    This connector does not have trainable parameters.
    Example:
        .. code-block:: python
            connector = ConstantConnector((1, 2, 3), hparams={"value": 1.})
            one_state = connector(batch_size=64)
            # one_state is a 3-tuple of tensors with shapes
            # [64, 1], [64, 2], [64, 3], all filled with 1.0.
    """
    def __init__(self,
                 output_size: OutputSize,
                 hparams: Optional[HParams] = None):
        super().__init__(output_size, hparams=hparams)
        # Scalar fill value used for every produced tensor.
        self.value = self.hparams.value
    @staticmethod
    def default_hparams() -> dict:
        r"""Returns a dictionary of hyperparameters with default values.
        .. code-block:: python
            {
                "value": 0.,
                "name": "constant_connector"
            }
        Here:
        `"value"`: float
            The constant scalar that the output tensor(s) has.
        `"name"`: str
            Name of the connector.
        """
        return {
            "value": 0.,
            "name": "constant_connector"
        }
    def forward(self,  # type: ignore
                batch_size: Union[int, torch.Tensor]) -> Any:
        r"""Creates constant output tensor(s) for a batch of the given size.
        Args:
            batch_size: An ``int`` or ``int`` scalar tensor, the
                batch size.
        :returns:
            A (structure of) tensor(s) with the same structure as
            :attr:`output_size`, every entry equal to ``self.value``.
        """
        def make_constant(size_entry):
            # A ``torch.Size`` entry describes a full per-example shape; a
            # plain int describes a single feature dimension.
            if isinstance(size_entry, torch.Size):
                shape = (batch_size,) + size_entry
            else:
                shape = (batch_size, size_entry)
            return torch.full(shape, self.value)
        return utils.map_structure(make_constant, self._output_size)
class ForwardConnector(ConnectorBase):
    r"""Repacks its inputs into a specified structure, leaving values intact.
    Example:
        .. code-block:: python
            state_size = namedtuple('LSTMStateTuple', ['h', 'c'])(256, 256)
            # state_size == LSTMStateTuple(c=256, h=256)
            connector = ForwardConnector(state_size)
            output = connector([tensor_1, tensor_2])
            # output == LSTMStateTuple(c=tensor_1, h=tensor_2)
    Args:
        output_size: Size of output **excluding** the batch dimension. Can
            be an ``int``, a tuple of ``int``, a ``torch.Size``, or a tuple
            of ``torch.Size``. For example, to transform inputs to have
            decoder state size, set :python:`output_size=decoder.state_size`.
        hparams (dict, optional): Hyperparameters. Missing
            hyperparameter will be set to default values. See
            :meth:`default_hparams` for the hyperparameter structure and
            default values.
    This connector does not have trainable parameters.
    See :meth:`forward` for the inputs and outputs of the connector.
    The input must either share the structure of :attr:`output_size` or have
    the same number of elements so it can be re-packed into that structure.
    Note that if the input is or contains a ``dict``, keys are sorted to
    produce a deterministic packing order (see
    :func:`~texar.torch.utils.nest.pack_sequence_as`).
    """
    def __init__(self,
                 output_size: OutputSize,
                 hparams: Optional[HParams] = None):
        super().__init__(output_size, hparams=hparams)
    @staticmethod
    def default_hparams() -> dict:
        r"""Returns a dictionary of hyperparameters with default values.
        .. code-block:: python
            {
                "name": "forward_connector"
            }
        Here:
        `"name"`: str
            Name of the connector.
        """
        return {
            "name": "forward_connector"
        }
    def forward(self,  # type: ignore
                inputs: TensorStruct) -> Any:
        r"""Re-packs :attr:`inputs` into the structure of :attr:`output_size`.
        Values of the inputs are not changed. :attr:`inputs` must either
        have the same structure, or the same number of elements, as
        :attr:`output_size`.
        Args:
            inputs: The input (structure of) tensor to pass forward.
        :returns:
            A (structure of) tensors that re-packs :attr:`inputs` to have
            the specified structure of :attr:`output_size`.
        """
        return nest.pack_sequence_as(
            self._output_size, nest.flatten(inputs))
class MLPTransformConnector(ConnectorBase):
    r"""Applies an MLP layer to its inputs and packs the result into the
    specified structure and size.
    Example:
        .. code-block:: python
            cell = LSTMCell(num_units=256)
            # cell.state_size == LSTMStateTuple(c=256, h=256)
            connector = MLPTransformConnector(cell.state_size)
            inputs = torch.zeros([64, 10])
            output = connector(inputs)
            # output == LSTMStateTuple(c=tensor_of_shape_(64, 256),
            #                          h=tensor_of_shape_(64, 256))
    .. code-block:: python
        ## Use to connect encoder and decoder with different state size
        encoder = UnidirectionalRNNEncoder(...)
        _, final_state = encoder(inputs=...)
        decoder = BasicRNNDecoder(...)
        connector = MLPTransformConnector(decoder.state_size)
        _ = decoder(
            initial_state=connector(final_state),
            ...)
    Args:
        output_size: Size of output **excluding** the batch dimension. Can
            be an ``int``, a tuple of ``int``, a ``torch.Size``, or a tuple
            of ``torch.Size``. For example, to transform inputs to have
            decoder state size, set :python:`output_size=decoder.state_size`.
        linear_layer_dim (int): Value of final dim of the input tensors,
            i.e. the input dim of the MLP linear layer.
        hparams (dict, optional): Hyperparameters. Missing hyperparameter
            will be set to default values. See :meth:`default_hparams` for
            the hyperparameter structure and default values.
    The input to the connector can have arbitrary structure and size.
    """
    def __init__(self,
                 output_size: OutputSize,
                 linear_layer_dim: int,
                 hparams: Optional[HParams] = None):
        super().__init__(output_size, hparams=hparams)
        # One dense layer maps the concatenated input features onto the
        # total number of output features; the result is split/re-packed
        # in forward() by _mlp_transform.
        total_output_dim = _sum_output_size(output_size)
        self._linear_layer = nn.Linear(linear_layer_dim, total_output_dim)
        self._activation_fn = get_activation_fn(self.hparams.activation_fn)
    @staticmethod
    def default_hparams() -> dict:
        r"""Returns a dictionary of hyperparameters with default values.
        .. code-block:: python
            {
                "activation_fn": "texar.torch.core.layers.identity",
                "name": "mlp_connector"
            }
        Here:
        `"activation_fn"`: str or callable
            The activation function applied to the outputs of the MLP
            transformation layer. Can
            be a function, or its name or module path.
        `"name"`: str
            Name of the connector.
        """
        return {
            "activation_fn": "texar.torch.core.layers.identity",
            "name": "mlp_connector"
        }
    def forward(self,  # type: ignore
                inputs: TensorStruct) -> Any:
        r"""Transforms the inputs with an MLP layer and packs the result to
        match the structure of :attr:`output_size`.
        Args:
            inputs: Input (structure of) tensors to be transformed. Must be
                a tensor of shape ``[batch_size, ...]`` or a (nested) tuple
                of such tensors; the first dimension of (each) tensor must
                be the batch dimension.
        :returns:
            A tensor or a (nested) tuple of tensors of the same structure
            as :attr:`output_size`.
        """
        return _mlp_transform(inputs, self._output_size,
                              self._linear_layer, self._activation_fn)
class ReparameterizedStochasticConnector(ConnectorBase):
    r"""Samples from a distribution with reparameterization trick, and
    transforms samples into specified size.
    Reparameterization allows gradients to be back-propagated through the
    stochastic samples. Used in, e.g., Variational Autoencoders (VAEs).
    Example:
        .. code-block:: python
            # Initialized without num_samples
            cell = LSTMCell(num_units=256)
            # cell.state_size == LSTMStateTuple(c=256, h=256)
            mu = torch.zeros([16, 100])
            var = torch.ones([100])
            connector = ReparameterizedStochasticConnector(
                cell.state_size,
                mlp_input_size=mu.size()[-1],
                distribution="MultivariateNormal",
                distribution_kwargs={
                    "loc": mu,
                    "scale_tril": torch.diag(var)})
            output, sample = connector()
            # output == LSTMStateTuple(c=tensor_of_shape_(16, 256),
            #                          h=tensor_of_shape_(16, 256))
            # sample == Tensor([16, 100])
            output_, sample_ = connector(num_samples=4)
            # output_ == LSTMStateTuple(c=tensor_of_shape_(4, 16, 256),
            #                           h=tensor_of_shape_(4, 16, 256))
            # sample == Tensor([4, 16, 100])
    Args:
        output_size: Size of output **excluding** the batch dimension. For
            example, set ``output_size`` to ``dim`` to generate output of
            shape ``[batch_size, dim]``.
            Can be an ``int``, a tuple of ``int``, a ``torch.Size``, or
            a tuple of ``torch.Size``.
            For example, to transform inputs to have decoder state size, set
            :python:`output_size=decoder.state_size`.
        mlp_input_size: Size of MLP transfer process input, which is equal to
            the distribution result size **excluding** the batch dimension,
            Can be ``int`` or ``torch.Size`` or a tuple of ``int``.
        distribution: A instance or name ``str`` of subclass of
            :torch:`distributions.distribution.Distribution`,
            Can be a distribution class instance or ``str``.
        distribution_kwargs (dict, optional): ``dict`` of keyword arguments
            for the :attr:`distribution`. Its keys are `str`, which are names
            of keyword arguments; Its values are corresponding values for each
            argument.
        hparams (dict, optional): Hyperparameters. Missing
            hyperparameter will be set to default values. See
            :meth:`default_hparams` for the hyperparameter structure and
            default values.
    """
    def __init__(self,
                 output_size: OutputSize,
                 mlp_input_size: Union[torch.Size, MaybeTuple[int], int],
                 distribution: Union[Distribution, str] = 'MultivariateNormal',
                 distribution_kwargs: Optional[Dict[str, Any]] = None,
                 hparams: Optional[HParams] = None):
        super().__init__(output_size, hparams=hparams)
        if distribution_kwargs is None:
            distribution_kwargs = {}
        self._dstr_type = distribution
        self._dstr_kwargs = distribution_kwargs
        # Tensor-valued distribution arguments are wrapped in nn.Parameter
        # and registered on the module, so they are trainable and included
        # in state_dict().
        for dstr_attr, dstr_val in distribution_kwargs.items():
            if isinstance(dstr_val, torch.Tensor):
                dstr_param = nn.Parameter(dstr_val)
                distribution_kwargs[dstr_attr] = dstr_param
                self.register_parameter(dstr_attr, dstr_param)
        # A torch.Size / tuple input size is flattened to a single feature
        # count for the linear layer.
        if isinstance(mlp_input_size, int):
            input_feature = mlp_input_size
        else:
            input_feature = np.prod(mlp_input_size)
        self._linear_layer = nn.Linear(
            input_feature, _sum_output_size(output_size))
        self._activation_fn = get_activation_fn(
            self.hparams.activation_fn)
    @staticmethod
    def default_hparams() -> dict:
        r"""Returns a dictionary of hyperparameters with default values.
        .. code-block:: python
            {
                "activation_fn": "texar.torch.core.layers.identity",
                "name": "reparameterized_stochastic_connector"
            }
        Here:
        `"activation_fn"`: str
            The activation function applied to the outputs of the MLP
            transformation layer. Can be a function, or its name or module path.
        `"name"`: str
            Name of the connector.
        """
        return {
            "activation_fn": "texar.torch.core.layers.identity",
            "name": "reparameterized_stochastic_connector"
        }
    def forward(self,  # type: ignore
                num_samples: Optional[Union[int, torch.Tensor]] = None,
                transform: bool = True) -> Tuple[Any, Any]:
        r"""Samples from a distribution and optionally performs transformation
        with an MLP layer.
        The distribution must be reparameterizable, i.e.,
        :python:`Distribution.has_rsample == True`.
        Args:
            num_samples (optional): An ``int`` or ``int`` tensor.
                Number of samples to generate. If not given,
                generate a single sample. Note that if batch size has
                already been included in :attr:`distribution`'s dimensionality,
                :attr:`num_samples` should be left as ``None``.
            transform (bool): Whether to perform MLP transformation of the
                distribution samples. If ``False``, the structure/shape of a
                sample must match :attr:`output_size`.
        :returns:
            A tuple (:attr:`output`, :attr:`sample`), where
            - output: A tensor or a (nested) tuple of Tensors with
              the same structure and size of :attr:`output_size`.
              The batch dimension equals :attr:`num_samples` if specified,
              or is determined by the distribution dimensionality.
              If :attr:`transform` is `False`, it will be
              equal to :attr:`sample`.
            - sample: The sample from the distribution, prior to
              transformation. (The sample is always returned alongside the
              output.)
        Raises:
            ValueError: If distribution is not reparameterizable.
            ValueError: The output does not match :attr:`output_size`.
        """
        # A string distribution spec is resolved (and instantiated with the
        # stored kwargs) on every call; an instance is used as-is.
        if isinstance(self._dstr_type, str):
            dstr: Distribution = utils.check_or_get_instance(
                self._dstr_type, self._dstr_kwargs,
                ["torch.distributions", "texar.torch.custom"])
        else:
            dstr = self._dstr_type
        if not dstr.has_rsample:
            raise ValueError("Distribution should be reparameterizable")
        # rsample keeps the sample differentiable w.r.t. the distribution
        # parameters (reparameterization trick).
        if num_samples:
            sample = dstr.rsample([num_samples])
        else:
            sample = dstr.rsample()
        if transform:
            output = _mlp_transform(
                sample,
                self._output_size,
                self._linear_layer,
                self._activation_fn)
            _assert_same_size(output, self._output_size)
        else:
            output = sample
        return output, sample
class StochasticConnector(ConnectorBase):
    r"""Samples from a distribution and transforms samples into specified size.
    The connector is the same as
    :class:`~texar.torch.modules.ReparameterizedStochasticConnector`, except
    that here reparameterization is disabled, and thus the gradients cannot be
    back-propagated through the stochastic samples.
    Args:
        output_size: Size of output **excluding** the batch dimension. For
            example, set ``output_size`` to ``dim`` to generate output of
            shape ``[batch_size, dim]``.
            Can be an ``int``, a tuple of ``int``, a torch.Size, or a tuple of
            torch.Size.
            For example, to transform inputs to have decoder state size, set
            :python:`output_size=decoder.state_size`.
        mlp_input_size: Size of MLP transfer process input, which is equal to
            the distribution result size **excluding** the batch dimension,
            Can be ``int`` or ``torch.Size`` or a tuple of ``int``.
        distribution: A instance of subclass of
            :torch:`distributions.distribution.Distribution`,
            Can be a class, its name or module path, or a class instance.
            The :attr:`distribution` should not be reparameterizable.
        distribution_kwargs (dict, optional): ``dict`` of keyword arguments
            for the :attr:`distribution`. Its keys are `str`, which are names
            of keyword arguments; Its values are corresponding values for each
            argument.
        hparams (dict, optional): Hyperparameters. Missing
            hyperparameter will be set to default values. See
            :meth:`default_hparams` for the hyperparameter structure and
            default values.
    """
    def __init__(self,
                 output_size: OutputSize,
                 mlp_input_size: Union[torch.Size, MaybeTuple[int], int],
                 distribution: Union[Distribution, str] = 'MultivariateNormal',
                 distribution_kwargs: Optional[Dict[str, Any]] = None,
                 hparams: Optional[HParams] = None):
        super().__init__(output_size, hparams=hparams)
        if distribution_kwargs is None:
            distribution_kwargs = {}
        self._dstr_kwargs = distribution_kwargs
        # A string spec is resolved and instantiated once at construction
        # time; an instance is stored as-is.
        if isinstance(distribution, str):
            self._dstr: Distribution = utils.check_or_get_instance(
                distribution, self._dstr_kwargs,
                ["torch.distributions", "texar.torch.custom"])
        else:
            self._dstr = distribution
        if self._dstr.has_rsample:
            raise ValueError("Distribution should not be reparameterizable")
        # A torch.Size / tuple input size is flattened to a single feature
        # count for the linear layer.
        if isinstance(mlp_input_size, int):
            input_feature = mlp_input_size
        else:
            input_feature = np.prod(mlp_input_size)
        self._linear_layer = nn.Linear(
            input_feature, _sum_output_size(output_size))
        self._activation_fn = get_activation_fn(
            self.hparams.activation_fn)
    @staticmethod
    def default_hparams():
        r"""Returns a dictionary of hyperparameters with default values.
        .. code-block:: python
            {
                "activation_fn": "texar.torch.core.layers.identity",
                "name": "stochastic_connector"
            }
        Here:
        `"activation_fn"`: str
            The activation function applied to the outputs of the MLP
            transformation layer. Can
            be a function, or its name or module path.
        `"name"`: str
            Name of the connector.
        """
        return {
            "activation_fn": "texar.torch.core.layers.identity",
            "name": "stochastic_connector"
        }
    def forward(self,  # type: ignore
                num_samples: Optional[Union[int, torch.Tensor]] = None,
                transform: bool = False) -> Any:
        r"""Samples from a distribution and optionally performs transformation
        with an MLP layer.
        The inputs and outputs are the same as
        :class:`~texar.torch.modules.ReparameterizedStochasticConnector` except
        that the distribution does not need to be reparameterizable, and
        gradient cannot be back-propagate through the samples.
        Args:
            num_samples (optional): An ``int`` or ``int`` tensor.
                Number of samples to generate. If not given,
                generate a single sample. Note that if batch size has
                already been included in :attr:`distribution`'s dimensionality,
                :attr:`num_samples` should be left as ``None``.
            transform (bool): Whether to perform MLP transformation of the
                distribution samples. If ``False``, the structure/shape of a
                sample must match :attr:`output_size`.
        :returns:
            A tuple (:attr:`output`, :attr:`sample`), where
            - output: A tensor or a (nested) tuple of Tensors with
              the same structure and size of :attr:`output_size`.
              The batch dimension equals :attr:`num_samples` if specified,
              or is determined by the distribution dimensionality.
              If :attr:`transform` is `False`, it will be
              equal to :attr:`sample`.
            - sample: The sample from the distribution, prior to transformation.
        Raises:
            ValueError: If distribution can be reparameterizable.
            ValueError: The output does not match :attr:`output_size`.
        """
        if num_samples:
            sample = self._dstr.sample([num_samples])
        else:
            sample = self._dstr.sample()
        # BUG FIX: the previous check `self._dstr.event_shape == []` compared
        # a ``torch.Size`` (a tuple subclass) against a list, which is always
        # False in Python, so scalar-event samples never received the
        # trailing feature dimension. Use an emptiness check instead.
        if len(self._dstr.event_shape) == 0:
            sample = torch.reshape(
                input=sample, shape=sample.size() + torch.Size([1]))
        # Disable gradients through samples
        sample = sample.detach().float()
        if transform:
            output = _mlp_transform(
                sample,
                self._output_size,
                self._linear_layer,
                self._activation_fn)
            _assert_same_size(output, self._output_size)
        else:
            output = sample
        return output, sample
# class ConcatConnector(ConnectorBase):
# r"""Concatenates multiple connectors into one connector. Used in, e.g.,
# semi-supervised variational autoencoders, disentangled representation
# learning, and other models.
#
# Args:
# output_size: Size of output excluding the batch dimension (eg.
# :attr:`output_size = p` if :attr:`output.shape` is :attr:`[N, p]`).
# Can be an int, a tuple of int, a torch.Size, or a tuple of
# torch.Size.
# For example, to transform to decoder state size, set
# `output_size=decoder.cell.state_size`.
# hparams (dict): Hyperparameters of the connector.
# """
#
# def __init__(self, output_size, hparams=None):
# super().__init__(self, output_size, hparams)
#
# @staticmethod
# def default_hparams():
# r"""Returns a dictionary of hyperparameters with default values.
#
# Returns:
#
# .. code-block:: python
#
# {
# "activation_fn": "texar.torch.core.layers.identity",
# "name": "concat_connector"
# }
#
# Here:
#
# `"activation_fn"`: (str or callable)
# The name or full path to the activation function applied to
# the outputs of the MLP layer. The activation functions can be:
#
# - Built-in activation functions defined in :
# - User-defined activation functions in `texar.torch.custom`.
# - External activation functions. Must provide the full path, \
# e.g., "my_module.my_activation_fn".
#
# The default value is :attr:`"identity"`, i.e., the MLP
# transformation is linear.
#
# `"name"`: str
# Name of the connector.
#
# The default value is "concat_connector".
# """
# return {
# "activation_fn": "texar.torch.core.layers.identity",
# "name": "concat_connector"
# }
#
# def forward(self, connector_inputs, transform=True):
# r"""Concatenate multiple input connectors
#
# Args:
# connector_inputs: a list of connector states
# transform (bool): If `True`, then the output are automatically
# transformed to match :attr:`output_size`.
#
# Returns:
# A Tensor or a (nested) tuple of Tensors of the same structure of
# the decoder state.
# """
# connector_inputs = [connector.float()
# for connector in connector_inputs]
# output = torch.cat(connector_inputs, dim=1)
#
# if transform:
# fn_modules = ['texar.torch.custom', 'torch', 'torch.nn']
# activation_fn = utils.get_function(self.hparams.activation_fn,
# fn_modules)
# output, linear_layer = _mlp_transform(
# output, self._output_size, activation_fn)
# self._linear_layers.append(linear_layer)
# _assert_same_size(output, self._output_size)
#
# self._add_internal_trainable_variables()
# self._built = True
#
# return output
| [
"numpy.prod",
"torch.split",
"texar.torch.utils.utils.map_structure",
"texar.torch.core.get_activation_fn",
"torch.full",
"texar.torch.utils.nest.pack_sequence_as",
"texar.torch.utils.nest.flatten",
"torch.Size",
"texar.torch.utils.utils.check_or_get_instance",
"torch.nn.Parameter",
"torch.resha... | [((1929, 1954), 'texar.torch.utils.nest.flatten', 'nest.flatten', (['output_size'], {}), '(output_size)\n', (1941, 1954), False, 'from texar.torch.utils import nest\n'), ((1973, 1994), 'texar.torch.utils.nest.flatten', 'nest.flatten', (['outputs'], {}), '(outputs)\n', (1985, 1994), False, 'from texar.torch.utils import nest\n'), ((3217, 3242), 'texar.torch.utils.nest.flatten', 'nest.flatten', (['output_size'], {}), '(output_size)\n', (3229, 3242), False, 'from texar.torch.utils import nest\n'), ((4480, 4500), 'texar.torch.utils.nest.flatten', 'nest.flatten', (['inputs'], {}), '(inputs)\n', (4492, 4500), False, 'from texar.torch.utils import nest\n'), ((4582, 4606), 'torch.cat', 'torch.cat', (['flat_input', '(1)'], {}), '(flat_input, 1)\n', (4591, 4606), False, 'import torch\n'), ((4658, 4683), 'texar.torch.utils.nest.flatten', 'nest.flatten', (['output_size'], {}), '(output_size)\n', (4670, 4683), False, 'from texar.torch.utils import nest\n'), ((4935, 4975), 'torch.split', 'torch.split', (['fc_output', 'size_list'], {'dim': '(1)'}), '(fc_output, size_list, dim=1)\n', (4946, 4975), False, 'import torch\n'), ((5219, 5290), 'texar.torch.utils.nest.pack_sequence_as', 'nest.pack_sequence_as', ([], {'structure': 'output_size', 'flat_sequence': 'flat_output'}), '(structure=output_size, flat_sequence=flat_output)\n', (5240, 5290), False, 'from texar.torch.utils import nest\n'), ((8745, 8796), 'texar.torch.utils.utils.map_structure', 'utils.map_structure', (['full_tensor', 'self._output_size'], {}), '(full_tensor, self._output_size)\n', (8764, 8796), False, 'from texar.torch.utils import utils\n'), ((11614, 11634), 'texar.torch.utils.nest.flatten', 'nest.flatten', (['inputs'], {}), '(inputs)\n', (11626, 11634), False, 'from texar.torch.utils import nest\n'), ((11652, 11704), 'texar.torch.utils.nest.pack_sequence_as', 'nest.pack_sequence_as', (['self._output_size', 'flat_input'], {}), '(self._output_size, flat_input)\n', (11673, 11704), False, 'from 
texar.torch.utils import nest\n'), ((13957, 14002), 'texar.torch.core.get_activation_fn', 'get_activation_fn', (['self.hparams.activation_fn'], {}), '(self.hparams.activation_fn)\n', (13974, 14002), False, 'from texar.torch.core import get_activation_fn\n'), ((19384, 19429), 'texar.torch.core.get_activation_fn', 'get_activation_fn', (['self.hparams.activation_fn'], {}), '(self.hparams.activation_fn)\n', (19401, 19429), False, 'from texar.torch.core import get_activation_fn\n'), ((25755, 25800), 'texar.torch.core.get_activation_fn', 'get_activation_fn', (['self.hparams.activation_fn'], {}), '(self.hparams.activation_fn)\n', (25772, 25800), False, 'from texar.torch.core import get_activation_fn\n'), ((2846, 2860), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (2853, 2860), True, 'import numpy as np\n'), ((5088, 5124), 'torch.reshape', 'torch.reshape', (['output', '((-1,) + shape)'], {}), '(output, (-1,) + shape)\n', (5101, 5124), False, 'import torch\n'), ((19231, 19254), 'numpy.prod', 'np.prod', (['mlp_input_size'], {}), '(mlp_input_size)\n', (19238, 19254), True, 'import numpy as np\n'), ((22053, 22168), 'texar.torch.utils.utils.check_or_get_instance', 'utils.check_or_get_instance', (['self._dstr_type', 'self._dstr_kwargs', "['torch.distributions', 'texar.torch.custom']"], {}), "(self._dstr_type, self._dstr_kwargs, [\n 'torch.distributions', 'texar.torch.custom'])\n", (22080, 22168), False, 'from texar.torch.utils import utils\n'), ((25166, 25278), 'texar.torch.utils.utils.check_or_get_instance', 'utils.check_or_get_instance', (['distribution', 'self._dstr_kwargs', "['torch.distributions', 'texar.torch.custom']"], {}), "(distribution, self._dstr_kwargs, [\n 'torch.distributions', 'texar.torch.custom'])\n", (25193, 25278), False, 'from texar.torch.utils import utils\n'), ((25602, 25625), 'numpy.prod', 'np.prod', (['mlp_input_size'], {}), '(mlp_input_size)\n', (25609, 25625), True, 'import numpy as np\n'), ((8604, 8645), 'torch.full', 'torch.full', 
(['((batch_size,) + x)', 'self.value'], {}), '((batch_size,) + x, self.value)\n', (8614, 8645), False, 'import torch\n'), ((8687, 8726), 'torch.full', 'torch.full', (['(batch_size, x)', 'self.value'], {}), '((batch_size, x), self.value)\n', (8697, 8726), False, 'import torch\n'), ((18956, 18978), 'torch.nn.Parameter', 'nn.Parameter', (['dstr_val'], {}), '(dstr_val)\n', (18968, 18978), False, 'from torch import nn\n'), ((28580, 28595), 'torch.Size', 'torch.Size', (['[1]'], {}), '([1])\n', (28590, 28595), False, 'import torch\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example structure for the var-misuse task."""
from typing import List, Optional
import dataclasses
import numpy as np
from tensor2tensor.data_generators import text_encoder
from tensorflow.io import gfile
from gfsa import automaton_builder
from gfsa import generic_ast_graphs
from gfsa import graph_types
from gfsa import jax_util
from gfsa import sparse_operator
from gfsa.datasets import graph_bundle
from gfsa.datasets import graph_edge_util
# Children beyond this index share a generic edge type instead of getting a
# dedicated "nth child" edge type (see graph_edge_util.nth_child_edge_types).
EDGE_NTH_CHILD_MAX = 32
@dataclasses.dataclass
class ExampleEncodingInfo:
  """Keeps track of objects needed to encode and decode examples.
  Attributes:
    ast_spec: AST spec defining how to encode an AST.
    token_encoder: Subword encoder for encoding syntax tokens.
    schema: Automaton schema for the produced graphs. Generated automatically.
    edge_types: List of all edge types produced by the encoding. Generated
      automatically.
    builder: Automaton builder for the produced graphs. Generated automatically.
  """
  # Provided at initialization time.
  ast_spec: generic_ast_graphs.ASTSpec
  token_encoder: text_encoder.SubwordTextEncoder
  # Generated automatically in __post_init__ from `ast_spec`.
  schema: graph_types.GraphSchema = dataclasses.field(init=False)
  edge_types: List[str] = dataclasses.field(init=False)
  builder: automaton_builder.AutomatonBuilder = dataclasses.field(init=False)
  def __post_init__(self):
    """Populates non-init fields based on `ast_spec`."""
    self.schema = generic_ast_graphs.build_ast_graph_schema(self.ast_spec)
    # Sorted so the edge-type ordering (and any indexing derived from it) is
    # deterministic across runs; the set literal removes duplicates first.
    self.edge_types = sorted({
        graph_edge_util.SAME_IDENTIFIER_EDGE_TYPE,
        *graph_edge_util.PROGRAM_GRAPH_EDGE_TYPES,
        *graph_edge_util.schema_edge_types(self.schema),
        *graph_edge_util.nth_child_edge_types(EDGE_NTH_CHILD_MAX),
    })
    self.builder = automaton_builder.AutomatonBuilder(self.schema)
  @classmethod
  def from_files(cls, ast_spec_path,
                 encoder_vocab_path):
    """Builds an ExampleEncodingInfo object from files.
    Args:
      ast_spec_path: Path to a text file containing an AST spec definition.
        Format is expected to be a Python expression for a
        generic_ast_graphs.ASTSpec (as produced by `repr`). (Note that we assume
        that the source is trusted and safe to `eval`.)
      encoder_vocab_path: Path to a text file containing the vocabulary for a
        SubwordTextEncoder.
    Returns:
      A ExampleEncodingInfo populated with the contents of the given files.
    """
    with gfile.GFile(ast_spec_path, "r") as fp:
      # SECURITY: eval of file contents — safe only because the spec file is
      # trusted, as stated in the docstring above.
      ast_spec = eval(fp.read(), generic_ast_graphs.__dict__) # pylint: disable=eval-used
    token_encoder = text_encoder.SubwordTextEncoder(encoder_vocab_path)
    return ExampleEncodingInfo(ast_spec=ast_spec, token_encoder=token_encoder)
# register_dataclass_pytree makes instances traversable by JAX tree utilities.
@jax_util.register_dataclass_pytree
@dataclasses.dataclass
class GraphBundleWithTokens:
  """Graph bundle that also has a collection of tokens for each node.
  Attributes:
    bundle: Graph bundle representing the graph.
    tokens: Sparse operator mapping from an array of token embeddings to a list
      of nodes. Each node may have an arbitrary number of tokens (including
      zero). The tokens are considered to be unordered, and repeated tokens can
      be represented as entries in `tokens` with values greater than 1.
  """
  bundle: graph_bundle.GraphBundle
  tokens: sparse_operator.SparseCoordOperator
# register_dataclass_pytree makes instances traversable by JAX tree utilities.
@jax_util.register_dataclass_pytree
@dataclasses.dataclass
class GraphBundleWithTokensPaddingConfig:
  """Configuration specifying how examples get padded to a constant shape.
  Attributes:
    bundle_padding: PaddingConfig for the `bundle` attribute.
    max_tokens: Maximum number of entries in the `tokens` operator; in other
      words, the maximum number of unique (node, token) pairs allowed.
  """
  bundle_padding: graph_bundle.PaddingConfig
  max_tokens: int
# register_dataclass_pytree makes instances traversable by JAX tree utilities.
@jax_util.register_dataclass_pytree
@dataclasses.dataclass
class VarMisuseExample:
  """An example for a var misuse problem.
  Attributes:
    input_graph: Graph bundle with token information, containing the buggy
      version of the example.
    bug_node_index: Index of the node that corresponds to the variable misuse,
      or -1 if there is no bug.
    repair_node_mask: <bool[num_nodes]> array with True at locations that
      contain the correct replacement for the misused identifier.
    candidate_node_mask: <bool[num_nodes]> array with True at locations that
      contain an identifier that could be used as a repair target.
    unique_candidate_operator: Sparse operator mapping from nodes to unique
      candidate identifiers. We always pad this to the same length as the number
      of nodes in the graph; this should be fine for any real-world program. The
      identifier 0 is always the "no-bug" sentinel identifier.
    repair_id: ID for the correct repair.
  """
  input_graph: GraphBundleWithTokens
  bug_node_index: jax_util.NDArray
  repair_node_mask: jax_util.NDArray
  candidate_node_mask: jax_util.NDArray
  unique_candidate_operator: sparse_operator.SparseCoordOperator
  repair_id: jax_util.NDArray
def pad_example(example,
                config,
                allow_failure = False):
  """Pads an example so that it has a static shape determined by the config.

  Args:
    example: The example to pad.
    config: Configuration specifying the desired padding size.
    allow_failure: If True, returns None instead of failing if example is too
      large.

  Returns:
    A padded example with static shape, or None when `allow_failure` is set
    and the example does not fit.

  Raises:
    ValueError: If the graph is too big to pad to this size.
  """
  token_count = example.input_graph.tokens.values.shape[0]
  if token_count > config.max_tokens:
    if not allow_failure:
      raise ValueError("Example has too many tokens.")
    return None
  padded_bundle = graph_bundle.pad_example(
      example.input_graph.bundle, config.bundle_padding, allow_failure)
  if padded_bundle is None:
    return None
  # All node-aligned arrays are padded to the same static node count.
  max_nodes = config.bundle_padding.static_max_metadata.num_nodes
  padded_tokens = example.input_graph.tokens.pad_nonzeros(config.max_tokens)
  return VarMisuseExample(
      input_graph=GraphBundleWithTokens(bundle=padded_bundle,
                                        tokens=padded_tokens),
      bug_node_index=example.bug_node_index,
      repair_node_mask=jax_util.pad_to(example.repair_node_mask, max_nodes),
      candidate_node_mask=jax_util.pad_to(example.candidate_node_mask,
                                          max_nodes),
      unique_candidate_operator=(
          example.unique_candidate_operator.pad_nonzeros(max_nodes)),
      repair_id=example.repair_id)
def zeros_like_padded_example(
    config):
  """Builds a VarMisuseExample containing only zeros.

  This can be useful to initialize model parameters, or do tests.

  Args:
    config: Configuration specifying the desired padding size.

  Returns:
    An "example" filled with zeros of the given size.
  """
  num_nodes = config.bundle_padding.static_max_metadata.num_nodes
  max_tokens = config.max_tokens

  def _zero_operator(value_dtype):
    # All-zero sparse operator with `max_tokens` entries.
    return sparse_operator.SparseCoordOperator(
        input_indices=np.zeros(shape=(max_tokens, 1), dtype=np.int32),
        output_indices=np.zeros(shape=(max_tokens, 1), dtype=np.int32),
        values=np.zeros(shape=(max_tokens,), dtype=value_dtype))

  return VarMisuseExample(
      input_graph=GraphBundleWithTokens(
          bundle=graph_bundle.zeros_like_padded_example(config.bundle_padding),
          tokens=_zero_operator(np.int32)),
      bug_node_index=-1,
      repair_node_mask=np.zeros(shape=(num_nodes,), dtype=np.float32),
      candidate_node_mask=np.zeros(shape=(num_nodes,), dtype=np.float32),
      unique_candidate_operator=_zero_operator(np.float32),
      repair_id=0)
| [
"gfsa.datasets.graph_edge_util.nth_child_edge_types",
"tensorflow.io.gfile.GFile",
"gfsa.generic_ast_graphs.build_ast_graph_schema",
"gfsa.automaton_builder.AutomatonBuilder",
"gfsa.datasets.graph_bundle.pad_example",
"tensor2tensor.data_generators.text_encoder.SubwordTextEncoder",
"gfsa.jax_util.pad_to... | [((1820, 1849), 'dataclasses.field', 'dataclasses.field', ([], {'init': '(False)'}), '(init=False)\n', (1837, 1849), False, 'import dataclasses\n'), ((1876, 1905), 'dataclasses.field', 'dataclasses.field', ([], {'init': '(False)'}), '(init=False)\n', (1893, 1905), False, 'import dataclasses\n'), ((1954, 1983), 'dataclasses.field', 'dataclasses.field', ([], {'init': '(False)'}), '(init=False)\n', (1971, 1983), False, 'import dataclasses\n'), ((6395, 6489), 'gfsa.datasets.graph_bundle.pad_example', 'graph_bundle.pad_example', (['example.input_graph.bundle', 'config.bundle_padding', 'allow_failure'], {}), '(example.input_graph.bundle, config.bundle_padding,\n allow_failure)\n', (6419, 6489), False, 'from gfsa.datasets import graph_bundle\n'), ((2087, 2143), 'gfsa.generic_ast_graphs.build_ast_graph_schema', 'generic_ast_graphs.build_ast_graph_schema', (['self.ast_spec'], {}), '(self.ast_spec)\n', (2128, 2143), False, 'from gfsa import generic_ast_graphs\n'), ((2427, 2474), 'gfsa.automaton_builder.AutomatonBuilder', 'automaton_builder.AutomatonBuilder', (['self.schema'], {}), '(self.schema)\n', (2461, 2474), False, 'from gfsa import automaton_builder\n'), ((3269, 3320), 'tensor2tensor.data_generators.text_encoder.SubwordTextEncoder', 'text_encoder.SubwordTextEncoder', (['encoder_vocab_path'], {}), '(encoder_vocab_path)\n', (3300, 3320), False, 'from tensor2tensor.data_generators import text_encoder\n'), ((3118, 3149), 'tensorflow.io.gfile.GFile', 'gfile.GFile', (['ast_spec_path', '"""r"""'], {}), "(ast_spec_path, 'r')\n", (3129, 3149), False, 'from tensorflow.io import gfile\n'), ((6806, 6905), 'gfsa.jax_util.pad_to', 'jax_util.pad_to', (['example.repair_node_mask', 'config.bundle_padding.static_max_metadata.num_nodes'], {}), '(example.repair_node_mask, config.bundle_padding.\n static_max_metadata.num_nodes)\n', (6821, 6905), False, 'from gfsa import jax_util\n'), ((6949, 7051), 'gfsa.jax_util.pad_to', 'jax_util.pad_to', 
(['example.candidate_node_mask', 'config.bundle_padding.static_max_metadata.num_nodes'], {}), '(example.candidate_node_mask, config.bundle_padding.\n static_max_metadata.num_nodes)\n', (6964, 7051), False, 'from gfsa import jax_util\n'), ((8093, 8185), 'numpy.zeros', 'np.zeros', ([], {'shape': '(config.bundle_padding.static_max_metadata.num_nodes,)', 'dtype': 'np.float32'}), '(shape=(config.bundle_padding.static_max_metadata.num_nodes,),\n dtype=np.float32)\n', (8101, 8185), True, 'import numpy as np\n'), ((8230, 8322), 'numpy.zeros', 'np.zeros', ([], {'shape': '(config.bundle_padding.static_max_metadata.num_nodes,)', 'dtype': 'np.float32'}), '(shape=(config.bundle_padding.static_max_metadata.num_nodes,),\n dtype=np.float32)\n', (8238, 8322), True, 'import numpy as np\n'), ((2286, 2332), 'gfsa.datasets.graph_edge_util.schema_edge_types', 'graph_edge_util.schema_edge_types', (['self.schema'], {}), '(self.schema)\n', (2319, 2332), False, 'from gfsa.datasets import graph_edge_util\n'), ((2343, 2399), 'gfsa.datasets.graph_edge_util.nth_child_edge_types', 'graph_edge_util.nth_child_edge_types', (['EDGE_NTH_CHILD_MAX'], {}), '(EDGE_NTH_CHILD_MAX)\n', (2379, 2399), False, 'from gfsa.datasets import graph_edge_util\n'), ((7644, 7705), 'gfsa.datasets.graph_bundle.zeros_like_padded_example', 'graph_bundle.zeros_like_padded_example', (['config.bundle_padding'], {}), '(config.bundle_padding)\n', (7682, 7705), False, 'from gfsa.datasets import graph_bundle\n'), ((8434, 8488), 'numpy.zeros', 'np.zeros', ([], {'shape': '(config.max_tokens, 1)', 'dtype': 'np.int32'}), '(shape=(config.max_tokens, 1), dtype=np.int32)\n', (8442, 8488), True, 'import numpy as np\n'), ((8515, 8569), 'numpy.zeros', 'np.zeros', ([], {'shape': '(config.max_tokens, 1)', 'dtype': 'np.int32'}), '(shape=(config.max_tokens, 1), dtype=np.int32)\n', (8523, 8569), True, 'import numpy as np\n'), ((8588, 8642), 'numpy.zeros', 'np.zeros', ([], {'shape': '(config.max_tokens,)', 'dtype': 'np.float32'}), 
'(shape=(config.max_tokens,), dtype=np.float32)\n', (8596, 8642), True, 'import numpy as np\n'), ((7789, 7843), 'numpy.zeros', 'np.zeros', ([], {'shape': '(config.max_tokens, 1)', 'dtype': 'np.int32'}), '(shape=(config.max_tokens, 1), dtype=np.int32)\n', (7797, 7843), True, 'import numpy as np\n'), ((7893, 7947), 'numpy.zeros', 'np.zeros', ([], {'shape': '(config.max_tokens, 1)', 'dtype': 'np.int32'}), '(shape=(config.max_tokens, 1), dtype=np.int32)\n', (7901, 7947), True, 'import numpy as np\n'), ((7989, 8041), 'numpy.zeros', 'np.zeros', ([], {'shape': '(config.max_tokens,)', 'dtype': 'np.int32'}), '(shape=(config.max_tokens,), dtype=np.int32)\n', (7997, 8041), True, 'import numpy as np\n')] |
# import libraries
from flask import Flask, jsonify, request
import feature
from tensorflow import keras
import numpy as np
# Flask application instance; the prediction route below is registered on it.
app = Flask(__name__)
# Default on-disk location of the trained phishing-detection model.
# Raw string so the backslashes in the Windows path are taken literally.
_DEFAULT_MODEL_PATH = r"F:\Chrome Extension\Trawling-Chrome-Extention\Server\models48xLSTM-32xDense"


def loadModel(path=_DEFAULT_MODEL_PATH):
    """Load and return the pre-trained Keras model.

    Args:
        path: Directory of the saved Keras model. Defaults to the bundled
            48xLSTM/32xDense model; pass another path to load a different one.

    Returns:
        The loaded Keras model instance.
    """
    model = keras.models.load_model(path)
    return model
# Load the model once at import time so every request reuses the same instance.
model = loadModel()
# Make prediction using loaded keras model
def make_predict(url):
    """Extract features for ``url``, run the model, and return 0/1."""
    extracted = feature.featureExtraction(url)
    print(extracted, " - Extracted features")
    # Model expects a (batch=1, timesteps=1, features) tensor.
    model_input = np.asarray(extracted).reshape(1, 1, -1)
    prediction = model.predict(model_input)
    print(prediction[0], " - Predicted value before thresholding")
    return thresholding(prediction[0])
# Thresholding: legitimate <= threshold < phishing
def thresholding(prediction, threshold=0.8):
    """Binarize a model score into a phishing verdict.

    Args:
        prediction: Model output score (scalar or one-element array).
        threshold: Decision boundary; scores strictly greater than this are
            classified as phishing. Defaults to 0.8 (the previous hard-coded
            value), so existing callers are unaffected.

    Returns:
        1 if the score indicates phishing, otherwise 0.
    """
    return 1 if prediction > threshold else 0
# Routing for the prediction endpoint.
@app.route("/", methods=['GET', 'POST'])
def index():
    """POST {"url": ...} -> {"state": 0|1}; any other method -> {"state": -1}."""
    if request.method != "POST":
        print("Error - Not recieved POST method")
        return jsonify({"state": -1})
    payload = request.get_json()
    predicted = make_predict(payload["url"])
    print(predicted, "- predicted value")
    return jsonify({"state": predicted})
if __name__ == "__main__":
app.run(debug=True) | [
"numpy.reshape",
"flask.Flask",
"flask.jsonify",
"numpy.array",
"flask.request.get_json",
"tensorflow.keras.models.load_model",
"feature.featureExtraction"
] | [((153, 168), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (158, 168), False, 'from flask import Flask, jsonify, request\n'), ((324, 353), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['path'], {}), '(path)\n', (347, 353), False, 'from tensorflow import keras\n'), ((475, 505), 'feature.featureExtraction', 'feature.featureExtraction', (['url'], {}), '(url)\n', (500, 505), False, 'import feature\n'), ((566, 584), 'numpy.array', 'np.array', (['x_pedict'], {}), '(x_pedict)\n', (574, 584), True, 'import numpy as np\n'), ((600, 647), 'numpy.reshape', 'np.reshape', (['x_pedict', '(1, 1, x_pedict.shape[0])'], {}), '(x_pedict, (1, 1, x_pedict.shape[0]))\n', (610, 647), True, 'import numpy as np\n'), ((1101, 1119), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1117, 1119), False, 'from flask import Flask, jsonify, request\n'), ((1227, 1254), 'flask.jsonify', 'jsonify', (["{'state': predict}"], {}), "({'state': predict})\n", (1234, 1254), False, 'from flask import Flask, jsonify, request\n'), ((1331, 1353), 'flask.jsonify', 'jsonify', (["{'state': -1}"], {}), "({'state': -1})\n", (1338, 1353), False, 'from flask import Flask, jsonify, request\n')] |
import numpy as np
from . import ilossfunc
class SumSquareError(ilossfunc.ILossFunc):
    """Half sum-of-squares error, averaged over the batch."""

    def __init__(self):
        pass

    def get_loss(self, prediction: np.ndarray, label: np.ndarray) -> float:
        """Return ``0.5 * sum((prediction - label) ** 2) / batch_size``.

        Args:
            prediction: Network output of shape (batch, dim) or (dim,).
            label: Targets with the same shape as ``prediction``.

        Returns:
            Scalar loss (``np.float64``, which subclasses ``float``).
        """
        if prediction.ndim == 1:
            # Promote a single sample to a batch of one.
            prediction = prediction.reshape(1, prediction.size)
            label = label.reshape(1, label.size)
        batch_size = prediction.shape[0]
        return 0.5 * np.sum((prediction - label) ** 2) / batch_size
| [
"numpy.sum"
] | [((410, 443), 'numpy.sum', 'np.sum', (['((prediction - label) ** 2)'], {}), '((prediction - label) ** 2)\n', (416, 443), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# shape of sound buffers:
# array([[0., 0.],
# [0., 0.],
# [0., 0.],
# [0., 0.],
# [0., 0.],
# [0., 0.],
# [0., 0.],
# [0., 0.],
# [0., 0.],
# [0., 0.],
# [0., 0.],
# [0., 0.],
# [0., 0.],
# [0., 0.],
# [0., 0.],
# ...
# ])
import sys
import os
import random
import sounddevice as sd
import soundfile as sf
import pygame
import struct
import math
import pyaudio
import numpy as np
from itertools import count
from hsampler.ui import ModeButton, construct_test_button, Command
# Directory scanned for raw audio sample files at startup.
SAMPLE_DIR = "/home/hb9/projects/music/samples"
# Shared PyAudio instance used to open the output stream.
pa = pyaudio.PyAudio()
# Output format: 32-bit float stereo at 44.1 kHz.
FORMAT = pyaudio.paFloat32
CHANNELS = 2
RATE = 44100
# One output block covers 5 ms of audio (220 frames at 44.1 kHz).
OUTPUT_BLOCK_TIME = 0.005
OUTPUT_FRAMES_PER_BLOCK = int(RATE * OUTPUT_BLOCK_TIME)
frame_counter = 0  # NOTE(review): appears unused in this file — confirm before removing.
# Keys into the shared `context` dict that App and the command functions use.
KEY_SELECTED_RAW_SAMPLE = "selected_raw_sample"
KEY_RAW_SAMPLE_LIB = "raw_sample_lib"
KEY_SAMPLE_LIB = "sample_lib"
KEY_PLAYING_SAMPLES = "playing_samples"
KEY_SID = "sid"
KEY_LOOP = "loop"
KEY_SID_EDIT = "sid_edit"
KEY_SID_RESAMPLE = "sid_resample"
KEY_TRIM_STEP = "trim_step"
KEY_EDIT_MODE = "edit_mode"
# UI layout offset for the parameter column and trim-step tuning values.
PARAM_COL_OFFSET = 200
TRIM_STEP_ADJUST_STEP = 10
TRIM_STEP_DEFAULT = 300
# Edit modes cycled by edit_up/edit_down (order matters: it is the cycle order).
MODE_TRIM_START = "trim_start"
MODE_TRIM_END = "trim_end"
MODE_TRIM_STEP = "trim_step"
EDIT_MODES = [
    MODE_TRIM_START,
    MODE_TRIM_END,
    MODE_TRIM_STEP
]
# Static UI layout: window size plus position/size/label and keyboard key for
# every button. Each "key" value is resolved via KEY_PYGAME_KEY_MAPPING below.
APPCFG = {
    "screen_width": 1024,
    "screen_height": 786,
    # Function/mode button.
    "btn_fn": {
        "args": {
            "pos_x": 10,
            "pos_y": 10,
            "width": 50,
            "height": 18,
            "title": "TAB",
        },
        "key": "TAB",
    },
    # Six sample trigger pads (Q/W/E over A/S/D).
    "btn_sample": [
        {
            "args": {
                "pos_x": 10,
                "pos_y": 50,
                "width": 50,
                "height": 18,
                "title": "Q",
            },
            "key": "q",
        },
        {
            "args": {
                "pos_x": 100,
                "pos_y": 50,
                "width": 50,
                "height": 18,
                "title": "W",
            },
            "key": "w",
        },
        {
            "args": {
                "pos_x": 200,
                "pos_y": 50,
                "width": 50,
                "height": 18,
                "title": "E",
            },
            "key": "e",
        },
        {
            "args": {
                "pos_x": 10,
                "pos_y": 100,
                "width": 50,
                "height": 18,
                "title": "A",
            },
            "key": "a",
        },
        {
            "args": {
                "pos_x": 100,
                "pos_y": 100,
                "width": 50,
                "height": 18,
                "title": "S",
            },
            "key": "s",
        },
        {
            "args": {
                "pos_x": 200,
                "pos_y": 100,
                "width": 50,
                "height": 18,
                "title": "D",
            },
            "key": "d",
        },
    ],
    # Raw-sample browser navigation and loading.
    "btn_previous_raw_sample": {
        "args": {
            "pos_x": 10,
            "pos_y": 720,
            "width": 130,
            "height": 18,
            "title": "<- PAGE UP",
        },
        "key": "PAGE_UP",
    },
    "btn_next_raw_sample": {
        "args": {
            "pos_x": 150,
            "pos_y": 720,
            "width": 130,
            "height": 18,
            "title": "PAGE DOWN ->",
        },
        "key": "PAGE_DOWN",
    },
    "btn_load_raw_sample": {
        "args": {
            "pos_x": 300,
            "pos_y": 720,
            "width": 50,
            "height": 18,
            "title": "load",
        },
        "key": "l",
    },
    # Mode toggles: loop, edit, resample.
    "btn_toggle_loop": {
        "args": {
            "pos_x": 100,
            "pos_y": 10,
            "width": 60,
            "height": 18,
            "title": "LOOP .",
        },
        "key": "PERIOD",
    },
    "btn_toggle_edit": {
        "args": {
            "pos_x": 180,
            "pos_y": 10,
            "width": 50,
            "height": 18,
            "title": "EDIT -",
        },
        "key": "MINUS",
    },
    "btn_resample": {
        "args": {
            "pos_x": 250,
            "pos_y": 10,
            "width": 90,
            "height": 18,
            "title": "RESAMPLE ,",
        },
        "key": "COMMA",
    },
    # Arrow-key edit controls (mode up/down, value left/right).
    "btn_edit_up": {
        "args": {
            "pos_x": 900,
            "pos_y": 700,
            "width": 50,
            "height": 18,
            "title": "UP",
        },
        "key": "UP",
    },
    "btn_edit_down": {
        "args": {
            "pos_x": 900,
            "pos_y": 740,
            "width": 50,
            "height": 18,
            "title": "DOWN",
        },
        "key": "DOWN",
    },
    "btn_edit_left": {
        "args": {
            "pos_x": 850,
            "pos_y": 720,
            "width": 50,
            "height": 18,
            "title": "LEFT",
        },
        "key": "LEFT",
    },
    "btn_edit_right": {
        "args": {
            "pos_x": 950,
            "pos_y": 720,
            "width": 50,
            "height": 18,
            "title": "RIGHT",
        },
        "key": "RIGHT",
    },
}
# Maps the symbolic "key" names used in APPCFG to pygame key codes.
KEY_PYGAME_KEY_MAPPING = {
    "TAB": pygame.K_TAB,
    "q": pygame.K_q,
    "w": pygame.K_w,
    "e": pygame.K_e,
    "a": pygame.K_a,
    "s": pygame.K_s,
    "d": pygame.K_d,
    "l": pygame.K_l,
    "PAGE_UP": pygame.K_PAGEUP,
    "PAGE_DOWN": pygame.K_PAGEDOWN,
    "PERIOD": pygame.K_PERIOD,
    "MINUS": pygame.K_MINUS,
    "UP": pygame.K_UP,
    "DOWN": pygame.K_DOWN,
    "LEFT": pygame.K_LEFT,
    "RIGHT": pygame.K_RIGHT,
    "COMMA": pygame.K_COMMA,
}
class RawSample:
    """An unedited audio sample as loaded from a file on disk."""

    def __init__(self, data, samplerate, file_name):
        # Keep the originating file name so the UI can display it.
        self.file_name = file_name
        self.samplerate = samplerate
        self.data = data

    def num_frames(self):
        """Return the number of audio frames in this sample."""
        return len(self.data)
class Sample:
    """A playable sample with editable start/end trim points."""

    def __init__(self, data, sid=None):
        self._data = data
        # Assign a random id when none is given so titles stay distinct.
        self.sid = random.randint(0, 9999999) if sid is None else sid
        self.frame_start = 0
        num_frames = len(self.data)
        self.frame_end = num_frames
        # Remember the untrimmed end so edits can be clamped to it.
        self.frame_end_raw = num_frames

    @property
    def data(self):
        """The underlying audio frames (read-only)."""
        return self._data

    @property
    def title(self):
        """Display name derived from the sample id."""
        return f"sample_{self.sid}"
class Tape:
    """Accumulates mixed audio blocks during resampling.

    Once recording stops, ``to_sample`` freezes the tape into a regular
    ``Sample`` that reuses the tape's id.
    """

    def __init__(self):
        self.sid = random.randint(0, 9999999)
        self.blocks = []

    @property
    def title(self):
        """Display name derived from the tape id."""
        return f"sample_{self.sid}"

    def add_block(self, block):
        """Append one recorded audio block."""
        self.blocks.append(block)

    def to_sample(self):
        """Concatenate all recorded blocks into a Sample with this tape's id."""
        joined = np.concatenate(self.blocks)
        return Sample(joined[:], sid=self.sid)
class PlayingSample:
    """Playback cursor over a Sample, optionally looping."""

    def __init__(self, sample, loop=False):
        self.sample = sample
        self.loop = loop
        self._main_current_frame = self.sample.frame_start

    def reset(self, loop=None):
        """Rewind to the sample's start; optionally update the loop flag."""
        if loop is not None:
            self.loop = loop
        self._main_current_frame = self.sample.frame_start

    def stop(self):
        """Jump the cursor to the end and disable looping."""
        self.loop = False
        self._main_current_frame = self.sample.frame_end

    def add_block_to(self, block):
        """Mix the next OUTPUT_FRAMES_PER_BLOCK frames into ``block`` in place."""
        remaining = self.sample.frame_end - self._main_current_frame
        if remaining < OUTPUT_FRAMES_PER_BLOCK:
            if not self.loop:
                # Less than one full block left: contribute nothing (silent).
                return
            # Looping: wrap back to the trim start before mixing.
            self._main_current_frame = self.sample.frame_start
        cursor = self._main_current_frame
        block += self.sample.data[cursor : cursor + OUTPUT_FRAMES_PER_BLOCK]
        self._main_current_frame = cursor + OUTPUT_FRAMES_PER_BLOCK
class SampleCommand(Command):
    # Marker base class: buttons whose push command derives from this are
    # treated as "sample slots" by the mode-toggle helpers below.
    pass
class PlaySampleCommand(SampleCommand):
    """Starts (or restarts) playback of the sample bound to a button."""

    def execute(self, bid: int, context: dict):
        """Trigger the bound sample; no-op when no sample is loaded."""
        try:
            sample = context[KEY_SAMPLE_LIB][self.arguments.get(KEY_SID)]
        except KeyError:
            # Button has no sample assigned yet.
            return
        loop = self.arguments.get(KEY_LOOP, False)
        playing = context[KEY_PLAYING_SAMPLES]
        try:
            # Already playing: rewind it instead of creating a new cursor.
            playing[sample.sid].reset(loop)
        except KeyError:
            playing[sample.sid] = PlayingSample(sample, loop)
class ReleaseSampleCommand(SampleCommand):
    """Stops a looping sample when its button is released."""

    def execute(self, bid: int, context: dict):
        """Stop the bound sample, but only for loop-mode buttons."""
        if not self.arguments.get(KEY_LOOP, False):
            # One-shot samples simply play to their end.
            return
        context[KEY_PLAYING_SAMPLES][self.arguments[KEY_SID]].stop()
class ContextFnCommand(Command):
    """Command adapter that delegates execution to a plain function.

    The wrapped function is called as ``fn(bid, context)``, so simple
    behaviors don't need a dedicated Command subclass.
    """

    def __init__(self, cmd_name, fn, arguments=None):
        """Wrap ``fn`` as a command.

        Args:
            cmd_name: Name passed through to the Command base class.
            fn: Callable invoked as ``fn(bid, context)`` on execute.
            arguments: Optional arguments dict. Defaults to a fresh empty
                dict per instance (the previous ``arguments={}`` default was
                a shared mutable object across all instances).
        """
        self.fn = fn
        super().__init__(cmd_name, {} if arguments is None else arguments)

    def execute(self, bid, context):
        """Invoke the wrapped function."""
        self.fn(bid, context)
class SampleContextFnCommand(SampleCommand, ContextFnCommand):
    # Function-backed command that also counts as a SampleCommand, so the
    # mode-toggle helpers will install it on the sample buttons.
    pass
class App:
    """Pygame/pyaudio sampler application.

    Owns the window, the blocking audio output stream, the key->button map,
    and the shared ``context`` dict that all button commands read and mutate.
    """

    def __init__(self, sample_dir=SAMPLE_DIR, cfg=APPCFG):
        """Set up the UI, audio stream, shared context, samples and buttons.

        Args:
            sample_dir: Directory scanned for raw sample files.
            cfg: Layout/configuration dict (see APPCFG).
        """
        pygame.init()
        self.cfg = cfg
        self.screen = pygame.display.set_mode((self.cfg["screen_width"], self.cfg["screen_height"]))
        self.keys = {}
        # Blocking output stream; run() writes one small block per UI frame.
        self._main_stream = pa.open(format=FORMAT, channels=CHANNELS, rate=RATE, output=True)
        self._samples = {}
        self._playing_samples = {}
        self._keys_down = []
        self._keys_up = []
        # Mixing buffer for one audio block: (frames, channels).
        self._buffer = np.zeros((OUTPUT_FRAMES_PER_BLOCK, CHANNELS))
        self._key_code_button = {}
        self.context = {
            "screen": self.screen,
            "font_main": pygame.font.SysFont("arial", 14),
            "font_sub": pygame.font.SysFont("arial", 10),
            "buttons": self._key_code_button,
            KEY_SID_EDIT: None,
            KEY_SID_RESAMPLE: None,
            KEY_RAW_SAMPLE_LIB: [],
            KEY_SAMPLE_LIB: {},
            KEY_PLAYING_SAMPLES: {},
            KEY_SELECTED_RAW_SAMPLE: 0,
            KEY_TRIM_STEP: TRIM_STEP_DEFAULT,
            KEY_EDIT_MODE: 0
        }
        self._load_raw_samples(sample_dir)
        self._construct_buttons()

    def _construct_buttons(self):
        """Create every button from the config and register it by key code.

        Buttons are created in a fixed order so each receives a stable,
        sequential bid (``self.num_buttons`` at creation time).
        """
        # FN button: no default action of its own.
        current_btn_cfg = self.cfg["btn_fn"]
        current_btn = ModeButton(
            bid=self.num_buttons,
            cmd_push=Command.cmd_nop(),
            cmd_release=Command.cmd_nop(),
            **current_btn_cfg["args"]
        )
        self._add_button(current_btn, current_btn_cfg["key"])
        # Sample buttons: play on push, stop (when looping) on release.
        for current_btn_cfg in self.cfg["btn_sample"]:
            current_btn = ModeButton(
                bid=self.num_buttons,
                cmd_push=PlaySampleCommand("play_sample", {}),
                cmd_release=ReleaseSampleCommand("release_sample", {}),
                **current_btn_cfg["args"]
            )
            self._add_button(current_btn, current_btn_cfg["key"])
        # Remaining buttons all follow the same shape: a ContextFnCommand on
        # push, nop on release. Table order preserves the original bid order.
        for cfg_key, cmd_name, fn in (
            ("btn_previous_raw_sample", "previous_raw_sample", previous_raw_sample),
            ("btn_next_raw_sample", "next_raw_sample", next_raw_sample),
            ("btn_load_raw_sample", "load_raw_sample", load_raw_sample),
            ("btn_toggle_loop", "toggle_loop", toggle_loop),
            ("btn_toggle_edit", "toggle_edit", toggle_edit),
            ("btn_edit_up", "toggle_edit", edit_up),
            ("btn_edit_down", "toggle_edit", edit_down),
            ("btn_edit_left", "toggle_edit", edit_left),
            ("btn_edit_right", "toggle_edit", edit_right),
            ("btn_resample", "toggle_resample", toggle_resample),
        ):
            current_btn_cfg = self.cfg[cfg_key]
            current_btn = ModeButton(
                bid=self.num_buttons,
                cmd_push=ContextFnCommand(cmd_name, fn),
                cmd_release=Command.cmd_nop(),
                **current_btn_cfg["args"]
            )
            self._add_button(current_btn, current_btn_cfg["key"])

    def _add_button(self, btn: ModeButton, key_name: str):
        """Register ``btn`` under the pygame key code for ``key_name``."""
        self._key_code_button[KEY_PYGAME_KEY_MAPPING[key_name]] = btn

    @property
    def num_buttons(self):
        """Number of buttons registered so far (used as the next bid)."""
        return len(self._key_code_button.keys())

    def _load_raw_samples(self, sample_dir):
        """Read every file in ``sample_dir`` into the raw-sample library.

        Raises:
            RuntimeError: If a file's sample rate differs from RATE.
        """
        for file_name in os.listdir(sample_dir):
            complete_path = os.path.join(sample_dir, file_name)
            data, samplerate = sf.read(complete_path)
            if not samplerate == RATE:
                raise RuntimeError(f"Samplerate of {file_name} is {samplerate}!")
            raw_sample = RawSample(data, samplerate, file_name)
            self.context[KEY_RAW_SAMPLE_LIB].append(raw_sample)
            print(f"loaded {file_name}")

    def _handle_events(self):
        """Drain pygame events into the key-down/key-up lists.

        Returns:
            The (keys_down, keys_up) lists; run() clears them each frame.
        """
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                self._keys_down.append(event.key)
            if event.type == pygame.KEYUP:
                self._keys_up.append(event.key)
            if event.type == pygame.QUIT:
                # if it is quit the game
                pygame.quit()
                exit(0)
        return self._keys_down, self._keys_up

    def _print_selected_raw_sample(self):
        """Render the currently selected raw sample's file name."""
        text_current_sample = self.context[KEY_RAW_SAMPLE_LIB][
            self.context[KEY_SELECTED_RAW_SAMPLE]
        ].file_name
        text = self.context["font_main"].render(text_current_sample, True, (255, 255, 255))
        self.screen.blit(text, (10, self.cfg["screen_height"] - 30))

    def _print_params(self):
        """Render the edit-state parameter column on the right edge."""
        self._render_text(
            "edit mode: {}".format(EDIT_MODES[self.context.get(KEY_EDIT_MODE)]),
            pos_x=(self.cfg["screen_width"] - PARAM_COL_OFFSET),
            pos_y=10,
        )
        self._render_text(
            "resample sid: {}".format(self.context.get(KEY_SID_RESAMPLE)),
            pos_x=(self.cfg["screen_width"] - PARAM_COL_OFFSET),
            pos_y=40,
        )
        self._render_text(
            "edit sid: {}".format(self.context.get(KEY_SID_EDIT)),
            pos_x=(self.cfg["screen_width"] - PARAM_COL_OFFSET),
            pos_y=70,
        )
        self._render_text(
            "trim step: {}".format(self.context.get(KEY_TRIM_STEP)),
            pos_x=(self.cfg["screen_width"] - PARAM_COL_OFFSET),
            pos_y=100,
        )
        # Show trim points only while a sample is being edited in a trim mode.
        if (
            EDIT_MODES[self.context[KEY_EDIT_MODE]] in (MODE_TRIM_START, MODE_TRIM_END)
            and self.context[KEY_SID_EDIT] is not None
        ):
            sample = self.context[KEY_SAMPLE_LIB][self.context[KEY_SID_EDIT]]
            self._render_text(
                "start: {}".format(sample.frame_start),
                pos_x=(self.cfg["screen_width"] - PARAM_COL_OFFSET),
                pos_y=130,
            )
            self._render_text(
                "end: {}".format(sample.frame_end),
                pos_x=(self.cfg["screen_width"] - PARAM_COL_OFFSET),
                pos_y=160,
            )

    def _render_text(self, text, pos_x, pos_y, color=(255, 255, 255)):
        """Render ``text`` at (pos_x, pos_y) in the given color."""
        # Fix: the color parameter was previously ignored (hard-coded white).
        text_element = self.context["font_main"].render(text, True, color)
        self.screen.blit(text_element, (pos_x, pos_y))

    def run(self):
        """Main loop: handle input, redraw the UI, mix and emit one audio block."""
        while True:
            self.screen.fill((0, 0, 0))
            keys_down, keys_up = self._handle_events()
            for key in keys_down:
                btn = self._key_code_button.get(key, None)
                if btn:
                    btn.push(self.context)
            for key in keys_up:
                btn = self._key_code_button.get(key, None)
                if btn:
                    btn.release(self.context)
            for btn in self._key_code_button.values():
                btn.draw(self.context)
            self._print_selected_raw_sample()
            self._print_params()
            pygame.display.flip()
            self._keys_down.clear()
            self._keys_up.clear()
            # Mix all active samples into the shared block buffer.
            self._buffer.fill(0)
            for sample in self.context[KEY_PLAYING_SAMPLES].values():
                sample.add_block_to(self._buffer)
            # While resampling, record a copy of the mixed block onto the tape.
            if self.context[KEY_SID_RESAMPLE] is not None:
                self.context[KEY_SAMPLE_LIB][self.context[KEY_SID_RESAMPLE]].add_block(
                    np.copy(self._buffer)
                )
            # Interleave (frames, 2) into a flat array for the stream.
            data = self._buffer.reshape((OUTPUT_FRAMES_PER_BLOCK * 2, 1)).squeeze(1)
            # Fix: ndarray.tostring() was deprecated in NumPy 1.19 and removed
            # in 2.0; tobytes() returns the identical byte string.
            data = data.astype(np.float32).tobytes()
            self._main_stream.write(data)
def button_by_bid(bid, context):
    """Return the button whose bid matches, or None when no button has it."""
    for candidate in context["buttons"].values():
        if candidate.bid == bid:
            return candidate
    return None
def reset_buttons(context):
    """Restore every button's default push/release commands."""
    for button in context["buttons"].values():
        button.reset()
def load_raw_sample(bid, context):
    """Arm the sample buttons: the next one pushed loads the selected raw sample."""
    load_cmd = SampleContextFnCommand("load_into", load_raw_sample_into, arguments={})
    for btn in context["buttons"].values():
        # Only buttons acting as sample slots take part in load mode.
        if isinstance(btn.default_cmd_push, SampleCommand):
            btn.set_cmd_push(load_cmd)
def toggle_loop(bid, context):
    """Arm the sample buttons: the next one pushed flips its loop flag."""
    toggle_cmd = SampleContextFnCommand("toggle_loop", do_toggle_loop, arguments={})
    for btn in context["buttons"].values():
        if isinstance(btn.default_cmd_push, SampleCommand):
            btn.set_cmd_push(toggle_cmd)
def toggle_edit(bid, context):
    """Leave edit mode if active, otherwise arm buttons to pick an edit target."""
    if context[KEY_SID_EDIT] is not None:
        # Already editing: the toggle exits edit mode.
        context[KEY_SID_EDIT] = None
        return
    toggle_cmd = SampleContextFnCommand("toggle_edit", do_toggle_edit, arguments={})
    for btn in context["buttons"].values():
        if isinstance(btn.default_cmd_push, SampleCommand):
            btn.set_cmd_push(toggle_cmd)
def toggle_resample(bid, context):
    """Finish an active resample, or arm buttons to start a new one."""
    active_sid = context[KEY_SID_RESAMPLE]
    if active_sid is not None:
        # Stop recording: freeze the tape into a regular sample (same id).
        tape = context[KEY_SAMPLE_LIB][active_sid]
        context[KEY_SID_RESAMPLE] = None
        sample = tape.to_sample()
        context[KEY_SAMPLE_LIB][sample.sid] = sample
        return
    toggle_cmd = SampleContextFnCommand("toggle_resample", do_toggle_resample, arguments={})
    for btn in context["buttons"].values():
        if isinstance(btn.default_cmd_push, SampleCommand):
            btn.set_cmd_push(toggle_cmd)
def load_raw_sample_into(bid, context):
    """Load the currently selected raw sample onto the pushed button."""
    reset_buttons(context)
    raw = context[KEY_RAW_SAMPLE_LIB][context[KEY_SELECTED_RAW_SAMPLE]]
    # Copy the raw data so later trimming never touches the library entry.
    sample = Sample(raw.data[:])
    context[KEY_SAMPLE_LIB][sample.sid] = sample
    btn = button_by_bid(bid, context)
    # Bind both push and release commands to the freshly created sample.
    btn.default_cmd_push.arguments[KEY_SID] = sample.sid
    btn.default_cmd_release.arguments[KEY_SID] = sample.sid
def next_raw_sample(bid, context):
    """Advance the raw-sample selection, wrapping back to the first entry."""
    selected = context[KEY_SELECTED_RAW_SAMPLE] + 1
    if selected >= len(context[KEY_RAW_SAMPLE_LIB]):
        selected = 0
    context[KEY_SELECTED_RAW_SAMPLE] = selected
def previous_raw_sample(bid, context):
    """Move the raw-sample selection back, wrapping to the last entry."""
    selected = context[KEY_SELECTED_RAW_SAMPLE] - 1
    if selected < 0:
        selected = len(context[KEY_RAW_SAMPLE_LIB]) - 1
    context[KEY_SELECTED_RAW_SAMPLE] = selected
def play_sample(bid, context):
    # NOTE(review): this body is byte-identical to previous_raw_sample and does
    # not play anything — playback actually happens via PlaySampleCommand.
    # Looks like a copy-paste placeholder; confirm intent before relying on it.
    context[KEY_SELECTED_RAW_SAMPLE] -= 1
    if context[KEY_SELECTED_RAW_SAMPLE] < 0:
        context[KEY_SELECTED_RAW_SAMPLE] = len(context[KEY_RAW_SAMPLE_LIB]) - 1
def do_toggle_loop(bid, context):
    """Flip the loop flag on the pushed button's push and release commands."""
    reset_buttons(context)
    btn = button_by_bid(bid, context)
    new_value = not btn.default_cmd_push.arguments.get(KEY_LOOP, False)
    btn.default_cmd_push.arguments[KEY_LOOP] = new_value
    btn.default_cmd_release.arguments[KEY_LOOP] = new_value
def do_toggle_edit(bid, context):
    """Mark the pushed button's sample (if any) as the current edit target."""
    reset_buttons(context)
    pushed = button_by_bid(bid, context)
    context[KEY_SID_EDIT] = pushed.default_cmd_push.arguments.get(KEY_SID)
def do_toggle_resample(bid, context):
    """Start recording the mix into a fresh tape bound to the pushed button."""
    reset_buttons(context)
    btn = button_by_bid(bid, context)
    # Create the tape and register it in the sample library under its id.
    tape = Tape()
    context[KEY_SAMPLE_LIB][tape.sid] = tape
    # Rebind the button to the tape and force one-shot (non-loop) behaviour.
    for cmd in (btn.default_cmd_push, btn.default_cmd_release):
        cmd.arguments[KEY_SID] = tape.sid
        cmd.arguments[KEY_LOOP] = False
    context[KEY_SID_RESAMPLE] = tape.sid
def edit_up(bid, context):
    """Move to the previous edit mode, wrapping to the last one."""
    decremented = context[KEY_EDIT_MODE] - 1
    if decremented < 0:
        decremented = len(EDIT_MODES) - 1
    context[KEY_EDIT_MODE] = decremented
def edit_down(bid, context):
    """Move to the next edit mode, wrapping back to the first one."""
    incremented = context[KEY_EDIT_MODE] + 1
    if incremented >= len(EDIT_MODES):
        incremented = 0
    context[KEY_EDIT_MODE] = incremented
def edit_left(bid, context):
    """Decrease the value controlled by the current edit mode."""
    mode = EDIT_MODES[context[KEY_EDIT_MODE]]
    if mode == MODE_TRIM_STEP:
        # Shrink the trim step, clamped at zero.
        context[KEY_TRIM_STEP] = max(0, context[KEY_TRIM_STEP] - TRIM_STEP_ADJUST_STEP)
    if mode == MODE_TRIM_START and context[KEY_SID_EDIT] is not None:
        # Move the start marker left, never before frame 0.
        sample = context[KEY_SAMPLE_LIB][context[KEY_SID_EDIT]]
        sample.frame_start = max(0, sample.frame_start - context[KEY_TRIM_STEP])
    if mode == MODE_TRIM_END and context[KEY_SID_EDIT] is not None:
        # Move the end marker left, never before the start marker.
        sample = context[KEY_SAMPLE_LIB][context[KEY_SID_EDIT]]
        sample.frame_end = max(sample.frame_start, sample.frame_end - context[KEY_TRIM_STEP])
def edit_right(bid, context):
    """Increase the value controlled by the current edit mode."""
    mode = EDIT_MODES[context[KEY_EDIT_MODE]]
    if mode == MODE_TRIM_STEP:
        context[KEY_TRIM_STEP] += TRIM_STEP_ADJUST_STEP
    if mode == MODE_TRIM_START and context[KEY_SID_EDIT] is not None:
        # Move the start marker right, never past the end marker.
        sample = context[KEY_SAMPLE_LIB][context[KEY_SID_EDIT]]
        sample.frame_start = min(sample.frame_end, sample.frame_start + context[KEY_TRIM_STEP])
    if mode == MODE_TRIM_END and context[KEY_SID_EDIT] is not None:
        # Move the end marker right, never past the raw recording's end.
        sample = context[KEY_SAMPLE_LIB][context[KEY_SID_EDIT]]
        sample.frame_end = min(sample.frame_end_raw, sample.frame_end + context[KEY_TRIM_STEP])
if __name__ == "__main__":
    # Entry point: construct the application and enter its main loop.
    app = App()
    app.run()
| [
"numpy.copy",
"os.listdir",
"pygame.init",
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.flip",
"os.path.join",
"pygame.font.SysFont",
"numpy.zeros",
"numpy.concatenate",
"hsampler.ui.Command.cmd_nop",
"soundfile.read",
"pyaudio.PyAudio",
"random.randint"
... | [((672, 689), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (687, 689), False, 'import pyaudio\n'), ((6452, 6478), 'random.randint', 'random.randint', (['(0)', '(9999999)'], {}), '(0, 9999999)\n', (6466, 6478), False, 'import random\n'), ((6684, 6711), 'numpy.concatenate', 'np.concatenate', (['self.blocks'], {}), '(self.blocks)\n', (6698, 6711), True, 'import numpy as np\n'), ((8800, 8813), 'pygame.init', 'pygame.init', ([], {}), '()\n', (8811, 8813), False, 'import pygame\n'), ((8859, 8937), 'pygame.display.set_mode', 'pygame.display.set_mode', (["(self.cfg['screen_width'], self.cfg['screen_height'])"], {}), "((self.cfg['screen_width'], self.cfg['screen_height']))\n", (8882, 8937), False, 'import pygame\n'), ((9198, 9243), 'numpy.zeros', 'np.zeros', (['(OUTPUT_FRAMES_PER_BLOCK, CHANNELS)'], {}), '((OUTPUT_FRAMES_PER_BLOCK, CHANNELS))\n', (9206, 9243), True, 'import numpy as np\n'), ((14435, 14457), 'os.listdir', 'os.listdir', (['sample_dir'], {}), '(sample_dir)\n', (14445, 14457), False, 'import os\n'), ((14920, 14938), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (14936, 14938), False, 'import pygame\n'), ((6081, 6107), 'random.randint', 'random.randint', (['(0)', '(9999999)'], {}), '(0, 9999999)\n', (6095, 6107), False, 'import random\n'), ((9365, 9397), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""arial"""', '(14)'], {}), "('arial', 14)\n", (9384, 9397), False, 'import pygame\n'), ((9423, 9455), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""arial"""', '(10)'], {}), "('arial', 10)\n", (9442, 9455), False, 'import pygame\n'), ((14487, 14522), 'os.path.join', 'os.path.join', (['sample_dir', 'file_name'], {}), '(sample_dir, file_name)\n', (14499, 14522), False, 'import os\n'), ((14555, 14577), 'soundfile.read', 'sf.read', (['complete_path'], {}), '(complete_path)\n', (14562, 14577), True, 'import soundfile as sf\n'), ((18014, 18035), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (18033, 18035), False, 
'import pygame\n'), ((10068, 10085), 'hsampler.ui.Command.cmd_nop', 'Command.cmd_nop', ([], {}), '()\n', (10083, 10085), False, 'from hsampler.ui import ModeButton, construct_test_button, Command\n'), ((10111, 10128), 'hsampler.ui.Command.cmd_nop', 'Command.cmd_nop', ([], {}), '()\n', (10126, 10128), False, 'from hsampler.ui import ModeButton, construct_test_button, Command\n'), ((10924, 10941), 'hsampler.ui.Command.cmd_nop', 'Command.cmd_nop', ([], {}), '()\n', (10939, 10941), False, 'from hsampler.ui import ModeButton, construct_test_button, Command\n'), ((11278, 11295), 'hsampler.ui.Command.cmd_nop', 'Command.cmd_nop', ([], {}), '()\n', (11293, 11295), False, 'from hsampler.ui import ModeButton, construct_test_button, Command\n'), ((11632, 11649), 'hsampler.ui.Command.cmd_nop', 'Command.cmd_nop', ([], {}), '()\n', (11647, 11649), False, 'from hsampler.ui import ModeButton, construct_test_button, Command\n'), ((11974, 11991), 'hsampler.ui.Command.cmd_nop', 'Command.cmd_nop', ([], {}), '()\n', (11989, 11991), False, 'from hsampler.ui import ModeButton, construct_test_button, Command\n'), ((12316, 12333), 'hsampler.ui.Command.cmd_nop', 'Command.cmd_nop', ([], {}), '()\n', (12331, 12333), False, 'from hsampler.ui import ModeButton, construct_test_button, Command\n'), ((12650, 12667), 'hsampler.ui.Command.cmd_nop', 'Command.cmd_nop', ([], {}), '()\n', (12665, 12667), False, 'from hsampler.ui import ModeButton, construct_test_button, Command\n'), ((12988, 13005), 'hsampler.ui.Command.cmd_nop', 'Command.cmd_nop', ([], {}), '()\n', (13003, 13005), False, 'from hsampler.ui import ModeButton, construct_test_button, Command\n'), ((13326, 13343), 'hsampler.ui.Command.cmd_nop', 'Command.cmd_nop', ([], {}), '()\n', (13341, 13343), False, 'from hsampler.ui import ModeButton, construct_test_button, Command\n'), ((13666, 13683), 'hsampler.ui.Command.cmd_nop', 'Command.cmd_nop', ([], {}), '()\n', (13681, 13683), False, 'from hsampler.ui import ModeButton, construct_test_button, 
Command\n'), ((14013, 14030), 'hsampler.ui.Command.cmd_nop', 'Command.cmd_nop', ([], {}), '()\n', (14028, 14030), False, 'from hsampler.ui import ModeButton, construct_test_button, Command\n'), ((15226, 15239), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (15237, 15239), False, 'import pygame\n'), ((18429, 18450), 'numpy.copy', 'np.copy', (['self._buffer'], {}), '(self._buffer)\n', (18436, 18450), True, 'import numpy as np\n')] |
import sys
import moderngl
import numpy as np
from pyrr import Matrix44
from shadevolution import models, fresnel, shader, plot
class Evaluator:
    """
    An evaluator that runs a genetic algorithm using OpenGL for the fitness evaluation.
    """
    # Requested OpenGL context version.
    gl_version = (4, 1)
    def __init__(self, window, size=(2048, 2048), repeat=32, enable_plot=True):
        """
        Construct an evaluator instance.
        :param window: The window to perform the evaluation in.
        :param size: The size of the framebuffer to render in.
        :param repeat: The number of frames rendered per evaluation.
        :param enable_plot: A flag to enable live plotting of results.
        """
        self.wnd = window
        self.ctx = window.ctx
        self.size = size
        self.repeat = repeat
        # Reference (unmodified) Fresnel shader used as ground truth.
        self.reference_program = fresnel.create_program(self.ctx)
        self.fbo = self.ctx.simple_framebuffer(size, 4)
        self.vao = models.load_crate()
        self.fresnel = fresnel.Fresnel(self.ctx, self.vao)
        self.reporter = plot.VisualizationReporter() if enable_plot else None
    def __del__(self):
        # Release the GPU resources owned by this evaluator.
        self.vao.release()
        self.fbo.release()
    def determine_baseline(self):
        """
        Determine the baseline value of the original shader.
        :return: A list with one framebuffer (Numpy array of RGB rows) per rendered frame.
        """
        view = self._prepare_view()
        projection = self._prepare_projection(aspect=16/9, fovy=40)
        program = self.reference_program
        res = []
        try:
            for i in range(self.repeat):
                # One frame per rotation step of 0.05 radians.
                model = self._prepare_model(0.05 * i)
                # Render in our framebuffer object
                self.fbo.use()
                self.fbo.clear()
                self.fresnel.render(program, model, view, projection)
                raw = self.fbo.read(components=3, dtype='f4')
                fb = np.frombuffer(raw, dtype='f4')
                fb = fb.reshape((len(fb) // 3, 3))
                res.append(fb)
                # Render demonstration
                self.wnd.use()
                self._render_window(program, model, view, projection)
                self.wnd.swap_buffers()
        finally:
            # Make sure we render again to the window
            self.wnd.use()
        return res
    def eval(self, individual, genesis, baseline):
        """
        Evaluate the specified individual.
        :param individual: The individual to evaluate.
        :param genesis: The genesis tree.
        :param baseline: The baseline to compute the error against.
        :return: The score of the individual as (mean frame duration, mean error).
        """
        name = 'Fresnel'
        params = [('th', 'float'), ('n', 'float')]
        source = shader.write(name, params, individual)
        # Print difference between shaders to console
        sys.stdout.writelines(shader.diff(name, params, genesis, individual))
        print('')
        view = self._prepare_view()
        projection = self._prepare_projection(aspect=16/9, fovy=40)
        frame_durations = []
        errors = []
        try:
            program = fresnel.create_program(self.ctx, source)
            for i in range(self.repeat):
                # Same rotation schedule as determine_baseline so frames align.
                model = self._prepare_model(0.05 * i)
                # Render in our framebuffer object
                self.fbo.use()
                self.fbo.clear()
                query = self.fresnel.render(program, model, view, projection)
                frame_durations.append(query.elapsed)
                # self.duration_line.append(self.count, query.elapsed)
                # self.count += 1
                raw = self.fbo.read(components=3, dtype='f4')
                fb = np.frombuffer(raw, dtype='f4')
                fb = fb.reshape((len(fb) // 3, 3))
                err = np.linalg.norm(fb - baseline[i])
                errors.append(err)
                # Render demonstration
                self.wnd.use()
                self._render_window(program, model, view, projection)
                self.wnd.swap_buffers()
                if self.reporter:
                    self.reporter.report(query.elapsed, err)
        except Exception as e:
            print(e)
            # Make sure we can render again to the window
            self.wnd.use()
            # Penalty score for individuals whose shader fails to compile or run.
            return 35000000, 10000
        return np.mean(frame_durations), np.mean(errors)
    def _render_window(self, program, model, view, projection):
        """
        Render a demo of the current shaders at work to screen.
        """
        self.ctx.clear()
        self.ctx.enable_only(moderngl.DEPTH_TEST | moderngl.CULL_FACE | moderngl.BLEND)
        # Reference shader on the left half, candidate shader on the right.
        projection_left = Matrix44.from_translation((-0.5, 0, 0), dtype='f4') * projection
        projection_right = Matrix44.from_translation((0.5, 0, 0), dtype='f4') * projection
        self.fresnel.render(self.reference_program, model, view, projection_left)
        self.fresnel.render(program, model, view, projection_right)
    @staticmethod
    def _prepare_model(rot):
        """
        Prepare the model matrix.
        :param rot: The rotation to apply.
        """
        translation = Matrix44.from_translation((0, 0, 0), dtype='f4')
        rotation = Matrix44.from_eulers((0, 0, rot), dtype='f4')
        return translation * rotation
    @staticmethod
    def _prepare_view(x=2, y=2, z=2):
        """
        Prepare the view matrix (camera at (x, y, z) looking at the origin).
        """
        return Matrix44.look_at(
            (x, y, z),
            (0.0, 0.0, 0.0),
            (0.0, 1.0, 0.0),
            dtype='f4',
        )
    @staticmethod
    def _prepare_projection(aspect, fovy=60):
        """
        Prepare the projection matrix.
        """
        return Matrix44.perspective_projection(
            fovy=fovy, aspect=aspect, near=1.0, far=100.0,
            dtype='f4'
        )
| [
"numpy.mean",
"numpy.frombuffer",
"shadevolution.shader.diff",
"pyrr.Matrix44.perspective_projection",
"shadevolution.models.load_crate",
"pyrr.Matrix44.from_eulers",
"numpy.linalg.norm",
"shadevolution.fresnel.create_program",
"shadevolution.shader.write",
"pyrr.Matrix44.from_translation",
"pyr... | [((766, 798), 'shadevolution.fresnel.create_program', 'fresnel.create_program', (['self.ctx'], {}), '(self.ctx)\n', (788, 798), False, 'from shadevolution import models, fresnel, shader, plot\n'), ((874, 893), 'shadevolution.models.load_crate', 'models.load_crate', ([], {}), '()\n', (891, 893), False, 'from shadevolution import models, fresnel, shader, plot\n'), ((917, 952), 'shadevolution.fresnel.Fresnel', 'fresnel.Fresnel', (['self.ctx', 'self.vao'], {}), '(self.ctx, self.vao)\n', (932, 952), False, 'from shadevolution import models, fresnel, shader, plot\n'), ((2687, 2725), 'shadevolution.shader.write', 'shader.write', (['name', 'params', 'individual'], {}), '(name, params, individual)\n', (2699, 2725), False, 'from shadevolution import models, fresnel, shader, plot\n'), ((5096, 5144), 'pyrr.Matrix44.from_translation', 'Matrix44.from_translation', (['(0, 0, 0)'], {'dtype': '"""f4"""'}), "((0, 0, 0), dtype='f4')\n", (5121, 5144), False, 'from pyrr import Matrix44\n'), ((5164, 5209), 'pyrr.Matrix44.from_eulers', 'Matrix44.from_eulers', (['(0, 0, rot)'], {'dtype': '"""f4"""'}), "((0, 0, rot), dtype='f4')\n", (5184, 5209), False, 'from pyrr import Matrix44\n'), ((5377, 5450), 'pyrr.Matrix44.look_at', 'Matrix44.look_at', (['(x, y, z)', '(0.0, 0.0, 0.0)', '(0.0, 1.0, 0.0)'], {'dtype': '"""f4"""'}), "((x, y, z), (0.0, 0.0, 0.0), (0.0, 1.0, 0.0), dtype='f4')\n", (5393, 5450), False, 'from pyrr import Matrix44\n'), ((5653, 5748), 'pyrr.Matrix44.perspective_projection', 'Matrix44.perspective_projection', ([], {'fovy': 'fovy', 'aspect': 'aspect', 'near': '(1.0)', 'far': '(100.0)', 'dtype': '"""f4"""'}), "(fovy=fovy, aspect=aspect, near=1.0, far=\n 100.0, dtype='f4')\n", (5684, 5748), False, 'from pyrr import Matrix44\n'), ((977, 1005), 'shadevolution.plot.VisualizationReporter', 'plot.VisualizationReporter', ([], {}), '()\n', (1003, 1005), False, 'from shadevolution import models, fresnel, shader, plot\n'), ((2811, 2857), 'shadevolution.shader.diff', 
'shader.diff', (['name', 'params', 'genesis', 'individual'], {}), '(name, params, genesis, individual)\n', (2822, 2857), False, 'from shadevolution import models, fresnel, shader, plot\n'), ((3068, 3108), 'shadevolution.fresnel.create_program', 'fresnel.create_program', (['self.ctx', 'source'], {}), '(self.ctx, source)\n', (3090, 3108), False, 'from shadevolution import models, fresnel, shader, plot\n'), ((4282, 4306), 'numpy.mean', 'np.mean', (['frame_durations'], {}), '(frame_durations)\n', (4289, 4306), True, 'import numpy as np\n'), ((4308, 4323), 'numpy.mean', 'np.mean', (['errors'], {}), '(errors)\n', (4315, 4323), True, 'import numpy as np\n'), ((4617, 4668), 'pyrr.Matrix44.from_translation', 'Matrix44.from_translation', (['(-0.5, 0, 0)'], {'dtype': '"""f4"""'}), "((-0.5, 0, 0), dtype='f4')\n", (4642, 4668), False, 'from pyrr import Matrix44\n'), ((4709, 4759), 'pyrr.Matrix44.from_translation', 'Matrix44.from_translation', (['(0.5, 0, 0)'], {'dtype': '"""f4"""'}), "((0.5, 0, 0), dtype='f4')\n", (4734, 4759), False, 'from pyrr import Matrix44\n'), ((1850, 1880), 'numpy.frombuffer', 'np.frombuffer', (['raw'], {'dtype': '"""f4"""'}), "(raw, dtype='f4')\n", (1863, 1880), True, 'import numpy as np\n'), ((3644, 3674), 'numpy.frombuffer', 'np.frombuffer', (['raw'], {'dtype': '"""f4"""'}), "(raw, dtype='f4')\n", (3657, 3674), True, 'import numpy as np\n'), ((3749, 3781), 'numpy.linalg.norm', 'np.linalg.norm', (['(fb - baseline[i])'], {}), '(fb - baseline[i])\n', (3763, 3781), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
def generate_water_stats():
    """Draw 100 correlated (temperature, consumption) observations.

    Samples a bivariate normal distribution and returns a tuple ``(x, y)``
    where ``x`` (scaled by 5) plays the role of temperature and ``y`` the
    liters of water consumed.
    """
    covariance = np.array([[1.2, 1], [1, 1]])
    centre = np.array([3.2, 3.5])
    draws = np.random.multivariate_normal(mean=centre, cov=covariance, size=100)
    return draws[:, 0] * 5, draws[:, 1]
def plot_stats_without_lobf(x, y):
    """Scatter-plot the simulated study data (no trend line)."""
    plt.scatter(x, y)
    plt.ylabel("Liters of water")
    plt.xlabel("Temperature in °C")
    plt.title("Fake water consumption study")
    plt.show()
def plot_stats_with_lobf(x, y, a, b):
    """Scatter-plot the data together with the fitted line y = a*x + b."""
    fitted = a*x + b
    plt.scatter(x, y)
    plt.plot(x, fitted)
    plt.ylabel("Liters of water")
    plt.xlabel("Temperature in °C")
    plt.title("Fake water consumption study with line of best fit")
    plt.show()
def calculate_lobf(x, y):
    """Return (slope, intercept) of the least-squares line y = a*x + b.

    Uses the closed-form normal equations; both numerator and denominator
    share the same scaling, so the result equals the standard formulas.
    """
    shared_denominator = x.mean() * x.sum() - x.dot(x)
    slope = (x.sum() * y.mean() - x.dot(y)) / shared_denominator
    intercept = (x.dot(y) * x.mean() - y.mean() * x.dot(x)) / shared_denominator
    return slope, intercept
# Demo: simulate data, show the raw scatter, fit a line, then show the fit.
x, y = generate_water_stats()
plot_stats_without_lobf(x, y)
a, b = calculate_lobf(x, y)
plot_stats_with_lobf(x, y, a, b)
print(f"Line of best fit: y = {a:.2f}*x + {b:.2f}")
| [
"matplotlib.pyplot.ylabel",
"numpy.random.multivariate_normal",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((91, 119), 'numpy.array', 'np.array', (['[[1.2, 1], [1, 1]]'], {}), '([[1.2, 1], [1, 1]])\n', (99, 119), True, 'import numpy as np\n'), ((131, 151), 'numpy.array', 'np.array', (['[3.2, 3.5]'], {}), '([3.2, 3.5])\n', (139, 151), True, 'import numpy as np\n'), ((166, 225), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', ([], {'mean': 'mean', 'cov': 'cov', 'size': '(100)'}), '(mean=mean, cov=cov, size=100)\n', (195, 225), True, 'import numpy as np\n'), ((311, 328), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (322, 328), True, 'import matplotlib.pyplot as plt\n'), ((333, 374), 'matplotlib.pyplot.title', 'plt.title', (['"""Fake water consumption study"""'], {}), "('Fake water consumption study')\n", (342, 374), True, 'import matplotlib.pyplot as plt\n'), ((379, 410), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temperature in °C"""'], {}), "('Temperature in °C')\n", (389, 410), True, 'import matplotlib.pyplot as plt\n'), ((415, 444), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Liters of water"""'], {}), "('Liters of water')\n", (425, 444), True, 'import matplotlib.pyplot as plt\n'), ((449, 459), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (457, 459), True, 'import matplotlib.pyplot as plt\n'), ((522, 539), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (533, 539), True, 'import matplotlib.pyplot as plt\n'), ((544, 561), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'lobf'], {}), '(x, lobf)\n', (552, 561), True, 'import matplotlib.pyplot as plt\n'), ((566, 629), 'matplotlib.pyplot.title', 'plt.title', (['"""Fake water consumption study with line of best fit"""'], {}), "('Fake water consumption study with line of best fit')\n", (575, 629), True, 'import matplotlib.pyplot as plt\n'), ((634, 665), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temperature in °C"""'], {}), "('Temperature in °C')\n", (644, 665), True, 'import matplotlib.pyplot as plt\n'), ((670, 699), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Liters of water"""'], {}), "('Liters of water')\n", (680, 699), True, 'import matplotlib.pyplot as plt\n'), ((704, 714), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (712, 714), True, 'import matplotlib.pyplot as plt\n')] |
import unittest
import logging
import numpy as np
import pandas as pd
import scipy.stats as stats
from batchglm.api.models.glm_nb import Simulator
import diffxpy.api as de
class TestExtremeValues(unittest.TestCase):
    def test_t_test_zero_variance(self, n_cells: int = 2000, n_genes: int = 100):
        """
        Test if de.t_test() generates a uniform p-value distribution
        if it is given data simulated based on the null model. Returns the p-value
        of the two-side Kolmgorov-Smirnov test for equality of the observed
        p-value distribution and a uniform distribution.
        :param n_cells: Number of cells to simulate (number of observations per test).
        :param n_genes: Number of genes to simulate (number of tests).
        """
        # Silence the noisy third-party loggers.
        logging.getLogger("tensorflow").setLevel(logging.ERROR)
        for quiet in ("batchglm", "diffxpy"):
            logging.getLogger(quiet).setLevel(logging.WARNING)
        # Simulate null-model count data without batches or conditions.
        sim = Simulator(num_observations=n_cells, num_features=n_genes)
        sim.generate_sample_description(num_batches=0, num_conditions=0)
        sim.generate()
        # Pin the first gene to a constant value (zero variance case).
        sim.data.X[:, 0] = np.exp(sim.a)[0, 0]
        groups = pd.DataFrame({
            "condition": np.random.randint(2, size=sim.num_observations)
        })
        outcome = de.test.t_test(
            data=sim.X,
            grouping="condition",
            sample_description=groups
        )
        # Compare p-value distribution under null model against uniform distribution.
        pval_h0 = stats.kstest(outcome.pval, 'uniform').pvalue
        print('KS-test pvalue for null model match of t_test(): %f' % pval_h0)
        assert pval_h0 > 0.05, "KS-Test failed: pval_h0 is <= 0.05!"
        return pval_h0
if __name__ == '__main__':
    # Run the test suite when executed as a script.
    unittest.main()
| [
"logging.getLogger",
"batchglm.api.models.glm_nb.Simulator",
"diffxpy.api.test.t_test",
"scipy.stats.kstest",
"numpy.exp",
"numpy.random.randint",
"unittest.main"
] | [((1825, 1840), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1838, 1840), False, 'import unittest\n'), ((977, 1034), 'batchglm.api.models.glm_nb.Simulator', 'Simulator', ([], {'num_observations': 'n_cells', 'num_features': 'n_genes'}), '(num_observations=n_cells, num_features=n_genes)\n', (986, 1034), False, 'from batchglm.api.models.glm_nb import Simulator\n'), ((1330, 1429), 'diffxpy.api.test.t_test', 'de.test.t_test', ([], {'data': 'sim.X', 'grouping': '"""condition"""', 'sample_description': 'random_sample_description'}), "(data=sim.X, grouping='condition', sample_description=\n random_sample_description)\n", (1344, 1429), True, 'import diffxpy.api as de\n'), ((1158, 1171), 'numpy.exp', 'np.exp', (['sim.a'], {}), '(sim.a)\n', (1164, 1171), True, 'import numpy as np\n'), ((1576, 1610), 'scipy.stats.kstest', 'stats.kstest', (['test.pval', '"""uniform"""'], {}), "(test.pval, 'uniform')\n", (1588, 1610), True, 'import scipy.stats as stats\n'), ((779, 810), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (796, 810), False, 'import logging\n'), ((843, 872), 'logging.getLogger', 'logging.getLogger', (['"""batchglm"""'], {}), "('batchglm')\n", (860, 872), False, 'import logging\n'), ((907, 935), 'logging.getLogger', 'logging.getLogger', (['"""diffxpy"""'], {}), "('diffxpy')\n", (924, 935), False, 'import logging\n'), ((1255, 1302), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'sim.num_observations'}), '(2, size=sim.num_observations)\n', (1272, 1302), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
class ConditionalGenerator(nn.Module):
    """Class-conditional MLP generator (cGAN).

    Concatenates a learned label embedding with the latent noise vector and
    maps it through a widening MLP (128 -> 256 -> 512 -> 1024) to an image
    in [-1, 1] of shape `img_shape`.
    """

    def __init__(self, n_classes, latent_dim, img_shape):
        super().__init__()
        self.img_shape = img_shape
        self.label_emb = nn.Embedding(n_classes, n_classes)
        layers = []
        widths = [latent_dim + n_classes, 128, 256, 512, 1024]
        for idx, (n_in, n_out) in enumerate(zip(widths[:-1], widths[1:])):
            layers.append(nn.Linear(n_in, n_out))
            if idx > 0:  # the very first block skips normalization
                layers.append(nn.BatchNorm1d(n_out, 0.8))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
        layers.append(nn.Linear(1024, int(np.prod(img_shape))))
        layers.append(nn.Tanh())
        self.model = nn.Sequential(*layers)

    def forward(self, noise, labels):
        """Map a batch of (noise, labels) pairs to a batch of images."""
        # Concatenate label embedding and noise to produce the input
        conditioned = torch.cat((self.label_emb(labels), noise), -1)
        flat = self.model(conditioned)
        return flat.view(flat.size(0), *self.img_shape)
class ConditionalDiscriminator(nn.Module):
    """Class-conditional MLP discriminator (cGAN).

    Scores a (flattened image, label embedding) pair with a probability in
    (0, 1) via a 512-wide MLP with dropout.
    """

    def __init__(self, n_classes, img_shape):
        super().__init__()
        self.label_embedding = nn.Embedding(n_classes, n_classes)
        flat_size = int(np.prod(img_shape))
        self.model = nn.Sequential(
            nn.Linear(n_classes + flat_size, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 512),
            nn.Dropout(0.4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 512),
            nn.Dropout(0.4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 1),
            nn.Sigmoid()
        )

    def forward(self, img, labels):
        """Return a validity score per sample in the batch."""
        # Concatenate flattened image and label embedding to produce the input
        flattened = img.view(img.size(0), -1)
        joint = torch.cat((flattened, self.label_embedding(labels)), -1)
        return self.model(joint)
| [
"torch.nn.Sigmoid",
"numpy.prod",
"torch.nn.Dropout",
"torch.nn.Tanh",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"torch.nn.Embedding"
] | [((266, 300), 'torch.nn.Embedding', 'nn.Embedding', (['n_classes', 'n_classes'], {}), '(n_classes, n_classes)\n', (278, 300), True, 'import torch.nn as nn\n'), ((1319, 1353), 'torch.nn.Embedding', 'nn.Embedding', (['n_classes', 'n_classes'], {}), '(n_classes, n_classes)\n', (1331, 1353), True, 'import torch.nn as nn\n'), ((840, 849), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (847, 849), True, 'import torch.nn as nn\n'), ((1468, 1499), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1480, 1499), True, 'import torch.nn as nn\n'), ((1513, 1532), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(512)'], {}), '(512, 512)\n', (1522, 1532), True, 'import torch.nn as nn\n'), ((1546, 1561), 'torch.nn.Dropout', 'nn.Dropout', (['(0.4)'], {}), '(0.4)\n', (1556, 1561), True, 'import torch.nn as nn\n'), ((1575, 1606), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1587, 1606), True, 'import torch.nn as nn\n'), ((1620, 1639), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(512)'], {}), '(512, 512)\n', (1629, 1639), True, 'import torch.nn as nn\n'), ((1653, 1668), 'torch.nn.Dropout', 'nn.Dropout', (['(0.4)'], {}), '(0.4)\n', (1663, 1668), True, 'import torch.nn as nn\n'), ((1682, 1713), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1694, 1713), True, 'import torch.nn as nn\n'), ((1727, 1744), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(1)'], {}), '(512, 1)\n', (1736, 1744), True, 'import torch.nn as nn\n'), ((1758, 1770), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1768, 1770), True, 'import torch.nn as nn\n'), ((378, 406), 'torch.nn.Linear', 'nn.Linear', (['in_feat', 'out_feat'], {}), '(in_feat, out_feat)\n', (387, 406), True, 'import torch.nn as nn\n'), ((521, 552), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (533, 552), True, 'import torch.nn as nn\n'), 
((464, 493), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_feat', '(0.8)'], {}), '(out_feat, 0.8)\n', (478, 493), True, 'import torch.nn as nn\n'), ((806, 824), 'numpy.prod', 'np.prod', (['img_shape'], {}), '(img_shape)\n', (813, 824), True, 'import numpy as np\n'), ((1429, 1447), 'numpy.prod', 'np.prod', (['img_shape'], {}), '(img_shape)\n', (1436, 1447), True, 'import numpy as np\n')] |
from unittest import TestCase
import numpy as np
import pandas as pd
from copulas.univariate.gaussian import GaussianUnivariate
class TestGaussianUnivariate(TestCase):
    """Unit tests for copulas.univariate.gaussian.GaussianUnivariate.

    Several assertions pin exact float outputs of the library (e.g. the
    fitted std and probability densities); they intentionally use strict
    equality and would break on any numerical change in the implementation.
    """
    def test___init__(self):
        """On init, default values are set on instance."""
        # Setup / Run
        copula = GaussianUnivariate()
        # Check
        assert not copula.name
        assert copula.mean == 0
        assert copula.std == 1
    def test___str__(self):
        """str returns details about the model."""
        # Setup
        copula = GaussianUnivariate()
        expected_result = '\n'.join([
            'Distribution Type: Gaussian',
            'Variable name: None',
            'Mean: 0',
            'Standard deviation: 1'
        ])
        # Run
        result = copula.__str__()
        # Check
        assert result == expected_result
    def test_fit(self):
        """On fit, stats from fit data are set in the model."""
        # Setup
        copula = GaussianUnivariate()
        column = pd.Series([0, 1, 2, 3, 4, 5], name='column')
        mean = 2.5
        std = 1.707825127659933
        name = 'column'
        # Run
        copula.fit(column)
        # Check
        assert copula.mean == mean
        assert copula.std == std
        assert copula.name == name
    def test_fit_empty_data(self):
        """On fit, if column is empty an error is raised."""
        # Setup
        copula = GaussianUnivariate()
        column = pd.Series([])
        # Run
        with self.assertRaises(ValueError):
            copula.fit(column)
    # NOTE(review): the duplicated "test_test_" prefix looks like a typo;
    # the test is still discovered by unittest, so the name is kept as-is.
    def test_test_fit_equal_values(self):
        """On fit, even if column has equal values, std is never 0."""
        # Setup
        copula = GaussianUnivariate()
        column = [1, 1, 1, 1, 1, 1]
        # Run
        copula.fit(column)
        # Check
        assert copula.mean == 1
        assert copula.std == 0.001
    def test_get_probability_density(self):
        """Probability_density returns the normal probability distribution for the given values."""
        # Setup
        copula = GaussianUnivariate()
        column = [-1, 0, 1]
        copula.fit(column)
        expected_result = 0.48860251190292
        # Run
        result = copula.probability_density(0)
        # Check
        assert result == expected_result
    def test_cumulative_distribution(self):
        """Cumulative_density returns the cumulative distribution value for a point."""
        # Setup
        copula = GaussianUnivariate()
        column = [-1, 0, 1]
        copula.fit(column)
        x = pd.Series([0])
        expected_result = [0.5]
        # Run
        result = copula.cumulative_distribution(x)
        # Check
        assert (result == expected_result).all()
    def test_percent_point(self):
        """Percent_point returns the original point from the cumulative probability value."""
        # Setup
        copula = GaussianUnivariate()
        column = [-1, 0, 1]
        copula.fit(column)
        x = 0.5
        expected_result = 0
        # Run
        result = copula.percent_point(x)
        # Check
        assert (result == expected_result).all()
    def test_percent_point_reverse_cumulative_distribution(self):
        """Combined cumulative_distribution and percent_point is the identity function."""
        # Setup
        copula = GaussianUnivariate()
        column = [-1, 0, 1]
        copula.fit(column)
        initial_value = pd.Series([0])
        # Run
        result_a = copula.percent_point(copula.cumulative_distribution(initial_value))
        result_b = copula.cumulative_distribution(copula.percent_point(initial_value))
        # Check
        assert (initial_value == result_a).all()
        assert (initial_value == result_b).all()
    def test_sample(self):
        """After fitting, GaussianUnivariate is able to sample new data."""
        # Setup
        copula = GaussianUnivariate()
        column = [-1, 0, 1]
        copula.fit(column)
        # Run
        result = copula.sample(1000000)
        # Check: with 1e6 draws the sample mean/std should match the fitted
        # parameters within 1e-2 (10E-3).
        assert len(result) == 1000000
        assert abs(np.mean(result) - copula.mean) < 10E-3
        assert abs(np.std(result) - copula.std) < 10E-3
    def test_to_dict(self):
        """To_dict returns the defining parameters of a distribution in a dict."""
        # Setup
        copula = GaussianUnivariate()
        column = [0, 1, 2, 3, 4, 5]
        copula.fit(column)
        expected_result = {
            'mean': 2.5,
            'std': 1.707825127659933
        }
        # Run
        result = copula.to_dict()
        # Check
        assert result == expected_result
    def test_from_dict(self):
        """From_dict sets the values of a dictionary as attributes of the instance."""
        # Setup
        parameters = {
            'mean': 2.5,
            'std': 1.707825127659933
        }
        # Run
        copula = GaussianUnivariate.from_dict(parameters)
        # Check
        assert copula.mean == 2.5
        assert copula.std == 1.707825127659933
        # Sampling must work on a model restored from a dict.
        copula.sample(10)
| [
"pandas.Series",
"copulas.univariate.gaussian.GaussianUnivariate",
"numpy.mean",
"copulas.univariate.gaussian.GaussianUnivariate.from_dict",
"numpy.std"
] | [((301, 321), 'copulas.univariate.gaussian.GaussianUnivariate', 'GaussianUnivariate', ([], {}), '()\n', (319, 321), False, 'from copulas.univariate.gaussian import GaussianUnivariate\n'), ((547, 567), 'copulas.univariate.gaussian.GaussianUnivariate', 'GaussianUnivariate', ([], {}), '()\n', (565, 567), False, 'from copulas.univariate.gaussian import GaussianUnivariate\n'), ((984, 1004), 'copulas.univariate.gaussian.GaussianUnivariate', 'GaussianUnivariate', ([], {}), '()\n', (1002, 1004), False, 'from copulas.univariate.gaussian import GaussianUnivariate\n'), ((1022, 1066), 'pandas.Series', 'pd.Series', (['[0, 1, 2, 3, 4, 5]'], {'name': '"""column"""'}), "([0, 1, 2, 3, 4, 5], name='column')\n", (1031, 1066), True, 'import pandas as pd\n'), ((1435, 1455), 'copulas.univariate.gaussian.GaussianUnivariate', 'GaussianUnivariate', ([], {}), '()\n', (1453, 1455), False, 'from copulas.univariate.gaussian import GaussianUnivariate\n'), ((1473, 1486), 'pandas.Series', 'pd.Series', (['[]'], {}), '([])\n', (1482, 1486), True, 'import pandas as pd\n'), ((1725, 1745), 'copulas.univariate.gaussian.GaussianUnivariate', 'GaussianUnivariate', ([], {}), '()\n', (1743, 1745), False, 'from copulas.univariate.gaussian import GaussianUnivariate\n'), ((2087, 2107), 'copulas.univariate.gaussian.GaussianUnivariate', 'GaussianUnivariate', ([], {}), '()\n', (2105, 2107), False, 'from copulas.univariate.gaussian import GaussianUnivariate\n'), ((2493, 2513), 'copulas.univariate.gaussian.GaussianUnivariate', 'GaussianUnivariate', ([], {}), '()\n', (2511, 2513), False, 'from copulas.univariate.gaussian import GaussianUnivariate\n'), ((2581, 2595), 'pandas.Series', 'pd.Series', (['[0]'], {}), '([0])\n', (2590, 2595), True, 'import pandas as pd\n'), ((2923, 2943), 'copulas.univariate.gaussian.GaussianUnivariate', 'GaussianUnivariate', ([], {}), '()\n', (2941, 2943), False, 'from copulas.univariate.gaussian import GaussianUnivariate\n'), ((3357, 3377), 
'copulas.univariate.gaussian.GaussianUnivariate', 'GaussianUnivariate', ([], {}), '()\n', (3375, 3377), False, 'from copulas.univariate.gaussian import GaussianUnivariate\n'), ((3457, 3471), 'pandas.Series', 'pd.Series', (['[0]'], {}), '([0])\n', (3466, 3471), True, 'import pandas as pd\n'), ((3913, 3933), 'copulas.univariate.gaussian.GaussianUnivariate', 'GaussianUnivariate', ([], {}), '()\n', (3931, 3933), False, 'from copulas.univariate.gaussian import GaussianUnivariate\n'), ((4358, 4378), 'copulas.univariate.gaussian.GaussianUnivariate', 'GaussianUnivariate', ([], {}), '()\n', (4376, 4378), False, 'from copulas.univariate.gaussian import GaussianUnivariate\n'), ((4910, 4950), 'copulas.univariate.gaussian.GaussianUnivariate.from_dict', 'GaussianUnivariate.from_dict', (['parameters'], {}), '(parameters)\n', (4938, 4950), False, 'from copulas.univariate.gaussian import GaussianUnivariate\n'), ((4118, 4133), 'numpy.mean', 'np.mean', (['result'], {}), '(result)\n', (4125, 4133), True, 'import numpy as np\n'), ((4176, 4190), 'numpy.std', 'np.std', (['result'], {}), '(result)\n', (4182, 4190), True, 'import numpy as np\n')] |
import numpy as np
class MF():
    '''
    Matrix factorisation via regularised gradient descent.

    Key input is the sparse user-item ratings array, with user ratings in an
    array with a row per user and a column per item.  Values are the user's
    known rating, or zero if no rating is available.

    The output of ``fit`` is a dense user-item ratings array with all items
    given a rating; predicted ratings can be extracted from this array.
    '''
    def __init__(self, latent_features = 3, alpha = 0.0002, beta = 0.02, bias = False):
        # alpha: gradient-descent learning rate; beta: L2 regularisation strength.
        self.latent_features = latent_features
        self.alpha = alpha
        self.beta = beta
        self.R_est = None         # cached dense estimate, set by fit()
        self.H = None             # (users x latent) factor matrix
        self.W = None             # (latent x items) factor matrix
        self.R = None
        self.bias = bias
        self.user_bias = None
        self.item_bias = None

    def fit(self, R, iter = 1000, error_threshold = 0.05):
        """Factorise ratings matrix R and return the dense estimate.

        Args:
            R: 2-D array of ratings; zeros mark unknown entries.
            iter: maximum number of gradient-descent sweeps.
            error_threshold: stop early once the normalised absolute error
                over observed entries drops below this value.

        Returns:
            The dense estimated ratings array (users x items).
        """
        self.R = R
        self.users, self.items = R.shape
        self.H = np.random.rand(self.users, self.latent_features)
        self.W = np.random.rand(self.latent_features, self.items)
        if self.bias:
            self.user_bias = np.random.rand(self.users, 1)
            self.item_bias = np.random.rand(1, self.items)

        # Report progress ~25 times; max(1, ...) prevents the
        # ZeroDivisionError the original hit whenever iter < 25.
        report_every = max(1, iter // 25)
        for i in range(iter):
            if i % report_every == 0:
                print("Iteration {0}...".format(i))

            # Residuals against the current estimate (computed once per sweep,
            # then reused for every (user, item) update, as in the original).
            error = np.subtract(self.R, self._R_est())

            # Check for a good match before max iter is reached.
            if self._error() < error_threshold:
                print("Local minima found after {0} iterations".format(i))
                break

            # Gradient descent over the observed entries only.
            for user in range(self.users):
                for item in range(self.items):
                    if self.R[user, item] > 0:
                        e = error[user, item]
                        for k in range(self.latent_features):
                            self.H[user, k] += self.alpha * (2 * e * self.W[k, item] - self.beta * self.H[user, k])
                            self.W[k, item] += self.alpha * (2 * e * self.H[user, k] - self.beta * self.W[k, item])
                        if self.bias:
                            self.item_bias[0, item] += self.alpha * (e - self.beta * self.item_bias[0, item])
                            self.user_bias[user, 0] += self.alpha * (e - self.beta * self.user_bias[user, 0])

        # Cache the final estimate via _R_est() so the bias terms are
        # included (the original cached np.matmul(H, W), dropping them).
        self.R_est = self._R_est()
        return self.R_est

    def _error(self):
        """Sum of absolute errors over observed entries, normalised by the
        total of all known ratings."""
        sum_error = 0
        error = np.subtract(self.R, self._R_est())
        for user in range(self.users):
            for item in range(self.items):
                if self.R[user, item] > 0:
                    sum_error += abs(error[user, item])
        prop_error = sum_error / np.sum(self.R)
        return prop_error

    def _R_est(self):
        """Current dense estimate H @ W (plus bias terms when enabled)."""
        if self.bias:
            return np.matmul(self.H, self.W) + self.user_bias + self.item_bias
        else:
            return np.matmul(self.H, self.W)
if __name__ == "__main__":
    # Demo: factorise a small sparse ratings matrix (zeros = unknown)
    # and print the dense estimate.
    ratings = np.array([[1, 0, 0, 4, 5],
                        [2, 5, 1, 5, 5],
                        [1, 4, 1, 5, 4],
                        [4, 1, 4, 0, 3]])
    model = MF(bias=True)
    estimate = model.fit(ratings, error_threshold=0.005)
    print(estimate)
| [
"numpy.random.rand",
"numpy.subtract",
"numpy.sum",
"numpy.array",
"numpy.matmul"
] | [((2603, 2681), 'numpy.array', 'np.array', (['[[1, 0, 0, 4, 5], [2, 5, 1, 5, 5], [1, 4, 1, 5, 4], [4, 1, 4, 0, 3]]'], {}), '([[1, 0, 0, 4, 5], [2, 5, 1, 5, 5], [1, 4, 1, 5, 4], [4, 1, 4, 0, 3]])\n', (2611, 2681), True, 'import numpy as np\n'), ((847, 895), 'numpy.random.rand', 'np.random.rand', (['self.users', 'self.latent_features'], {}), '(self.users, self.latent_features)\n', (861, 895), True, 'import numpy as np\n'), ((907, 955), 'numpy.random.rand', 'np.random.rand', (['self.latent_features', 'self.items'], {}), '(self.latent_features, self.items)\n', (921, 955), True, 'import numpy as np\n'), ((2069, 2094), 'numpy.matmul', 'np.matmul', (['self.H', 'self.W'], {}), '(self.H, self.W)\n', (2078, 2094), True, 'import numpy as np\n'), ((993, 1022), 'numpy.random.rand', 'np.random.rand', (['self.users', '(1)'], {}), '(self.users, 1)\n', (1007, 1022), True, 'import numpy as np\n'), ((1043, 1072), 'numpy.random.rand', 'np.random.rand', (['(1)', 'self.items'], {}), '(1, self.items)\n', (1057, 1072), True, 'import numpy as np\n'), ((1223, 1249), 'numpy.subtract', 'np.subtract', (['self.R', 'R_est'], {}), '(self.R, R_est)\n', (1234, 1249), True, 'import numpy as np\n'), ((2367, 2381), 'numpy.sum', 'np.sum', (['self.R'], {}), '(self.R)\n', (2373, 2381), True, 'import numpy as np\n'), ((2528, 2553), 'numpy.matmul', 'np.matmul', (['self.H', 'self.W'], {}), '(self.H, self.W)\n', (2537, 2553), True, 'import numpy as np\n'), ((2450, 2475), 'numpy.matmul', 'np.matmul', (['self.H', 'self.W'], {}), '(self.H, self.W)\n', (2459, 2475), True, 'import numpy as np\n')] |
import numpy as np
from sklearn import preprocessing
from sklearn.naive_bayes import GaussianNB
from flask import Flask
from flask_restful import reqparse, abort, Api, Resource
# Initialise Flask App
app = Flask(__name__)
api = Api(app)

# Module-level label encoder shared by all requests; PredictResult.get()
# refits it per column via fit_transform on every call.
le = preprocessing.LabelEncoder()

# Gaussian Naive Bayes classifier; retrained inside every GET request.
model = GaussianNB()

# Request parser: each request carries a 'query' argument, a
# comma-separated string of integer-encoded answers.
parser = reqparse.RequestParser()
parser.add_argument('query')
class PredictResult(Resource):
    """REST resource that predicts a social-anxiety label from a
    comma-separated 'query' of integer-encoded survey answers."""

    def get(self):
        # Parse the user's query into a list of integer features.
        args = parser.parse_args()
        answers = [int(token) for token in args['query'].split(',')]

        # Sample dataset (one entry per respondent).
        feeling = ['anxious', 'anxious', 'alone', 'depressed', 'okay', 'sad', 'afraid', 'anxious', 'anxious', 'anxious', 'excited', 'tired', 'bored', 'happy']
        behaviour = ['social media', 'friends', 'work', 'social media', 'family', 'health', 'college', 'work', 'social media', 'friends', 'family', 'work', 'partner', 'work']
        alone = ['yes', 'yes', 'sometimes', 'no', 'yes', 'sometimes', 'no', 'no', 'sometimes', 'no', 'no', 'sometimes', 'sometimes', 'no']
        negativity = ['very often', 'often', 'often', 'sometimes', 'sometimes', 'rarely', 'sometimes', 'rarely', 'sometimes', 'never', 'rarely', 'sometimes', 'rarely', 'never']
        rejection = ['very often', 'very often', 'very often', 'sometimes', 'sometimes', 'often', 'sometimes', 'rarely', 'never', 'sometimes', 'rarely', 'rarely', 'rarely', 'never']
        result = ['yes', 'yes', 'yes', 'yes', 'no', 'yes', 'yes', 'no', 'no', 'yes', 'no', 'no', 'no', 'no']

        # Encode each textual column to integer labels.
        feeling_encoded = le.fit_transform(feeling)
        behaviour_encoded = le.fit_transform(behaviour)
        alone_encoded = le.fit_transform(alone)
        negativity_encoded = le.fit_transform(negativity)
        rejection_encoded = le.fit_transform(rejection)
        result_encoded = le.fit_transform(result)

        # Combine the encoded criteria into feature rows and train the model.
        samples = list(zip(feeling_encoded, behaviour_encoded, alone_encoded,
                           negativity_encoded, rejection_encoded))
        model.fit(samples, result_encoded)

        # Predict for the user's answers and return a JSON-serialisable dict.
        prediction = np.float32(model.predict([answers])[0])
        return {'social_anxiety': prediction.item()}
# Setup the Api resource routing here
# Route the URL to the resource
api.add_resource(PredictResult, '/')

if __name__ == '__main__':
    # Development-server entry point; debug=True is for local use only.
    app.run(debug=True) | [
"sklearn.preprocessing.LabelEncoder",
"flask_restful.reqparse.RequestParser",
"flask_restful.Api",
"flask.Flask",
"sklearn.naive_bayes.GaussianNB",
"numpy.float32"
] | [((207, 222), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (212, 222), False, 'from flask import Flask\n'), ((229, 237), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (232, 237), False, 'from flask_restful import reqparse, abort, Api, Resource\n'), ((272, 300), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (298, 300), False, 'from sklearn import preprocessing\n'), ((343, 355), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (353, 355), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((412, 436), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (434, 436), False, 'from flask_restful import reqparse, abort, Api, Resource\n'), ((2420, 2444), 'numpy.float32', 'np.float32', (['predicted[0]'], {}), '(predicted[0])\n', (2430, 2444), True, 'import numpy as np\n')] |
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.animation
import json
import nibabel as nib
from scipy.ndimage.interpolation import zoom
def save_history(filename, trainer):
    """Write a torchsample trainer's per-epoch metrics to *filename* as JSON."""
    with open(filename, 'w+') as out:
        json.dump(trainer.history.epoch_metrics, out)
def load_history(filename):
    """Read back a training history previously written by ``save_history``."""
    with open(filename) as fh:
        return json.load(fh)
def plot_learning_curve(history):
    """
    Plot loss and accuracy over epochs, as recorded in a History object
    from training with keras or torchsample.

    The top panel shows train (and, when present, validation) loss; the
    bottom panel shows the accuracy metric.
    """
    # noinspection PyTypeChecker
    fig, (loss_ax, acc_ax) = plt.subplots(2, sharex=True, figsize=(10, 7))
    epochs = range(1, len(history['loss']) + 1)

    loss_ax.grid()
    loss_ax.plot(epochs, history['loss'], 'b-', label='Train')
    if 'val_loss' in history:
        loss_ax.plot(epochs, history['val_loss'], 'b--', label='Val')
    loss_ax.set_ylabel('Loss')
    loss_ax.set_ylim(0, 1.5)
    loss_ax.legend()

    acc_ax.grid()
    acc_ax.plot(epochs, history['acc_metric'], 'r-', label='Train')
    if 'val_acc_metric' in history:
        acc_ax.plot(epochs, history['val_acc_metric'], 'r--', label='Val')
    acc_ax.set_xlabel('Epoch')
    acc_ax.set_ylabel('Accuracy / %')
    acc_ax.legend()
def load_nifti(file_path, mask=None, z_factor=None, remove_nan=True):
    """Load a 3D array from a NIFTI file.

    Optionally replaces NaNs with zeros, multiplies by ``mask``, and
    rescales the volume by ``z_factor`` (values rounded to 0 decimals).
    """
    volume = np.array(nib.load(file_path).get_data())
    if remove_nan:
        volume = np.nan_to_num(volume)
    if mask is not None:
        volume *= mask
    if z_factor is not None:
        volume = np.around(zoom(volume, z_factor), 0)
    return volume
def save_nifti(file_path, struct_arr):
    """Write a 3D array to *file_path* as a NIFTI image (identity affine)."""
    nib.save(nib.Nifti1Image(struct_arr, np.eye(4)), file_path)
# Transparent colormap (alpha to red), that is used for plotting an overlay.
# See https://stackoverflow.com/questions/37327308/add-alpha-to-an-existing-matplotlib-colormap
# 256-entry RGBA lookup table: constant red channel (0.8), alpha ramping
# 0 -> 1 so low overlay values are transparent and high values opaque red.
alpha_to_red_cmap = np.zeros((256, 4))
alpha_to_red_cmap[:, 0] = 0.8
alpha_to_red_cmap[:, -1] = np.linspace(0, 1, 256)  # cmap.N-20) # alpha values
alpha_to_red_cmap = mpl.colors.ListedColormap(alpha_to_red_cmap)

# Reverse ramp: alpha fades 1 -> 0 as values grow.
red_to_alpha_cmap = np.zeros((256, 4))
red_to_alpha_cmap[:, 0] = 0.8
red_to_alpha_cmap[:, -1] = np.linspace(1, 0, 256)  # cmap.N-20) # alpha values
red_to_alpha_cmap = mpl.colors.ListedColormap(red_to_alpha_cmap)
def plot_slices(struct_arr, num_slices=7, cmap='gray', vmin=None, vmax=None, overlay=None,
                overlay_cmap=alpha_to_red_cmap, overlay_vmin=None, overlay_vmax=None):
    """
    Plot equally spaced slices of a 3D image (and an overlay) along every axis

    Args:
        struct_arr (3D array or tensor): The 3D array to plot (usually from a nifti file).
        num_slices (int): The number of slices to plot for each dimension.
        cmap: The colormap for the image (default: `'gray'`).
        vmin (float): Same as in matplotlib.imshow. If `None`, take the global minimum of `struct_arr`.
        vmax (float): Same as in matplotlib.imshow. If `None`, take the global maximum of `struct_arr`.
        overlay (3D array or tensor): The 3D array to plot as an overlay on top of the image. Same size as `struct_arr`.
        overlay_cmap: The colomap for the overlay (default: `alpha_to_red_cmap`).
        overlay_vmin (float): Same as in matplotlib.imshow. If `None`, take the global minimum of `overlay`.
        overlay_vmax (float): Same as in matplotlib.imshow. If `None`, take the global maximum of `overlay`.
    """
    if vmin is None:
        vmin = struct_arr.min()
    if vmax is None:
        vmax = struct_arr.max()
    if overlay_vmin is None and overlay is not None:
        overlay_vmin = overlay.min()
    if overlay_vmax is None and overlay is not None:
        overlay_vmax = overlay.max()
    # (removed a leftover debug print of vmin/vmax/overlay ranges that
    # polluted stdout on every call)

    fig, axes = plt.subplots(3, num_slices, figsize=(15, 6))
    intervals = np.asarray(struct_arr.shape) / num_slices

    for axis, axis_label in zip([0, 1, 2], ['x', 'y', 'z']):
        for i, ax in enumerate(axes[axis]):
            # Centre each slice within its interval along the axis.
            i_slice = int(np.round(intervals[axis] / 2 + i * intervals[axis]))

            plt.sca(ax)
            plt.axis('off')
            plt.imshow(sp.ndimage.rotate(np.take(struct_arr, i_slice, axis=axis), 90), vmin=vmin, vmax=vmax,
                       cmap=cmap, interpolation=None)
            plt.text(0.03, 0.97, '{}={}'.format(axis_label, i_slice), color='white',
                     horizontalalignment='left', verticalalignment='top', transform=ax.transAxes)

            if overlay is not None:
                plt.imshow(sp.ndimage.rotate(np.take(overlay, i_slice, axis=axis), 90), cmap=overlay_cmap,
                           vmin=overlay_vmin, vmax=overlay_vmax, interpolation=None)
def animate_slices(struct_arr, overlay=None, axis=0, reverse_direction=False, interval=40, vmin=None, vmax=None,
                   overlay_vmin=None, overlay_vmax=None):
    """
    Create a matplotlib animation that moves through a 3D image along a specified axis.

    Args:
        struct_arr (3D array): The image volume to animate.
        overlay (3D array): Optional volume drawn on top with the
            alpha-to-red colormap; indexed with the same slices as `struct_arr`.
        axis (int): Axis (0, 1 or 2) along which to step through the volume.
        reverse_direction (bool): If True, step from the last slice back towards the first.
        interval (int): Delay between frames (passed to FuncAnimation).
        vmin, vmax (float): Intensity limits for the image; default to its global min/max.
        overlay_vmin, overlay_vmax (float): Intensity limits for the overlay; default to its global min/max.

    Returns:
        matplotlib.animation.FuncAnimation: the constructed animation.
    """
    if vmin is None:
        vmin = struct_arr.min()
    if vmax is None:
        vmax = struct_arr.max()
    if overlay_vmin is None and overlay is not None:
        overlay_vmin = overlay.min()
    if overlay_vmax is None and overlay is not None:
        overlay_vmax = overlay.max()

    fig, ax = plt.subplots()
    axis_label = ['x', 'y', 'z'][axis]

    # Initial artists are created from slice 0; update() mutates them in place.
    # TODO: If I select slice 50 here at the beginning, the plots look different.
    im = ax.imshow(np.take(struct_arr, 0, axis=axis), vmin=vmin, vmax=vmax, cmap='gray', interpolation=None,
                   animated=True)
    if overlay is not None:
        im_overlay = ax.imshow(np.take(overlay, 0, axis=axis), vmin=overlay_vmin, vmax=overlay_vmax,
                               cmap=alpha_to_red_cmap, interpolation=None, animated=True)
    text = ax.text(0.03, 0.97, '{}={}'.format(axis_label, 0), color='white',
                   horizontalalignment='left', verticalalignment='top', transform=ax.transAxes)
    ax.axis('off')

    def update(i):
        # Frame callback: swap in slice i for the image (and overlay) and
        # refresh the slice-index label.
        im.set_array(np.take(struct_arr, i, axis=axis))
        if overlay is not None:
            im_overlay.set_array(np.take(overlay, i, axis=axis))
        text.set_text('{}={}'.format(axis_label, i))
        return im, text

    num_frames = struct_arr.shape[axis]
    if reverse_direction:
        frames = np.arange(num_frames - 1, 0, -1)
    else:
        frames = np.arange(0, num_frames)
    # noinspection PyTypeChecker
    return mpl.animation.FuncAnimation(fig, update, frames=frames, interval=interval, blit=True)
def resize_image(img, size, interpolation=0):
    """Resize img to size. Interpolation between 0 (no interpolation) and 5 (maximum interpolation)."""
    # Per-axis zoom ratio between the requested and current shapes.
    factors = np.asarray(size) / np.asarray(img.shape)
    resized = sp.ndimage.zoom(img, factors, order=interpolation)
    return resized
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"nibabel.load",
"scipy.ndimage.interpolation.zoom",
"scipy.ndimage.zoom",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"matplotlib.colors.ListedColormap",
"numpy.take",
"numpy.linspace",
"matplotl... | [((2194, 2212), 'numpy.zeros', 'np.zeros', (['(256, 4)'], {}), '((256, 4))\n', (2202, 2212), True, 'import numpy as np\n'), ((2270, 2292), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(256)'], {}), '(0, 1, 256)\n', (2281, 2292), True, 'import numpy as np\n'), ((2343, 2387), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['alpha_to_red_cmap'], {}), '(alpha_to_red_cmap)\n', (2368, 2387), True, 'import matplotlib as mpl\n'), ((2409, 2427), 'numpy.zeros', 'np.zeros', (['(256, 4)'], {}), '((256, 4))\n', (2417, 2427), True, 'import numpy as np\n'), ((2485, 2507), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(256)'], {}), '(1, 0, 256)\n', (2496, 2507), True, 'import numpy as np\n'), ((2558, 2602), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['red_to_alpha_cmap'], {}), '(red_to_alpha_cmap)\n', (2583, 2602), True, 'import matplotlib as mpl\n'), ((765, 810), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'sharex': '(True)', 'figsize': '(10, 7)'}), '(2, sharex=True, figsize=(10, 7))\n', (777, 810), True, 'import matplotlib.pyplot as plt\n'), ((865, 881), 'matplotlib.pyplot.sca', 'plt.sca', (['axes[0]'], {}), '(axes[0])\n', (872, 881), True, 'import matplotlib.pyplot as plt\n'), ((886, 896), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (894, 896), True, 'import matplotlib.pyplot as plt\n'), ((901, 955), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', "history['loss']", '"""b-"""'], {'label': '"""Train"""'}), "(epochs, history['loss'], 'b-', label='Train')\n", (909, 955), True, 'import matplotlib.pyplot as plt\n'), ((1069, 1087), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (1079, 1087), True, 'import matplotlib.pyplot as plt\n'), ((1092, 1108), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.5)'], {}), '(0, 1.5)\n', (1100, 1108), True, 'import matplotlib.pyplot as plt\n'), ((1113, 1125), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1123, 
1125), True, 'import matplotlib.pyplot as plt\n'), ((1131, 1147), 'matplotlib.pyplot.sca', 'plt.sca', (['axes[1]'], {}), '(axes[1])\n', (1138, 1147), True, 'import matplotlib.pyplot as plt\n'), ((1152, 1162), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1160, 1162), True, 'import matplotlib.pyplot as plt\n'), ((1167, 1227), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', "history['acc_metric']", '"""r-"""'], {'label': '"""Train"""'}), "(epochs, history['acc_metric'], 'r-', label='Train')\n", (1175, 1227), True, 'import matplotlib.pyplot as plt\n'), ((1347, 1366), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (1357, 1366), True, 'import matplotlib.pyplot as plt\n'), ((1371, 1397), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy / %"""'], {}), "('Accuracy / %')\n", (1381, 1397), True, 'import matplotlib.pyplot as plt\n'), ((1402, 1414), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1412, 1414), True, 'import matplotlib.pyplot as plt\n'), ((1542, 1561), 'nibabel.load', 'nib.load', (['file_path'], {}), '(file_path)\n', (1550, 1561), True, 'import nibabel as nib\n'), ((1974, 1998), 'nibabel.save', 'nib.save', (['img', 'file_path'], {}), '(img, file_path)\n', (1982, 1998), True, 'import nibabel as nib\n'), ((4099, 4143), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', 'num_slices'], {'figsize': '(15, 6)'}), '(3, num_slices, figsize=(15, 6))\n', (4111, 4143), True, 'import matplotlib.pyplot as plt\n'), ((5653, 5667), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5665, 5667), True, 'import matplotlib.pyplot as plt\n'), ((6808, 6897), 'matplotlib.animation.FuncAnimation', 'mpl.animation.FuncAnimation', (['fig', 'update'], {'frames': 'frames', 'interval': 'interval', 'blit': '(True)'}), '(fig, update, frames=frames, interval=interval,\n blit=True)\n', (6835, 6897), True, 'import matplotlib as mpl\n'), ((7117, 7172), 'scipy.ndimage.zoom', 'sp.ndimage.zoom', (['img', 
'zoom_factors'], {'order': 'interpolation'}), '(img, zoom_factors, order=interpolation)\n', (7132, 7172), True, 'import scipy as sp\n'), ((349, 392), 'json.dump', 'json.dump', (['trainer.history.epoch_metrics', 'f'], {}), '(trainer.history.epoch_metrics, f)\n', (358, 392), False, 'import json\n'), ((533, 545), 'json.load', 'json.load', (['f'], {}), '(f)\n', (542, 545), False, 'import json\n'), ((973, 1030), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', "history['val_loss']", '"""b--"""'], {'label': '"""Val"""'}), "(epochs, history['val_loss'], 'b--', label='Val')\n", (981, 1030), True, 'import matplotlib.pyplot as plt\n'), ((1245, 1308), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', "history['val_acc_metric']", '"""r--"""'], {'label': '"""Val"""'}), "(epochs, history['val_acc_metric'], 'r--', label='Val')\n", (1253, 1308), True, 'import matplotlib.pyplot as plt\n'), ((1645, 1670), 'numpy.nan_to_num', 'np.nan_to_num', (['struct_arr'], {}), '(struct_arr)\n', (1658, 1670), True, 'import numpy as np\n'), ((1959, 1968), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1965, 1968), True, 'import numpy as np\n'), ((4160, 4188), 'numpy.asarray', 'np.asarray', (['struct_arr.shape'], {}), '(struct_arr.shape)\n', (4170, 4188), True, 'import numpy as np\n'), ((5809, 5842), 'numpy.take', 'np.take', (['struct_arr', '(0)'], {'axis': 'axis'}), '(struct_arr, 0, axis=axis)\n', (5816, 5842), True, 'import numpy as np\n'), ((6678, 6710), 'numpy.arange', 'np.arange', (['(num_frames - 1)', '(0)', '(-1)'], {}), '(num_frames - 1, 0, -1)\n', (6687, 6710), True, 'import numpy as np\n'), ((6738, 6762), 'numpy.arange', 'np.arange', (['(0)', 'num_frames'], {}), '(0, num_frames)\n', (6747, 6762), True, 'import numpy as np\n'), ((7065, 7081), 'numpy.asarray', 'np.asarray', (['size'], {}), '(size)\n', (7075, 7081), True, 'import numpy as np\n'), ((7084, 7105), 'numpy.asarray', 'np.asarray', (['img.shape'], {}), '(img.shape)\n', (7094, 7105), True, 'import numpy as np\n'), ((1783, 1809), 
'scipy.ndimage.interpolation.zoom', 'zoom', (['struct_arr', 'z_factor'], {}), '(struct_arr, z_factor)\n', (1787, 1809), False, 'from scipy.ndimage.interpolation import zoom\n'), ((4459, 4470), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (4466, 4470), True, 'import matplotlib.pyplot as plt\n'), ((4483, 4498), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4491, 4498), True, 'import matplotlib.pyplot as plt\n'), ((5992, 6022), 'numpy.take', 'np.take', (['overlay', '(0)'], {'axis': 'axis'}), '(overlay, 0, axis=axis)\n', (5999, 6022), True, 'import numpy as np\n'), ((6385, 6418), 'numpy.take', 'np.take', (['struct_arr', 'i'], {'axis': 'axis'}), '(struct_arr, i, axis=axis)\n', (6392, 6418), True, 'import numpy as np\n'), ((4334, 4385), 'numpy.round', 'np.round', (['(intervals[axis] / 2 + i * intervals[axis])'], {}), '(intervals[axis] / 2 + i * intervals[axis])\n', (4342, 4385), True, 'import numpy as np\n'), ((6485, 6515), 'numpy.take', 'np.take', (['overlay', 'i'], {'axis': 'axis'}), '(overlay, i, axis=axis)\n', (6492, 6515), True, 'import numpy as np\n'), ((4540, 4579), 'numpy.take', 'np.take', (['struct_arr', 'i_slice'], {'axis': 'axis'}), '(struct_arr, i_slice, axis=axis)\n', (4547, 4579), True, 'import numpy as np\n'), ((4927, 4963), 'numpy.take', 'np.take', (['overlay', 'i_slice'], {'axis': 'axis'}), '(overlay, i_slice, axis=axis)\n', (4934, 4963), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Unique Crater Distribution Functions
Functions for extracting craters from model target predictions and filtering
out duplicates.
"""
from __future__ import absolute_import, division, print_function
from PIL import Image
import matplotlib
import cv2
import matplotlib.pyplot as plt
import numpy as np
import h5py
import sys
import utils.template_match_target as tmt
import utils.processing as proc
import utils.transform as trf
from keras.models import load_model
import os
import pandas as pd
from input_data_gen import ringmaker,circlemaker,get_merge_indices
#########################
def get_model_preds(CP):
    """Generate model predictions for the first ``CP['n_imgs']`` images and
    persist them to ``CP['dir_preds']``.

    Parameters
    ----------
    CP : dict
        Contains directory locations for loading data and storing
        predictions ('dir_data', 'dir_model', 'dir_preds', 'n_imgs',
        'datatype').

    Returns
    -------
    preds : list
        Per-image model predictions.
    """
    n_imgs, dtype = CP['n_imgs'], CP['datatype']

    data = h5py.File(CP['dir_data'], 'r')
    Data = {
        dtype: [data['input_images'][:n_imgs].astype('float32'),
                data['target_masks'][:n_imgs].astype('float32')]
    }
    data.close()
    proc.preprocess(Data)

    model = load_model(CP['dir_model'])
    preds = []
    # Predict in mini-batches of 2 to bound memory use.
    for i in range(0, n_imgs, 2):
        batch_preds = model.predict(Data[dtype][0][i:i + 2])
        preds.extend(batch_preds)

    # Save predictions and close the file — the original left the close
    # commented out, leaking the write handle.
    h5f = h5py.File(CP['dir_preds'], 'w')
    h5f.create_dataset(dtype, data=preds)
    h5f.close()
    print("Successfully generated and saved model predictions.")
    return preds
def get_data(CP):
    """Load input images/target masks and per-image crater coordinates.

    (The original docstring was copy-pasted from ``get_model_preds`` and
    described the wrong function.)

    Parameters
    ----------
    CP : dict
        Contains directory locations for the image data ('dir_data'),
        crater tables ('crater_data'), 'n_imgs' and 'datatype'.

    Returns
    -------
    Data : dict
        {datatype: [input_images, target_masks]} float32 arrays.
    csvs : list
        Per-image (N, 3) arrays of (x, y, radius-in-pixels) crater
        coordinates, or the sentinel [-1] for images with no usable craters.
    """
    n_imgs, dtype = CP['n_imgs'], CP['datatype']

    data = h5py.File(CP['dir_data'], 'r')
    Data = {
        dtype: [data['input_images'][:n_imgs].astype('float32'),
                data['target_masks'][:n_imgs].astype('float32')]
    }
    data.close()

    csvs = []
    minrad, maxrad, cutrad, dim = 3, 50, 0.8, 256
    diam = 'Diameter (pix)'

    # Ensure the HDF store is closed on every path (the original leaked it).
    craters = pd.HDFStore(CP['crater_data'], 'r')
    try:
        n_csvs = len(craters)
        for i in range(n_csvs):
            csv = craters[proc.get_id(i, 2)]
            # Remove craters that are too small/large or partially cut off
            # by the image border.
            csv = csv[(csv[diam] < 2 * maxrad) & (csv[diam] > 2 * minrad)]
            csv = csv[(csv['x'] + cutrad * csv[diam] / 2 <= dim)]
            csv = csv[(csv['y'] + cutrad * csv[diam] / 2 <= dim)]
            csv = csv[(csv['x'] - cutrad * csv[diam] / 2 > 0)]
            csv = csv[(csv['y'] - cutrad * csv[diam] / 2 > 0)]
            if len(csv) < 1:    # Exclude csvs with few craters
                csvs.append([-1])
            else:
                csv_coords = np.asarray((csv['x'], csv['y'], csv[diam] / 2)).T
                csvs.append(csv_coords)
    finally:
        craters.close()
    return Data, csvs
def get_coords_classification(detect_coords, note_coords, longlat_thresh2=1.8, rad_thresh=1.0):
    """Split detections into matches and misses against annotated craters.

    A detection matches an annotation when the squared centre distance
    (normalised by the smaller radius squared) is below ``longlat_thresh2``
    and the relative radius difference is below ``rad_thresh``.  Each
    detection is paired with the first annotation that satisfies both tests.

    Returns:
        (matched detections, unmatched detections, unmatched annotations)
    """
    matched_det_idx = []
    matched_note_idx = []
    true_carter = []

    for di, (x, y, r) in enumerate(detect_coords):
        for ni, (nx, ny, nr) in enumerate(note_coords):
            smaller = np.minimum(r, nr)
            centre_dist2 = ((nx - x) ** 2 + (ny - y) ** 2) / smaller ** 2
            rad_diff = abs(nr - r) / smaller
            if (rad_diff < rad_thresh) & (centre_dist2 < longlat_thresh2):
                matched_det_idx.append(di)
                matched_note_idx.append(ni)
                true_carter.append(detect_coords[di])
                break

    detect_carter = [detect_coords[k] for k in range(len(detect_coords))
                     if k not in matched_det_idx]
    Undetected_carter = [note_coords[k] for k in range(len(note_coords))
                         if k not in matched_note_idx]
    return true_carter, detect_carter, Undetected_carter
def draw_pic(img, detect_coords, note_coords, save_path):
    """Draw matched (blue), spurious (green) and missed (red) crater rings
    on ``img`` and write the annotated image to ``save_path``."""
    matched, spurious, missed = get_coords_classification(detect_coords, note_coords)
    ring_width = 2
    # BGR colours: blue = correct detection, green = false positive, red = miss.
    for circles, colour in ((matched, (255, 0, 0)),
                            (spurious, (0, 255, 0)),
                            (missed, (0, 0, 255))):
        for x, y, r in circles:
            cv2.circle(img, (int(x), int(y)), int(r), colour, ring_width)
    cv2.imwrite(save_path, img)
#########################
def add_unique_craters(craters, craters_unique, thresh_longlat2, thresh_rad):
    """Generates unique crater distribution by filtering out duplicates.

    Note: ``Long``/``Lat``/``Rad`` are unpacked once before the loop, so each
    new crater is compared against the *incoming* master list only — craters
    appended during this call are not used for subsequent duplicate checks
    (this matches the original behaviour).

    Parameters
    ----------
    craters : array
        Crater tuples from a single image in the form (long, lat, radius).
    craters_unique : array
        Master array of unique crater tuples in the form (long, lat, radius)
    thresh_longlat2 : float.
        Hyperparameter that controls the minimum squared longitude/latitude
        difference between craters to be considered unique entries.
    thresh_rad : float
        Hyperparameter that controls the minimum squared radius difference
        between craters to be considered unique entries.

    Returns
    -------
    craters_unique : array
        Modified master array of unique crater tuples with new crater entries.
    """
    k2d = 180. / (np.pi * 1737.4)  # km to deg (1737.4 km: lunar radius, see r_moon elsewhere in file)
    Long, Lat, Rad = craters_unique.T
    for j in range(len(craters)):
        lo, la, r = craters[j].T
        la_m = (la + Lat) / 2.
        minr = np.minimum(r, Rad)  # be liberal when filtering dupes

        # duplicate filtering criteria
        dL = (((Long - lo) / (minr * k2d / np.cos(np.pi * la_m / 180.)))**2
              + ((Lat - la) / (minr * k2d))**2)
        dR = np.abs(Rad - r) / minr
        duplicate = (dR < thresh_rad) & (dL < thresh_longlat2)
        # ``not duplicate.any()`` replaces the original
        # ``len(np.where(index == True)[0]) == 0`` — same result without the
        # elementwise ``== True`` comparison and temporary index arrays.
        if not duplicate.any():
            craters_unique = np.vstack((craters_unique, craters[j]))
    return craters_unique
#########################
def estimate_longlatdiamkm(dim, llbd, distcoeff, coords):
    """First-order estimation of long/lat, and radius (km) from
    (Orthographic) x/y position and radius (pix).

    For images transformed from ~6000 pixel crops of the 30,000 pixel
    LROC-Kaguya DEM, this results in < ~0.4 degree latitude, <~0.2
    longitude offsets (~2% and ~1% of the image, respectively) and ~2% error
    in radius. Larger images thus may require an exact inverse transform,
    depending on the accuracy demanded by the user.

    Parameters
    ----------
    dim : tuple or list
        (width, height) of input images.
    llbd : tuple or list
        Long/lat limits (long_min, long_max, lat_min, lat_max) of image.
    distcoeff : float
        Ratio between the central heights of the transformed image and
        original image.
    coords : numpy.ndarray
        Array of crater x coordinates, y coordinates, and pixel radii.

    Returns
    -------
    craters_longlatdiamkm : numpy.ndarray
        Array of crater longitude, latitude and radii in km.
    """
    x_pix, y_pix, r_pix = coords.T

    # Pixel-to-km scale, then radii in km.
    km_per_pix = 1. / trf.km2pix(dim[1], llbd[3] - llbd[2], dc=distcoeff)
    radii_km = r_pix * km_per_pix

    deg_per_pix = km_per_pix * 180. / (np.pi * 1737.4)
    long_central = 0.5 * (llbd[0] + llbd[1])
    lat_central = 0.5 * (llbd[2] + llbd[3])

    # Iterative method for determining latitude: first-order estimate,
    # then a sinc-style correction based on the offset from centre.
    lat_first = lat_central - deg_per_pix * (y_pix - dim[1] / 2.)
    latdiff = abs(lat_central - lat_first)
    latdiff[latdiff < 1e-7] = 1e-7  # protect against latdiff = 0
    lat_deg = lat_central - (deg_per_pix * (y_pix - dim[1] / 2.) *
                             (np.pi * latdiff / 180.) /
                             np.sin(np.pi * latdiff / 180.))

    # Longitude from the determined latitude.
    long_deg = long_central + (deg_per_pix * (x_pix - dim[0] / 2.) /
                               np.cos(np.pi * lat_deg / 180.))

    return np.column_stack((long_deg, lat_deg, radii_km))
def extract_unique_craters(CP, craters_unique):
    """Top level function that extracts craters from model predictions,
    converts craters from pixel to real (degree, km) coordinates, and filters
    out duplicate detections across images.

    Parameters
    ----------
    CP : dict
        Crater Parameters needed to run the code.
    craters_unique : array
        Empty master array of unique crater tuples in the form
        (long, lat, radius).

    Returns
    -------
    craters_unique : array
        Filled master array of unique crater tuples.
    """
    # Load cached model predictions; regenerate when the file is missing,
    # unreadable, or lacks the expected dataset.  (The original bare
    # ``except:`` also swallowed KeyboardInterrupt/SystemExit and real bugs.)
    try:
        preds = h5py.File(CP['dir_preds'], 'r')[CP['datatype']]
        print("Loaded model predictions successfully")
    except (OSError, KeyError):
        print("Couldnt load model predictions, generating")
        preds = get_model_preds(CP)

    Data, Carters = get_data(CP)

    # need for long/lat bounds
    P = h5py.File(CP['dir_data'], 'r')
    llbd, pbd, distcoeff = ('longlat_bounds', 'pix_bounds',
                            'pix_distortion_coefficient')
    dim = (float(CP['dim']), float(CP['dim']))

    N_matches_tot = 0
    if not os.path.exists(CP['result_img']):
        os.mkdir(CP['result_img'])

    # Per-image counts, accumulated for the summary printout below.
    lenstr = ""
    lenstr1 = "true_carter"
    lenstr2 = "detect_carter"
    lenstr3 = "undetected_carter"
    num = num1 = num2 = num3 = 0

    for i in range(CP['n_imgs']):
        img_id = proc.get_id(i, 2)  # renamed from ``id`` (shadowed builtin)
        print("Drawing picture:%d" % i)
        input_images = Data[CP['datatype']][0][i]
        imgs = Image.fromarray(input_images.astype('uint8')).convert('RGB')
        img = cv2.cvtColor(np.asarray(imgs), cv2.COLOR_RGB2BGR)

        # Pixel-space crater candidates extracted from the prediction mask.
        coords = tmt.template_match_t(preds[i])
        num = num + len(coords)
        lenstr = lenstr + " " + str(len(coords))
        matplotlib.image.imsave(CP['result_img'] + "/" + str(i) + '_mask.jpg', preds[i])

        true_carter, detect_carter, Undetected_carter = \
            get_coords_classification(coords, Carters[i])
        lenstr1 = lenstr1 + " " + str(len(true_carter))
        num1 = num1 + len(true_carter)
        lenstr2 = lenstr2 + " " + str(len(detect_carter))
        num2 = num2 + len(detect_carter)
        lenstr3 = lenstr3 + " " + str(len(Undetected_carter))
        num3 = num3 + len(Undetected_carter)
        draw_pic(img, coords, Carters[i], CP['result_img'] + "/" + str(i) + '.jpg')

        if len(coords) > 0:
            # Convert pixel coordinates to (long, lat, radius in km).
            new_craters_unique = estimate_longlatdiamkm(
                dim, P[llbd][img_id], P[distcoeff][img_id][0], coords)
            N_matches_tot += len(coords)

            # Only add unique (non-duplicate) craters
            if len(craters_unique) > 0:
                craters_unique = add_unique_craters(new_craters_unique,
                                                    craters_unique,
                                                    CP['llt2'], CP['rt2'])
            else:
                craters_unique = np.concatenate((craters_unique,
                                                 new_craters_unique))

    # Close the data file once the per-image bounds are no longer needed
    # (the original leaked the handle).
    P.close()

    print(lenstr)
    print("total num:%d" % num)
    print(lenstr1)
    print(num1)
    print(lenstr2)
    print(num2)
    print(lenstr3)
    print(num3)
    np.save(CP['dir_result'], craters_unique)
    return craters_unique
| [
"numpy.column_stack",
"numpy.sin",
"pandas.HDFStore",
"utils.template_match_target.template_match_t",
"numpy.save",
"os.path.exists",
"utils.processing.get_id",
"numpy.where",
"numpy.asarray",
"os.mkdir",
"numpy.vstack",
"numpy.concatenate",
"numpy.abs",
"h5py.File",
"numpy.cos",
"util... | [((960, 990), 'h5py.File', 'h5py.File', (["CP['dir_data']", '"""r"""'], {}), "(CP['dir_data'], 'r')\n", (969, 990), False, 'import h5py\n'), ((1162, 1183), 'utils.processing.preprocess', 'proc.preprocess', (['Data'], {}), '(Data)\n', (1177, 1183), True, 'import utils.processing as proc\n'), ((1197, 1224), 'keras.models.load_model', 'load_model', (["CP['dir_model']"], {}), "(CP['dir_model'])\n", (1207, 1224), False, 'from keras.models import load_model\n'), ((1412, 1443), 'h5py.File', 'h5py.File', (["CP['dir_preds']", '"""w"""'], {}), "(CP['dir_preds'], 'w')\n", (1421, 1443), False, 'import h5py\n'), ((1925, 1955), 'h5py.File', 'h5py.File', (["CP['dir_data']", '"""r"""'], {}), "(CP['dir_data'], 'r')\n", (1934, 1955), False, 'import h5py\n'), ((2137, 2172), 'pandas.HDFStore', 'pd.HDFStore', (["CP['crater_data']", '"""r"""'], {}), "(CP['crater_data'], 'r')\n", (2148, 2172), True, 'import pandas as pd\n'), ((4622, 4649), 'cv2.imwrite', 'cv2.imwrite', (['save_path', 'img'], {}), '(save_path, img)\n', (4633, 4649), False, 'import cv2\n'), ((8338, 8384), 'numpy.column_stack', 'np.column_stack', (['(long_deg, lat_deg, radii_km)'], {}), '((long_deg, lat_deg, radii_km))\n', (8353, 8384), True, 'import numpy as np\n'), ((9295, 9325), 'h5py.File', 'h5py.File', (["CP['dir_data']", '"""r"""'], {}), "(CP['dir_data'], 'r')\n", (9304, 9325), False, 'import h5py\n'), ((11588, 11629), 'numpy.save', 'np.save', (["CP['dir_result']", 'craters_unique'], {}), "(CP['dir_result'], craters_unique)\n", (11595, 11629), True, 'import numpy as np\n'), ((5726, 5744), 'numpy.minimum', 'np.minimum', (['r', 'Rad'], {}), '(r, Rad)\n', (5736, 5744), True, 'import numpy as np\n'), ((7391, 7442), 'utils.transform.km2pix', 'trf.km2pix', (['dim[1]', '(llbd[3] - llbd[2])'], {'dc': 'distcoeff'}), '(dim[1], llbd[3] - llbd[2], dc=distcoeff)\n', (7401, 7442), True, 'import utils.transform as trf\n'), ((9546, 9578), 'os.path.exists', 'os.path.exists', (["CP['result_img']"], {}), 
"(CP['result_img'])\n", (9560, 9578), False, 'import os\n'), ((9588, 9614), 'os.mkdir', 'os.mkdir', (["CP['result_img']"], {}), "(CP['result_img'])\n", (9596, 9614), False, 'import os\n'), ((9805, 9822), 'utils.processing.get_id', 'proc.get_id', (['i', '(2)'], {}), '(i, 2)\n', (9816, 9822), True, 'import utils.processing as proc\n'), ((10066, 10096), 'utils.template_match_target.template_match_t', 'tmt.template_match_t', (['preds[i]'], {}), '(preds[i])\n', (10086, 10096), True, 'import utils.template_match_target as tmt\n'), ((2333, 2350), 'utils.processing.get_id', 'proc.get_id', (['i', '(2)'], {}), '(i, 2)\n', (2344, 2350), True, 'import utils.processing as proc\n'), ((3346, 3364), 'numpy.minimum', 'np.minimum', (['r', 'Rad'], {}), '(r, Rad)\n', (3356, 3364), True, 'import numpy as np\n'), ((5962, 5977), 'numpy.abs', 'np.abs', (['(Rad - r)'], {}), '(Rad - r)\n', (5968, 5977), True, 'import numpy as np\n'), ((6123, 6162), 'numpy.vstack', 'np.vstack', (['(craters_unique, craters[j])'], {}), '((craters_unique, craters[j]))\n', (6132, 6162), True, 'import numpy as np\n'), ((8061, 8092), 'numpy.sin', 'np.sin', (['(np.pi * latdiff / 180.0)'], {}), '(np.pi * latdiff / 180.0)\n', (8067, 8092), True, 'import numpy as np\n'), ((8249, 8280), 'numpy.cos', 'np.cos', (['(np.pi * lat_deg / 180.0)'], {}), '(np.pi * lat_deg / 180.0)\n', (8255, 8280), True, 'import numpy as np\n'), ((9014, 9045), 'h5py.File', 'h5py.File', (["CP['dir_preds']", '"""r"""'], {}), "(CP['dir_preds'], 'r')\n", (9023, 9045), False, 'import h5py\n'), ((10012, 10028), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (10022, 10028), True, 'import numpy as np\n'), ((2834, 2881), 'numpy.asarray', 'np.asarray', (["(csv['x'], csv['y'], csv[diam] / 2)"], {}), "((csv['x'], csv['y'], csv[diam] / 2))\n", (2844, 2881), True, 'import numpy as np\n'), ((11328, 11380), 'numpy.concatenate', 'np.concatenate', (['(craters_unique, new_craters_unique)'], {}), '((craters_unique, new_craters_unique))\n', (11342, 
11380), True, 'import numpy as np\n'), ((6060, 6083), 'numpy.where', 'np.where', (['(index == True)'], {}), '(index == True)\n', (6068, 6083), True, 'import numpy as np\n'), ((5868, 5896), 'numpy.cos', 'np.cos', (['(np.pi * la_m / 180.0)'], {}), '(np.pi * la_m / 180.0)\n', (5874, 5896), True, 'import numpy as np\n')] |
import itertools
import numpy as np
from ..sequences import Genome
def in_silico_mutagenesis_sequences(sequence,
                                    mutate_n_bases=1,
                                    reference_sequence=Genome,
                                    start_position=0,
                                    end_position=None):
    """
    Creates a list containing each mutation that occurs from an
    *in silico* mutagenesis across the whole sequence.

    Please note that we have not parallelized this function yet, so
    runtime increases exponentially when you increase `mutate_n_bases`.

    Parameters
    ----------
    sequence : str
        A string containing the sequence we would like to mutate.
    mutate_n_bases : int, optional
        Default is 1. The number of base changes to make with each set of
        mutations evaluated, e.g. `mutate_n_bases = 2` considers all
        pairs of SNPs.
    reference_sequence : class, optional
        Default is `selene_sdk.sequences.Genome`. The type of sequence
        that has been passed in.
    start_position : int, optional
        Default is 0. The starting position of the subsequence to be
        mutated.
    end_position : int or None, optional
        Default is None. The ending position of the subsequence to be
        mutated. If left as `None`, then `len(sequence)` will be used.

    Returns
    -------
    list(list(tuple))
        A list of all possible mutations. Each element in the list is
        itself a list of tuples, e.g. element = [(0, 'T')] when only mutating
        1 base at a time. Each tuple is the position to mutate and the base
        with which we are replacing the reference base.
        For a sequence of length 1000, mutating 1 base at a time means that
        we return a list with length of 3000-4000, depending on the number of
        unknown bases in the input sequences.

    Raises
    ------
    ValueError
        If `start_position` or `end_position` is negative, if either falls
        outside the sequence, if `start_position` is not strictly less than
        `end_position`, or if fewer than `mutate_n_bases` bases lie between
        them.
    """
    if end_position is None:
        end_position = len(sequence)
    # Validate the requested subsequence before doing any real work.
    if start_position >= end_position:
        raise ValueError(("Starting positions must be less than the ending "
                          "positions. Found a starting position of {0} with "
                          "an ending position of {1}.").format(start_position,
                                                               end_position))
    if start_position < 0:
        raise ValueError("Negative starting positions are not supported.")
    if end_position < 0:
        raise ValueError("Negative ending positions are not supported.")
    if start_position >= len(sequence):
        raise ValueError(("Starting positions must be less than the sequence length."
                          " Found a starting position of {0} with a sequence length "
                          "of {1}.").format(start_position, len(sequence)))
    if end_position > len(sequence):
        raise ValueError(("Ending positions must be less than or equal to the sequence "
                          "length. Found an ending position of {0} with a sequence "
                          "length of {1}.").format(end_position, len(sequence)))
    if (end_position - start_position) < mutate_n_bases:
        raise ValueError(("Fewer bases exist in the substring specified by the starting "
                          "and ending positions than need to be mutated. There are only "
                          "{0} currently, but {1} bases must be mutated at a "
                          "time").format(end_position - start_position, mutate_n_bases))
    # For each position in the sequence, the alternate bases: every base in
    # the alphabet except the reference base at that position.
    sequence_alts = [
        [base for base in reference_sequence.BASES_ARR if base != ref]
        for ref in sequence
    ]
    all_mutated_sequences = []
    for indices in itertools.combinations(
            range(start_position, end_position), mutate_n_bases):
        pos_mutations = [sequence_alts[i] for i in indices]
        # Cartesian product: every combination of alternate bases at the
        # chosen positions is one distinct set of mutations.
        for mutations in itertools.product(*pos_mutations):
            all_mutated_sequences.append(list(zip(indices, mutations)))
    return all_mutated_sequences
def mutate_sequence(encoding,
                    mutation_information,
                    reference_sequence=Genome):
    """
    Transforms a sequence with a set of mutations.

    Parameters
    ----------
    encoding : numpy.ndarray
        An :math:`L \\times N` array (where :math:`L` is the sequence's
        length and :math:`N` is the size of the sequence type's
        alphabet) holding the one-hot encoding of the
        reference sequence.
    mutation_information : list(tuple)
        List of tuples of (`int`, `str`). Each tuple is the position to
        mutate and the base to which to mutate that position in the
        sequence.
    reference_sequence : class, optional
        Default is `selene_sdk.sequences.Genome`. A reference sequence
        from which to retrieve smaller sequences.

    Returns
    -------
    numpy.ndarray
        An :math:`L \\times N` array holding the one-hot encoding of
        the mutated sequence.
    """
    mutated_encoding = np.copy(encoding)
    base_to_index = reference_sequence.BASE_TO_INDEX
    for position, alt_base in mutation_information:
        # Clear the one-hot row for this position, then set the alternate base.
        mutated_encoding[position, :] = 0
        mutated_encoding[position, base_to_index[alt_base]] = 1
    return mutated_encoding
def _ism_sample_id(sequence, mutation_information):
    """Build a human-readable identifier for one *in silico* mutagenesis sample.

    Parameters
    ----------
    sequence : str
        The input sequence to mutate.
    mutation_information : list(tuple)
        List of tuples of (`int`, `str`): the position mutated and the
        alternate base substituted at that position.

    Returns
    -------
    tuple(str, str, str)
        Semicolon-joined positions, reference bases, and alternate bases,
        e.g. ``('0;2', 'A;G', 'T;A')``. All three strings are empty when
        `mutation_information` is empty.
    """
    positions = []
    refs = []
    alts = []
    for position, alt in mutation_information:
        positions.append(str(position))
        # The reference base is whatever the input sequence holds at `position`.
        refs.append(sequence[position])
        alts.append(alt)
    return (';'.join(positions), ';'.join(refs), ';'.join(alts))
| [
"numpy.copy",
"itertools.product"
] | [((5634, 5651), 'numpy.copy', 'np.copy', (['encoding'], {}), '(encoding)\n', (5641, 5651), True, 'import numpy as np\n'), ((4506, 4539), 'itertools.product', 'itertools.product', (['*pos_mutations'], {}), '(*pos_mutations)\n', (4523, 4539), False, 'import itertools\n')] |
"""
Area calculations
-----------------
Calculates the area of pixels for a given grid input.
"""
def earth_radius(lat):
    """Calculate the radius of the earth for a given latitude.

    Uses an ellipsoid approximation with equatorial semi-axis 6378137 m and
    polar semi-axis 6356752 m.

    Args:
        lat (array, float): latitude value (-90 : 90)

    Returns:
        array: radius in metres
    """
    from numpy import cos, deg2rad, sin

    phi = deg2rad(lat)
    semi_major = 6378137
    semi_minor = 6356752
    numerator = (semi_major ** 2 * cos(phi)) ** 2 + (semi_minor ** 2 * sin(phi)) ** 2
    denominator = (semi_major * cos(phi)) ** 2 + (semi_minor * sin(phi)) ** 2
    return (numerator / denominator) ** 0.5
def area_grid(lat, lon, return_dataarray=False):
    """Calculate the area of each grid cell for given lats and lons.

    Args:
        lat (array): latitudes in decimal degrees of length N
        lon (array): longitudes in decimal degrees of length M
        return_dataarray (bool, optional): if True returns xr.DataArray, else array

    Returns:
        array, xr.DataArray: area of each grid cell in meters

    References:
        https://github.com/chadagreene/CDT/blob/master/cdt/cdtarea.m
    """
    from numpy import cos, deg2rad, gradient, meshgrid

    lat2d, lon2d = meshgrid(lat, lon)
    radius = earth_radius(lat2d)

    # Cell edge lengths: earth radius times the angular spacing (in radians);
    # the east-west edge shrinks with the cosine of latitude.
    cell_dy = deg2rad(gradient(lat2d, axis=1)) * radius
    cell_dx = deg2rad(gradient(lon2d, axis=0)) * radius * cos(deg2rad(lat2d))

    cell_area = cell_dy * cell_dx

    if not return_dataarray:
        return cell_area

    from xarray import DataArray

    return DataArray(
        cell_area.T,
        dims=["lat", "lon"],
        coords={"lat": lat, "lon": lon},
        attrs=dict(
            long_name="Area per pixel",
            units="m^2",
            description=(
                "Area per pixel as calculated by pySeaFlux. The non-"
                "spherical shape of Earth is taken into account."
            ),
        ),
    )
def get_area_from_dataset(dataarray, lat_name="lat", lon_name="lon"):
    """
    Calculate the grid cell area from a xr.Dataset or xr.DataArray.

    Args:
        dataarray (xr.Dataset, xr.DataArray): object carrying latitude and
            longitude coordinates.
        lat_name (str, optional): name of the latitude coordinate ("lat").
        lon_name (str, optional): name of the longitude coordinate ("lon").

    Returns:
        xr.DataArray: area of each grid cell in m^2.
    """
    # BUGFIX: lat_name/lon_name were previously ignored (hard-coded ``.lat``
    # and ``.lon`` attribute access); honour the arguments so datasets with
    # differently named coordinates work. Defaults preserve old behaviour.
    x = dataarray[lon_name].values
    y = dataarray[lat_name].values
    return area_grid(y, x, return_dataarray=True)
| [
"numpy.deg2rad",
"numpy.cos",
"numpy.sin",
"numpy.meshgrid",
"numpy.gradient"
] | [((355, 367), 'numpy.deg2rad', 'deg2rad', (['lat'], {}), '(lat)\n', (362, 367), False, 'from numpy import cos, deg2rad, gradient, meshgrid\n'), ((1135, 1153), 'numpy.meshgrid', 'meshgrid', (['lat', 'lon'], {}), '(lat, lon)\n', (1143, 1153), False, 'from numpy import cos, deg2rad, gradient, meshgrid\n'), ((1201, 1223), 'numpy.gradient', 'gradient', (['ylat'], {'axis': '(1)'}), '(ylat, axis=1)\n', (1209, 1223), False, 'from numpy import cos, deg2rad, gradient, meshgrid\n'), ((1244, 1266), 'numpy.gradient', 'gradient', (['xlon'], {'axis': '(0)'}), '(xlon, axis=0)\n', (1252, 1266), False, 'from numpy import cos, deg2rad, gradient, meshgrid\n'), ((1311, 1324), 'numpy.deg2rad', 'deg2rad', (['ylat'], {}), '(ylat)\n', (1318, 1324), False, 'from numpy import cos, deg2rad, gradient, meshgrid\n'), ((429, 437), 'numpy.cos', 'cos', (['lat'], {}), '(lat)\n', (432, 437), False, 'from numpy import cos, deg2rad, gradient, meshgrid\n'), ((456, 464), 'numpy.sin', 'sin', (['lat'], {}), '(lat)\n', (459, 464), False, 'from numpy import cos, deg2rad, sin\n'), ((488, 496), 'numpy.cos', 'cos', (['lat'], {}), '(lat)\n', (491, 496), False, 'from numpy import cos, deg2rad, gradient, meshgrid\n'), ((510, 518), 'numpy.sin', 'sin', (['lat'], {}), '(lat)\n', (513, 518), False, 'from numpy import cos, deg2rad, sin\n')] |
# -*- coding: utf-8 -*-
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
class ExogenousBaseModel(ABC):
    """
    Exogenous Abstract Base Class.

    Subclasses implement ``fit``/``predict`` and store their fitted values in
    ``self.fitted``. The arithmetic operators below combine the ``fitted``
    arrays of two models, which is useful for ensembling or quickly updating
    exogenous components.
    """

    # Human-readable model identifier; concrete subclasses should override.
    model = None

    @abstractmethod
    def __init__(self):
        # In-sample fitted values; populated by ``fit``.
        self.fitted = None

    def __str__(self):
        return f'{self.model} model'

    @abstractmethod
    def fit(self, y, **kwargs):
        """
        Fit the exogenous component in the boosting loop.

        Parameters
        ----------
        y : array-like
            Target values to fit the exogenous component against.
        **kwargs : dict
            Implementation-specific fitting options.

        Returns
        -------
        None.
        """

    @abstractmethod
    def predict(self, future_exogenous, forecast_horizon):
        """Forecast the exogenous component over ``forecast_horizon`` steps."""

    def __add__(self, exo_object):
        """
        Add two exo obj together, useful for ensembling or just quick
        updating of exo components.

        Parameters
        ----------
        exo_object : ExogenousBaseModel
            Another fitted exogenous model.

        Returns
        -------
        numpy.ndarray
            Element-wise sum of the two models' fitted values.
        """
        return self.fitted + exo_object.fitted

    def __mul__(self, exo_object):
        return self.fitted * exo_object.fitted

    # BUGFIX: ``__div__`` is the Python 2 protocol name and is never invoked
    # by the ``/`` operator in Python 3 -- ``__truediv__`` is required.
    # The old name is kept as an alias for backwards compatibility.
    def __truediv__(self, exo_object):
        return self.fitted / exo_object.fitted

    __div__ = __truediv__

    def __sub__(self, exo_object):
        return self.fitted - exo_object.fitted

    def append(self, exo_object):
        """Concatenate this model's fitted array with another's."""
        return np.append(self.fitted, exo_object.fitted)

    def to_series(self, array):
        """Wrap ``array`` in a pandas Series."""
        return pd.Series(array)
| [
"numpy.append",
"pandas.Series"
] | [((1523, 1564), 'numpy.append', 'np.append', (['self.fitted', 'exo_object.fitted'], {}), '(self.fitted, exo_object.fitted)\n', (1532, 1564), True, 'import numpy as np\n'), ((1616, 1632), 'pandas.Series', 'pd.Series', (['array'], {}), '(array)\n', (1625, 1632), True, 'import pandas as pd\n')] |
from __future__ import division
from builtins import str
import numpy as np
import os
import pickle as Pickle
from flarestack.core.results import ResultsHandler
from flarestack.data.icecube.ps_tracks.ps_v002_p01 import ps_v002_p01
from flarestack.shared import plot_output_dir, flux_to_k, analysis_dir
from flarestack.utils.prepare_catalogue import ps_catalogue_name
from flarestack.icecube_utils.reference_sensitivity import reference_sensitivity
import matplotlib.pyplot as plt
from flarestack.utils.custom_dataset import custom_dataset
# Benchmark script: measures how point-source sensitivity degrades when the
# likelihood (LLH) time window is offset ("misaligned") from the true
# injection window. One minimisation setup is written per (sin(dec), offset).
name_root = "benchmarks/timePDFs/misalignment/"
# Picks ref time ~100 days into IC86-I
ref_time = 55800
# Length (days) of both the injection window and the LLH search window.
window = 100
analyses = dict()
# Shared
energy_pdf = {
    "Name": "Power Law",
    "Gamma": 2.0,
}
# Initialise Injection
inj_time = {
    "Name": "FixedRefBox",
    "Fixed Ref Time (MJD)": ref_time,
    "Pre-Window": 0,
    "Post-Window": window,
}
inj = {
    "Injection Time PDF": inj_time,
    "Injection Energy PDF": energy_pdf,
    "Poisson Smear?": True,
}
cat_res = dict()
# Test declinations, given as sin(dec).
sindecs = np.linspace(0.5, -0.5, 3)
# sindecs = [0.0]
# Offsets (days) applied to the LLH window relative to the injection window.
offsets = np.linspace(-90.0, 90, 7)
for sindec in sindecs:
    source_res = dict()
    cat_path = ps_catalogue_name(sindec)
    sindec_key = "sindec=" + "{0:.2f}".format(sindec)
    name = name_root + sindec_key + "/"
    src_res = dict()
    for offset in offsets:
        full_name = name + str(offset) + "/"
        # Flux scale for injection trials: ~20x the reference sensitivity,
        # scaled up by window/(window - |offset|) as the overlap between the
        # shifted LLH window and the injection window shrinks.
        scale = flux_to_k(reference_sensitivity(sindec) * 20) * (
            window / (window - abs(offset))
        )
        # Standard Time Integration
        llh_time = {
            "Name": "FixedRefBox",
            "Fixed Ref Time (MJD)": 55800 + offset,
            "Pre-Window": 0,
            "Post-Window": window,
        }
        llh_kwargs = {
            "LLH Energy PDF": energy_pdf,
            "LLH Time PDF": llh_time,
            "Fit Gamma?": True,
            "Fit Negative n_s?": True,
            "Fit Weights?": False,
        }
        mh_dict = {
            "name": full_name,
            "datasets": custom_dataset(
                ps_v002_p01, np.load(cat_path), llh_kwargs["LLH Time PDF"]
            ),
            "catalogue": cat_path,
            "inj kwargs": inj,
            "llh kwargs": llh_kwargs,
            "scale": scale,
            "n_trials": 100,
            "n_steps": 10,
        }
        analysis_path = analysis_dir + full_name
        # Create the output directory if needed; ignore "already exists".
        try:
            os.makedirs(analysis_path)
        except OSError:
            pass
        # Persist the minimisation setup so it can be run (e.g. on a cluster).
        pkl_file = analysis_path + "dict.pkl"
        with open(pkl_file, "wb") as f:
            Pickle.dump(mh_dict, f)
        # rd.submit_to_cluster(pkl_file, n_jobs=20)
        # mh = MinimisationHandler(mh_dict)
        # mh.iterate_run(mh_dict["scale"], mh_dict["n_steps"], n_trials=100)
        # mh.clear()
        src_res[offset] = mh_dict
    cat_res[sindec_key] = src_res
# rd.wait_for_cluster()
# Plot sensitivity vs. window offset, one curve per declination.
plt.figure()
ax = plt.subplot(111)
cols = ["r", "g", "b", "orange"]
for i, (sindec_key, src_res) in enumerate(cat_res.items()):
    name = name_root + sindec_key + "/"
    sens = []
    offsets = []
    for (t, rh_dict) in sorted(src_res.items()):
        rh = ResultsHandler(rh_dict)
        sens.append(rh.sensitivity)
        offsets.append(t)
    ax.plot(offsets, sens, label=sindec_key, color=cols[i])
ax.set_ylabel(r"Flux [GeV$^{-1}$ cm$^{-2}$ s$^{-1}$]", fontsize=12)
ax.set_xlabel("Offset (days)")
ax.set_yscale("log")
# plt.legend()
plt.title("Sensitivity for 100 day emission")
ax.legend(loc="upper left", fancybox=True, framealpha=1.0)
plt.tight_layout()
plt.savefig(plot_output_dir(name_root) + "misalignment_sens.pdf")
plt.close()
| [
"pickle.dump",
"os.makedirs",
"flarestack.core.results.ResultsHandler",
"flarestack.shared.plot_output_dir",
"builtins.str",
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.pyplot.figure",
"flarestack.icecube_utils.reference_sensitivity.reference_sensitivity",
"matplotlib.pyplot.tight_lay... | [((1046, 1071), 'numpy.linspace', 'np.linspace', (['(0.5)', '(-0.5)', '(3)'], {}), '(0.5, -0.5, 3)\n', (1057, 1071), True, 'import numpy as np\n'), ((1101, 1126), 'numpy.linspace', 'np.linspace', (['(-90.0)', '(90)', '(7)'], {}), '(-90.0, 90, 7)\n', (1112, 1126), True, 'import numpy as np\n'), ((2911, 2923), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2921, 2923), True, 'import matplotlib.pyplot as plt\n'), ((2929, 2945), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (2940, 2945), True, 'import matplotlib.pyplot as plt\n'), ((3461, 3506), 'matplotlib.pyplot.title', 'plt.title', (['"""Sensitivity for 100 day emission"""'], {}), "('Sensitivity for 100 day emission')\n", (3470, 3506), True, 'import matplotlib.pyplot as plt\n'), ((3567, 3585), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3583, 3585), True, 'import matplotlib.pyplot as plt\n'), ((3652, 3663), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3661, 3663), True, 'import matplotlib.pyplot as plt\n'), ((1192, 1217), 'flarestack.utils.prepare_catalogue.ps_catalogue_name', 'ps_catalogue_name', (['sindec'], {}), '(sindec)\n', (1209, 1217), False, 'from flarestack.utils.prepare_catalogue import ps_catalogue_name\n'), ((3177, 3200), 'flarestack.core.results.ResultsHandler', 'ResultsHandler', (['rh_dict'], {}), '(rh_dict)\n', (3191, 3200), False, 'from flarestack.core.results import ResultsHandler\n'), ((3598, 3624), 'flarestack.shared.plot_output_dir', 'plot_output_dir', (['name_root'], {}), '(name_root)\n', (3613, 3624), False, 'from flarestack.shared import plot_output_dir, flux_to_k, analysis_dir\n'), ((2427, 2453), 'os.makedirs', 'os.makedirs', (['analysis_path'], {}), '(analysis_path)\n', (2438, 2453), False, 'import os\n'), ((2595, 2618), 'pickle.dump', 'Pickle.dump', (['mh_dict', 'f'], {}), '(mh_dict, f)\n', (2606, 2618), True, 'import pickle as Pickle\n'), ((1392, 1403), 'builtins.str', 
'str', (['offset'], {}), '(offset)\n', (1395, 1403), False, 'from builtins import str\n'), ((2092, 2109), 'numpy.load', 'np.load', (['cat_path'], {}), '(cat_path)\n', (2099, 2109), True, 'import numpy as np\n'), ((1437, 1466), 'flarestack.icecube_utils.reference_sensitivity.reference_sensitivity', 'reference_sensitivity', (['sindec'], {}), '(sindec)\n', (1458, 1466), False, 'from flarestack.icecube_utils.reference_sensitivity import reference_sensitivity\n')] |
import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import altair as alt
from requests import get
import re
import os
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import datetime
import time
import matplotlib.pyplot as plt
import statsmodels.api as sm
from geopy.geocoders import Nominatim
from geopy.distance import geodesic
geolocator = Nominatim(user_agent='myuseragent')
import lxml
import plotly.express as px
from PIL import Image
#with open("styles/style.css") as f:
# st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
st.set_page_config(
page_title="O/U Hockey Analytics",
page_icon=":ice_hockey_stick_and_puck:"
)
#Dummy data to get the header to display correctly
st.markdown("""<Head>
<Title> Test Title</Title><link rel="shortcut icon" href="favicon.ico" type="image/x-icon"> </Head>""",unsafe_allow_html=True)
#Title/Header
st.markdown("""<h1 style="text-align:center;color:white;font-weight:bolder;font-size:70px;font-family:helvetica; background:
-webkit-linear-gradient(#a73305,#000000,#093ff0); -webkit-background-clip:
text;-webkit-text-fill-color: transparent;">NHL<br>Wager<br>Analytics</h1>""",unsafe_allow_html=True)
# Load data
data_load_state = st.text('Checking and Fetching Data...')
#####################################
#### Data Gathering and Cleaning ####
#####################################
master_df = pd.read_csv('master_df.csv')
master_df = master_df.dropna(thresh=10)
start = pd.to_datetime(master_df.Date[-1:]).dt.date.values[0]+datetime.timedelta(days=1)
today = datetime.date.today()
yesterday = today-datetime.timedelta(days = 1)
#Function to covert dates to string
def covert_dates(date1, date2):
    """Convert every day in the inclusive range [date1, date2] to an int YYYYMMDD.

    Args:
        date1: start date (anything ``pd.date_range`` accepts: date, str, ...).
        date2: end date, inclusive.

    Returns:
        list[int]: one YYYYMMDD integer per day, in chronological order.
    """
    # Iterate the DatetimeIndex directly instead of indexing via range(len(...)).
    days = pd.date_range(date1, date2, freq='d')
    return [int(day.strftime('%Y%m%d')) for day in days]
#Function to fetch missing data
@st.cache
def get_data(date1, date2):
    """Scrape NHL game rows from sportsdatabase.com for each day in [date1, date2].

    Issues one HTTP request per day (throttled with a 1 s sleep), extracts the
    page's ``DT_Table`` HTML table, and concatenates the per-day frames.
    Cached by Streamlit so reruns with the same dates do not re-scrape.

    Args:
        date1: first date to fetch (inclusive).
        date2: last date to fetch (inclusive).

    Returns:
        pd.DataFrame: concatenated rows for all requested days.
    """
    new_df = pd.DataFrame()
    for day in covert_dates(date1, date2):
        # SDQL query URL for a single date in YYYYMMDD form.
        site = f"https://sportsdatabase.com/nhl/query?output=default&sdql=date%3D{day}&submit=++S+D+Q+L+%21++"
        # Spoof a browser user agent; the default urllib UA is often blocked.
        hdr = {'User-Agent': 'Mozilla/5.0'}
        req = Request(site, headers=hdr)
        page = urlopen(req)
        soup = BeautifulSoup(page)
        tables = soup.find('table', attrs={'id':'DT_Table'})
        # read_html returns a list of frames; the page has one results table.
        page_df = pd.read_html(str(tables))[0]
        new_df = pd.concat([new_df, page_df])
        # Be polite to the server between requests.
        time.sleep(1)
    return new_df
#Check if the data needs updating
if start <= today:
new_data = get_data(start, today)
master_df = pd.concat([master_df, new_data])
#Save updated data as csv
#master_df.to_csv("master_df.csv", index=False)
def clean_data(df):
    """Normalise the raw scraped schedule.

    Parses ``Date``, sorts by team and date, adds the previous game's date
    (``Date_Prev``) and the timedelta between games (``Days_Rest``), and drops
    unused columns.

    Args:
        df (pd.DataFrame): raw rows with Date/Team/Season/P1/P2/P3 columns.

    Returns:
        pd.DataFrame: cleaned frame with columns
        ['Date', 'Days_Rest', 'Date_Prev', 'Team', ...].
    """
    df.Date = pd.to_datetime(df.Date)
    ordered = df.sort_values(by=['Team', 'Date']).reset_index()
    ordered.insert(2, "Date_Prev", ordered.Date.shift(1))
    # Days_Rest = Date - Date_Prev (still a timedelta at this point).
    ordered.insert(2, "Days_Rest", (ordered.Date_Prev - ordered.Date) * -1)
    return ordered.drop(['index', 'Season', 'P1', 'P2', 'P3'], axis=1)
#Fucntion to identify a team change to break streak counts
def trips(home_or_away, TeamChange, Site):
    """Compute, per game, the running length of the current home stand or road trip.

    Args:
        home_or_away (str): 'home' or 'away' -- which site to count streaks for.
        TeamChange (iterable of bool): True on the first row of a new team;
            resets the running streak so it never crosses team boundaries.
        Site (iterable of str): 'home'/'away' site of each game.

    Returns:
        list[int]: streak length at each game (0 for games at the other site).
    """
    # Renamed the accumulator: the original shadowed the builtin ``list``.
    streaks = []
    count = 0
    for team_changed, site in zip(TeamChange, Site):
        if team_changed:
            # New team: void any streak carried over from the previous team.
            count = 0
        if site == home_or_away:
            count += 1
        else:
            count = 0
        streaks.append(count)
    return streaks
#Function to calculate the distance the road team is from home
def distance_calc(df):
    """Add city and travel-distance columns for each game row.

    Maps each team/opponent to its city and coordinate point via the
    module-level ``team_dict`` (built from nhltable.csv), computes the
    geodesic distance between the two points with geopy, and records the
    distance travelled by each side (0 for whichever side is at home).

    Args:
        df (pd.DataFrame): game rows with ``Team``, ``Opp`` and ``Site``
            columns; mutated in place (columns inserted).

    Returns:
        pd.DataFrame: frame with the temporary point/distance columns dropped.
    """
    df.insert(4,"Team_City", df.Team.map(team_dict['City']))
    df.insert(6,"Opp_City", df.Opp.map(team_dict['City']))
    # 'Citypoint' presumably holds (lat, lon) per team -- verify in nhltable.csv.
    df.insert(9,"Team_point", df.Team.map(team_dict['Citypoint']))
    df.insert(10,"Opp_point", df.Opp.map(team_dict['Citypoint']))
    # Geodesic distance between the two cities, in km.
    df['Distance'] = df.apply(lambda x: geodesic(x['Team_point'],x['Opp_point']).km, axis=1)
    # The side playing at its own site travels 0 km.
    df['Team_distance'] = df.apply(lambda x: 0 if x.Site == "home" else x.Distance, axis=1)
    df['Opp_distance'] = df.apply(lambda x: 0 if x.Site == "away" else x.Distance, axis=1)
    df = df.drop(['Team_point','Distance','Opp_point'], axis=1)
    return df
#Function to count the current streak of home or games
def road_trips(df):
    """Add home-stand/road-trip streak counters and clean up the rest-day column.

    Args:
        df (pd.DataFrame): team-sorted game rows with ``Team``, ``Site`` and a
            timedelta ``Days_Rest`` column; mutated in place.

    Returns:
        pd.DataFrame: frame with ``Home_Stand``/``Road_Trip`` counters added
        and ``Days_Rest`` converted to an integer count of rest days.
    """
    # True on the first row of each team's schedule; lets ``trips`` reset
    # its streak counter at team boundaries.
    df.insert(4, "TeamChange", df["Team"].shift(1, fill_value=df["Team"].head(1)) != df["Team"])
    df.insert(10, "Home_Stand", trips("home", df.TeamChange, df.Site))
    df.insert(11, "Road_Trip", trips("away", df.TeamChange, df.Site))
    # Timedelta -> whole days; missing gaps (a team's first game) filled
    # with 5 before the off-by-one adjustment below.
    df.Days_Rest = df.Days_Rest.dt.days
    df.Days_Rest = df.Days_Rest.fillna(5)
    df.Days_Rest = df.Days_Rest.astype(int)-1
    # Any remaining negative values are treated like a season opener.
    df.loc[df.Days_Rest < 0, 'Days_Rest'] = 5
    df = df.drop('TeamChange', axis=1)
    return df
#Function to pair games into a singel record -- for O/U analysis
def opp_func(df):
    """Attach the opponent's rest/home-stand/road-trip figures to each game row.

    Looks up the opponent's values through the module-level ``opp_days_rest``,
    ``opp_home_stand`` and ``opp_road_trip`` dicts, keyed by ``Oppkey``
    (YYYYMMDD + opponent name).

    Args:
        df (pd.DataFrame): game rows with an ``Oppkey`` column; mutated in
            place (columns inserted).

    Returns:
        pd.DataFrame: the same (mutated) frame, for convenience.
    """
    # BUGFIX: previously mapped from the module-level ``eda_df`` instead of
    # the ``df`` argument, silently ignoring the parameter. Behaviour at the
    # existing call site (opp_func(eda_df)) is unchanged.
    df.insert(2, "Opp_Days_Rest", df.Oppkey.map(opp_days_rest))
    df.insert(10, "Opp_home_stand", df.Oppkey.map(opp_home_stand))
    df.insert(11, "Opp_road_trip", df.Oppkey.map(opp_road_trip))
    return df
#Func to calculate the unit return of each game and team
def unit_value(Line, Result):
    """Return the betting units won or lost for one game.

    A favourite (negative money line) wins +1 unit and loses Line/100 units
    (already negative); an underdog (positive line) wins Line/100 units and
    loses 1 unit.

    Args:
        Line (int, float): American money line, e.g. -150 or +120.
        Result (str): 'W' for a win, 'L' for a loss; anything else is
            treated as a push / no result.

    Returns:
        int, float: units gained (positive) or lost (negative); 0 on a push.
    """
    if Line < 0 and Result == 'W':
        return 1
    elif Line < 0 and Result == 'L':
        return Line/100
    elif Line > 0 and Result == 'W':
        return Line/100
    elif Line > 0 and Result == 'L':
        return -1
    # BUGFIX: previously fell through and returned None for pushes or a zero
    # line, which breaks downstream arithmetic; treat those as no gain/loss.
    return 0
nhltable= pd.read_csv('nhltable.csv')
team_dict = nhltable.set_index('Team').to_dict()
eda_df = clean_data(master_df)
eda_df = distance_calc(eda_df)
eda_df = road_trips(eda_df)
#Adding Division
eda_df = pd.merge(eda_df, nhltable[['Team', 'Division']], on='Team', how="left" )
#Create keys for pairing
Teamkey = []
Oppkey = []
for i in range(len(eda_df.Date)):
Teamkey.append(str(covert_dates(eda_df.Date[i], eda_df.Date[i])[0])+eda_df.Team[i])
Oppkey.append(str(covert_dates(eda_df.Date[i], eda_df.Date[i])[0])+eda_df.Opp[i])
eda_df['Oppkey'] = Oppkey
opp_days_rest = dict(zip(Teamkey, eda_df.Days_Rest))
opp_home_stand = dict(zip(Teamkey, eda_df.Home_Stand))
opp_road_trip = dict(zip(Teamkey, eda_df.Road_Trip))
opp_func(eda_df)
eda_df.Final = eda_df.Final.fillna('0-0')
eda_df = eda_df.fillna(0)
eda_df = pd.concat([eda_df, pd.get_dummies(eda_df.OUr)], axis=1)
goals_df = eda_df['Final'].str.split('-', expand=True).rename(columns={0:'Team_Goals', 1:'Opp_Goals'}).astype(int)
eda_df = pd.concat([eda_df, goals_df], axis=1)
eda_df['total_O'] = eda_df.groupby('Team')['O'].cumsum()
eda_df['total_U'] = eda_df.groupby('Team')['U'].cumsum()
eda_df['total_P'] = eda_df.groupby('Team')['P'].cumsum()
eda_df['total_Team_goals'] = eda_df.groupby('Team')['Team_Goals'].cumsum()
eda_df['total_Opp_goals'] = eda_df.groupby('Team')['Opp_Goals'].cumsum()
#eda_df = eda_df.loc[eda_df['OUr']!='P']
#eda_df['y'] = (eda_df.OUr=='O').astype(int)
eda_df['Team_U'] = eda_df.groupby('Team')['total_U'].transform('max')
eda_df['Team_O'] = eda_df.groupby('Team')['total_O'].transform('max')
eda_df['Opp_U'] = eda_df.groupby('Opp')['total_U'].transform('max')
eda_df['Opp_O'] = eda_df.groupby('Opp')['total_O'].transform('max')
eda_df['Team_Goals_Scored'] = eda_df.groupby('Team')['total_Team_goals'].transform('max')
eda_df['Team_Goals_Allowed'] = eda_df.groupby('Team')['total_Opp_goals'].transform('max')
eda_df['Opp_Goals_Scored'] = eda_df.groupby('Opp')['total_Team_goals'].transform('max')
eda_df['Opp_Goals_Allowed'] = eda_df.groupby('Opp')['total_Opp_goals'].transform('max')
#eda_df['Units'] = eda_df.apply(lambda x: unit_value(x.Line, x.SUr), axis=1)
#Tonight's games data
today_np = np.datetime64(today)
tonight_df= eda_df[['Team','Opp','Total','Home_Stand','Opp_road_trip','Days_Rest','Opp_Days_Rest', 'Opp_distance', 'Team_U',
'Opp_U','Team_O', 'Opp_O','Team_Goals_Scored', 'Opp_Goals_Scored','Team_Goals_Allowed', 'Opp_Goals_Allowed', "Date",'Site']]
tonight_df = tonight_df.loc[(tonight_df['Date']==today_np) & (tonight_df['Site']=='home')].reset_index(drop=True)
#Seperating the two EDA dataframes
eda_OU = eda_df.loc[(eda_df['Site']=='home') & (eda_df['Date']<today_np)]
eda_OU.insert(3, "Combined_Rest", eda_OU.loc[:,'Days_Rest'] + eda_OU.loc[:,'Opp_Days_Rest'])
cut_labels = [500, 1000, 1500, 2000, 3000, 4000]
cut_bins = [0, 500, 1000, 1500, 2000, 3000, 4000]
eda_OU['Distance'] = pd.cut(eda_OU.loc[:,'Opp_distance'], bins=cut_bins, labels= cut_labels)
eda_OU = eda_OU.sort_values('Date').reset_index(drop=True)
# Notify user that the data was successfully loaded.
data_load_state.text('Checking and Fetching Data...Done & done!')
st.write("Check out this [link to the sister site for Team Analysis](https://share.streamlit.io/jfm-data/nhlwagers/main/streamlit_Team.py)")
#############################################
### Streamlit Design ######################
############################################
st.subheader("Tonight's Games")
#st.dataframe(tonight_df.style.background_gradient(cmap='viridis', low=0.7, high=0).set_precision(1))
df1 = tonight_df.style.background_gradient(cmap='viridis', low=0.7, high=0).set_precision(1)
df2 = tonight_df.iloc[:,:3].style.set_precision(1)
st.table(df2)
st.dataframe(df1)
######################
## Space for Machine Learning Model
####################
st.subheader('Predictions')
st.write('*Coming soon....* :sunglasses:')
st.header('O/U Analysis')
date_select = st.slider(
"Select Dates",
datetime.date(2021,1,13), yesterday,
value=(datetime.date(2021,1,13), yesterday),
format="MM/DD/YY")
st.write("Start time:", date_select[0])
st.write("Endtime:", date_select[1])
filtered_df= eda_OU[(eda_OU['Date'] >= np.datetime64(date_select[0]))
& (eda_OU['Date'] <= np.datetime64(date_select[1]))]
#st.subheader('Overall')
fig_OU = px.histogram(filtered_df, x="Total", color='OUr',
barmode='group', template='plotly_dark', title="Totals",
color_discrete_map={
"O":"#FF9F1C",
"U":"#011627",
"P":"#2EC4B6"})
st.plotly_chart(fig_OU, use_container_width=True)
#st.subheader('By Combined Days Rest')
fig_DaysRest = px.histogram(filtered_df[filtered_df["Combined_Rest"] <10],
x="Combined_Rest", color='OUr', title='Test',
barmode='group', template='plotly_dark', color_discrete_map={
"O":"#FF9F1C",
"U":"#011627",
"P":"#2EC4B6"})
st.plotly_chart(fig_DaysRest, use_container_width=True)
#st.subheader('By Distance of Road Team from Home')
fig3 = px.histogram(filtered_df, x="Distance", color='OUr',
barmode='group', template='plotly_dark',title='By Distance of Road Team from Home',
color_discrete_map={
"O":"#FF9F1C",
"U":"#011627",
"P":"#2EC4B6"})
st.plotly_chart(fig3, use_container_width=True)
#st.subheader('By Length of Road Trip')
fig4 = px.histogram(filtered_df, x="Opp_road_trip", color='OUr',
barmode='group', template='plotly_dark', title='By Length of Road Trip',
color_discrete_map={
"O":"#FF9F1C",
"U":"#011627",
"P":"#2EC4B6"})
st.plotly_chart(fig4, use_container_width=True)
#st.subheader('By Length of Home Stand')
fig5 = px.histogram(filtered_df, x="Home_Stand", color='OUr',title='By Length of Home Stand',
barmode='group', template='plotly_dark', color_discrete_map={
"O":"#FF9F1C",
"U":"#011627",
"P":"#2EC4B6"})
st.plotly_chart(fig5, use_container_width=True)
st.subheader('Division Analysis')
div_select = st.selectbox("Select Division?",
list(pd.unique(eda_df.Division)))
div_filter = eda_df[eda_df['Division']==div_select]
fig_OU = px.histogram(div_filter, x="Team", color='OUr', barmode='group', template= 'simple_white', title="Totals",
color_discrete_map={
"O":"#FF9F1C",
"U":"#011627",
"P":"#2EC4B6"})
st.plotly_chart(fig_OU, use_container_width=True)
#st.subheader('Select Parameters for situational outputs')
#Filtering For Days of Rest
#Days_to_filter = st.slider('Days of Rest', 0, max(eda_OU.Days_Rest), 3)
#st.text('Number of Days Rest %s' % Days_to_filter)
#filtered_data = eda_OU[eda_OU['Days_Rest'] == Days_to_filter]
#Filtering For Distance
#Distance_to_filter = st.slider('Distance of Opponent', 0.0, max(data.Distance), (0.0, 500.0))
#st.text('Distance From Home %s' % Distance_to_filter[0])
#filtered_data = filtered_data[(filtered_data['Distance'] >= Distance_to_filter[0]) & (filtered_data['Distance'] <= Distance_to_filter[1])]
# #Filtering For Home and Away
# st.header('O/U Team Analysis -- TO BE MOVED')
# team_select = st.selectbox("Select Team",
# list(pd.unique(eda_df.Team)))
# st.write('You selected', team_select)
# filtered_data = eda_df[eda_df['Team'] == team_select]
# home_away = st.selectbox("Is the Team Home or Away?",
# ('home', 'away'))
# st.write('You selected', home_away)
# filtered_data = filtered_data[filtered_data['Site'] == home_away]
# days_rest = st.slider('Days Rest', 0, 5, 2)
# filtered_data = filtered_data[filtered_data['Days_Rest'] == days_rest]
# st.subheader('O/U by Selected Inputs')
# fig_OU_team = px.histogram(filtered_data, x="Total", color='OUr',
# barmode='group', template='plotly_dark')
# st.plotly_chart(fig_OU_team, use_container_width=True)
#Filtering For Distance
#Distance_to_filter = st.slider('Distance From Home', 0.0, max(data.Distance), (0.0, 500.0))
#st.text('Distance From Home %s' % Distance_to_filter[0])
#filtered_data = filtered_data[(filtered_data['Distance'] >= Distance_to_filter[0]) & (filtered_data['Distance'] <= Distance_to_filter[1])]
#st.subheader('Selected # of Days on Home Stand')
#st.subheader('Selected # of Days on Road Trip')
#if genre == 'Comedy':
# st.write('You selected comedy.')
#else:
# st.write("You didn't select comedy.")
#fig = px.histogram(data, x="Date_diff", color='OUr',
# barmode='group', template='plotly_white')
#st.plotly_chart(fig, use_container_width=True)
#st.subheader('Home Stand O/U Results')
#fig1 = px.histogram(data[data["Home_Stand"]>0], x="Home_Stand", color='OUr',
# barmode='group', template='plotly_white')
#st.plotly_chart(fig1, use_container_width=True)
#st.subheader('Road Trip O/U Results')
#fig2 = px.histogram(data[data["Road_Trip"]>0], x="Road_Trip", color='OUr',
# barmode='group', template='plotly_white')
#st.plotly_chart(fig2, use_container_width=True)
# Show the raw EDA table (first column dropped — presumably an index column;
# NOTE(review): confirm against how eda_df is built earlier in the script).
st.text("Raw Data")
st.dataframe(eda_df.iloc[:,1:])

# "Unit Analysis" section: let the user pick a single team to drill into.
st.header('Unit Analysis')
unit_team = st.selectbox("Select Team for Unit",
                        list(pd.unique(eda_df.Team)))
st.write('You selected', unit_team)
#Filter for OU Line
#Line_to_filter = st.slider('Unit Line', 0.0, max(eda_OU.Total), (0.0, 5.5))
#filtered_data2 = filtered_data[(eda_OU['Total'] >= Line_to_filter[0]) &
#                    (eda_OU['Total'] <= Line_to_filter[1])]
| [
"streamlit.table",
"pandas.read_csv",
"urllib.request.Request",
"time.sleep",
"datetime.timedelta",
"streamlit.header",
"pandas.date_range",
"pandas.to_datetime",
"pandas.unique",
"datetime.date",
"numpy.datetime64",
"streamlit.set_page_config",
"pandas.DataFrame",
"urllib.request.urlopen"... | [((449, 484), 'geopy.geocoders.Nominatim', 'Nominatim', ([], {'user_agent': '"""myuseragent"""'}), "(user_agent='myuseragent')\n", (458, 484), False, 'from geopy.geocoders import Nominatim\n'), ((669, 768), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""O/U Hockey Analytics"""', 'page_icon': '""":ice_hockey_stick_and_puck:"""'}), "(page_title='O/U Hockey Analytics', page_icon=\n ':ice_hockey_stick_and_puck:')\n", (687, 768), True, 'import streamlit as st\n'), ((832, 1003), 'streamlit.markdown', 'st.markdown', (['"""<Head>\n <Title> Test Title</Title><link rel="shortcut icon" href="favicon.ico" type="image/x-icon"> </Head>"""'], {'unsafe_allow_html': '(True)'}), '(\n """<Head>\n <Title> Test Title</Title><link rel="shortcut icon" href="favicon.ico" type="image/x-icon"> </Head>"""\n , unsafe_allow_html=True)\n', (843, 1003), True, 'import streamlit as st\n'), ((1016, 1360), 'streamlit.markdown', 'st.markdown', (['"""<h1 style="text-align:center;color:white;font-weight:bolder;font-size:70px;font-family:helvetica; background:\n -webkit-linear-gradient(#a73305,#000000,#093ff0); -webkit-background-clip:\n text;-webkit-text-fill-color: transparent;">NHL<br>Wager<br>Analytics</h1>"""'], {'unsafe_allow_html': '(True)'}), '(\n """<h1 style="text-align:center;color:white;font-weight:bolder;font-size:70px;font-family:helvetica; background:\n -webkit-linear-gradient(#a73305,#000000,#093ff0); -webkit-background-clip:\n text;-webkit-text-fill-color: transparent;">NHL<br>Wager<br>Analytics</h1>"""\n , unsafe_allow_html=True)\n', (1027, 1360), True, 'import streamlit as st\n'), ((1391, 1431), 'streamlit.text', 'st.text', (['"""Checking and Fetching Data..."""'], {}), "('Checking and Fetching Data...')\n", (1398, 1431), True, 'import streamlit as st\n'), ((1566, 1594), 'pandas.read_csv', 'pd.read_csv', (['"""master_df.csv"""'], {}), "('master_df.csv')\n", (1577, 1594), True, 'import pandas as pd\n'), ((1737, 1758), 
'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1756, 1758), False, 'import datetime\n'), ((5569, 5596), 'pandas.read_csv', 'pd.read_csv', (['"""nhltable.csv"""'], {}), "('nhltable.csv')\n", (5580, 5596), True, 'import pandas as pd\n'), ((5772, 5843), 'pandas.merge', 'pd.merge', (['eda_df', "nhltable[['Team', 'Division']]"], {'on': '"""Team"""', 'how': '"""left"""'}), "(eda_df, nhltable[['Team', 'Division']], on='Team', how='left')\n", (5780, 5843), True, 'import pandas as pd\n'), ((6608, 6645), 'pandas.concat', 'pd.concat', (['[eda_df, goals_df]'], {'axis': '(1)'}), '([eda_df, goals_df], axis=1)\n', (6617, 6645), True, 'import pandas as pd\n'), ((7817, 7837), 'numpy.datetime64', 'np.datetime64', (['today'], {}), '(today)\n', (7830, 7837), True, 'import numpy as np\n'), ((8557, 8628), 'pandas.cut', 'pd.cut', (["eda_OU.loc[:, 'Opp_distance']"], {'bins': 'cut_bins', 'labels': 'cut_labels'}), "(eda_OU.loc[:, 'Opp_distance'], bins=cut_bins, labels=cut_labels)\n", (8563, 8628), True, 'import pandas as pd\n'), ((8823, 8973), 'streamlit.write', 'st.write', (['"""Check out this [link to the sister site for Team Analysis](https://share.streamlit.io/jfm-data/nhlwagers/main/streamlit_Team.py)"""'], {}), "(\n 'Check out this [link to the sister site for Team Analysis](https://share.streamlit.io/jfm-data/nhlwagers/main/streamlit_Team.py)'\n )\n", (8831, 8973), True, 'import streamlit as st\n'), ((9113, 9144), 'streamlit.subheader', 'st.subheader', (['"""Tonight\'s Games"""'], {}), '("Tonight\'s Games")\n', (9125, 9144), True, 'import streamlit as st\n'), ((9397, 9410), 'streamlit.table', 'st.table', (['df2'], {}), '(df2)\n', (9405, 9410), True, 'import streamlit as st\n'), ((9412, 9429), 'streamlit.dataframe', 'st.dataframe', (['df1'], {}), '(df1)\n', (9424, 9429), True, 'import streamlit as st\n'), ((9519, 9546), 'streamlit.subheader', 'st.subheader', (['"""Predictions"""'], {}), "('Predictions')\n", (9531, 9546), True, 'import streamlit as st\n'), ((9548, 
9590), 'streamlit.write', 'st.write', (['"""*Coming soon....* :sunglasses:"""'], {}), "('*Coming soon....* :sunglasses:')\n", (9556, 9590), True, 'import streamlit as st\n'), ((9594, 9619), 'streamlit.header', 'st.header', (['"""O/U Analysis"""'], {}), "('O/U Analysis')\n", (9603, 9619), True, 'import streamlit as st\n'), ((9790, 9829), 'streamlit.write', 'st.write', (['"""Start time:"""', 'date_select[0]'], {}), "('Start time:', date_select[0])\n", (9798, 9829), True, 'import streamlit as st\n'), ((9831, 9867), 'streamlit.write', 'st.write', (['"""Endtime:"""', 'date_select[1]'], {}), "('Endtime:', date_select[1])\n", (9839, 9867), True, 'import streamlit as st\n'), ((10060, 10244), 'plotly.express.histogram', 'px.histogram', (['filtered_df'], {'x': '"""Total"""', 'color': '"""OUr"""', 'barmode': '"""group"""', 'template': '"""plotly_dark"""', 'title': '"""Totals"""', 'color_discrete_map': "{'O': '#FF9F1C', 'U': '#011627', 'P': '#2EC4B6'}"}), "(filtered_df, x='Total', color='OUr', barmode='group', template\n ='plotly_dark', title='Totals', color_discrete_map={'O': '#FF9F1C', 'U':\n '#011627', 'P': '#2EC4B6'})\n", (10072, 10244), True, 'import plotly.express as px\n'), ((10294, 10343), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig_OU'], {'use_container_width': '(True)'}), '(fig_OU, use_container_width=True)\n', (10309, 10343), True, 'import streamlit as st\n'), ((10402, 10632), 'plotly.express.histogram', 'px.histogram', (["filtered_df[filtered_df['Combined_Rest'] < 10]"], {'x': '"""Combined_Rest"""', 'color': '"""OUr"""', 'title': '"""Test"""', 'barmode': '"""group"""', 'template': '"""plotly_dark"""', 'color_discrete_map': "{'O': '#FF9F1C', 'U': '#011627', 'P': '#2EC4B6'}"}), "(filtered_df[filtered_df['Combined_Rest'] < 10], x=\n 'Combined_Rest', color='OUr', title='Test', barmode='group', template=\n 'plotly_dark', color_discrete_map={'O': '#FF9F1C', 'U': '#011627', 'P':\n '#2EC4B6'})\n", (10414, 10632), True, 'import plotly.express as px\n'), ((10683, 
10738), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig_DaysRest'], {'use_container_width': '(True)'}), '(fig_DaysRest, use_container_width=True)\n', (10698, 10738), True, 'import streamlit as st\n'), ((10804, 11018), 'plotly.express.histogram', 'px.histogram', (['filtered_df'], {'x': '"""Distance"""', 'color': '"""OUr"""', 'barmode': '"""group"""', 'template': '"""plotly_dark"""', 'title': '"""By Distance of Road Team from Home"""', 'color_discrete_map': "{'O': '#FF9F1C', 'U': '#011627', 'P': '#2EC4B6'}"}), "(filtered_df, x='Distance', color='OUr', barmode='group',\n template='plotly_dark', title='By Distance of Road Team from Home',\n color_discrete_map={'O': '#FF9F1C', 'U': '#011627', 'P': '#2EC4B6'})\n", (10816, 11018), True, 'import plotly.express as px\n'), ((11066, 11113), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig3'], {'use_container_width': '(True)'}), '(fig3, use_container_width=True)\n', (11081, 11113), True, 'import streamlit as st\n'), ((11167, 11374), 'plotly.express.histogram', 'px.histogram', (['filtered_df'], {'x': '"""Opp_road_trip"""', 'color': '"""OUr"""', 'barmode': '"""group"""', 'template': '"""plotly_dark"""', 'title': '"""By Length of Road Trip"""', 'color_discrete_map': "{'O': '#FF9F1C', 'U': '#011627', 'P': '#2EC4B6'}"}), "(filtered_df, x='Opp_road_trip', color='OUr', barmode='group',\n template='plotly_dark', title='By Length of Road Trip',\n color_discrete_map={'O': '#FF9F1C', 'U': '#011627', 'P': '#2EC4B6'})\n", (11179, 11374), True, 'import plotly.express as px\n'), ((11423, 11470), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig4'], {'use_container_width': '(True)'}), '(fig4, use_container_width=True)\n', (11438, 11470), True, 'import streamlit as st\n'), ((11523, 11729), 'plotly.express.histogram', 'px.histogram', (['filtered_df'], {'x': '"""Home_Stand"""', 'color': '"""OUr"""', 'title': '"""By Length of Home Stand"""', 'barmode': '"""group"""', 'template': '"""plotly_dark"""', 'color_discrete_map': "{'O': 
'#FF9F1C', 'U': '#011627', 'P': '#2EC4B6'}"}), "(filtered_df, x='Home_Stand', color='OUr', title=\n 'By Length of Home Stand', barmode='group', template='plotly_dark',\n color_discrete_map={'O': '#FF9F1C', 'U': '#011627', 'P': '#2EC4B6'})\n", (11535, 11729), True, 'import plotly.express as px\n'), ((11756, 11803), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig5'], {'use_container_width': '(True)'}), '(fig5, use_container_width=True)\n', (11771, 11803), True, 'import streamlit as st\n'), ((11809, 11842), 'streamlit.subheader', 'st.subheader', (['"""Division Analysis"""'], {}), "('Division Analysis')\n", (11821, 11842), True, 'import streamlit as st\n'), ((12009, 12192), 'plotly.express.histogram', 'px.histogram', (['div_filter'], {'x': '"""Team"""', 'color': '"""OUr"""', 'barmode': '"""group"""', 'template': '"""simple_white"""', 'title': '"""Totals"""', 'color_discrete_map': "{'O': '#FF9F1C', 'U': '#011627', 'P': '#2EC4B6'}"}), "(div_filter, x='Team', color='OUr', barmode='group', template=\n 'simple_white', title='Totals', color_discrete_map={'O': '#FF9F1C', 'U':\n '#011627', 'P': '#2EC4B6'})\n", (12021, 12192), True, 'import plotly.express as px\n'), ((12222, 12271), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig_OU'], {'use_container_width': '(True)'}), '(fig_OU, use_container_width=True)\n', (12237, 12271), True, 'import streamlit as st\n'), ((14920, 14939), 'streamlit.text', 'st.text', (['"""Raw Data"""'], {}), "('Raw Data')\n", (14927, 14939), True, 'import streamlit as st\n'), ((14941, 14973), 'streamlit.dataframe', 'st.dataframe', (['eda_df.iloc[:, 1:]'], {}), '(eda_df.iloc[:, 1:])\n', (14953, 14973), True, 'import streamlit as st\n'), ((14976, 15002), 'streamlit.header', 'st.header', (['"""Unit Analysis"""'], {}), "('Unit Analysis')\n", (14985, 15002), True, 'import streamlit as st\n'), ((15102, 15137), 'streamlit.write', 'st.write', (['"""You selected"""', 'unit_team'], {}), "('You selected', unit_team)\n", (15110, 15137), True, 'import 
streamlit as st\n'), ((1701, 1727), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1719, 1727), False, 'import datetime\n'), ((1778, 1804), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1796, 1804), False, 'import datetime\n'), ((1916, 1953), 'pandas.date_range', 'pd.date_range', (['date1', 'date2'], {'freq': '"""d"""'}), "(date1, date2, freq='d')\n", (1929, 1953), True, 'import pandas as pd\n'), ((2160, 2174), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2172, 2174), True, 'import pandas as pd\n'), ((2835, 2867), 'pandas.concat', 'pd.concat', (['[master_df, new_data]'], {}), '([master_df, new_data])\n', (2844, 2867), True, 'import pandas as pd\n'), ((2991, 3014), 'pandas.to_datetime', 'pd.to_datetime', (['df.Date'], {}), '(df.Date)\n', (3005, 3014), True, 'import pandas as pd\n'), ((9676, 9702), 'datetime.date', 'datetime.date', (['(2021)', '(1)', '(13)'], {}), '(2021, 1, 13)\n', (9689, 9702), False, 'import datetime\n'), ((2403, 2429), 'urllib.request.Request', 'Request', (['site'], {'headers': 'hdr'}), '(site, headers=hdr)\n', (2410, 2429), False, 'from urllib.request import Request, urlopen\n'), ((2450, 2462), 'urllib.request.urlopen', 'urlopen', (['req'], {}), '(req)\n', (2457, 2462), False, 'from urllib.request import Request, urlopen\n'), ((2483, 2502), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page'], {}), '(page)\n', (2496, 2502), False, 'from bs4 import BeautifulSoup\n'), ((2643, 2671), 'pandas.concat', 'pd.concat', (['[new_df, page_df]'], {}), '([new_df, page_df])\n', (2652, 2671), True, 'import pandas as pd\n'), ((2685, 2698), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2695, 2698), False, 'import time\n'), ((6441, 6467), 'pandas.get_dummies', 'pd.get_dummies', (['eda_df.OUr'], {}), '(eda_df.OUr)\n', (6455, 6467), True, 'import pandas as pd\n'), ((11913, 11939), 'pandas.unique', 'pd.unique', (['eda_df.Division'], {}), '(eda_df.Division)\n', (11922, 11939), True, 
'import pandas as pd\n'), ((15076, 15098), 'pandas.unique', 'pd.unique', (['eda_df.Team'], {}), '(eda_df.Team)\n', (15085, 15098), True, 'import pandas as pd\n'), ((9726, 9752), 'datetime.date', 'datetime.date', (['(2021)', '(1)', '(13)'], {}), '(2021, 1, 13)\n', (9739, 9752), False, 'import datetime\n'), ((9910, 9939), 'numpy.datetime64', 'np.datetime64', (['date_select[0]'], {}), '(date_select[0])\n', (9923, 9939), True, 'import numpy as np\n'), ((9986, 10015), 'numpy.datetime64', 'np.datetime64', (['date_select[1]'], {}), '(date_select[1])\n', (9999, 10015), True, 'import numpy as np\n'), ((4039, 4080), 'geopy.distance.geodesic', 'geodesic', (["x['Team_point']", "x['Opp_point']"], {}), "(x['Team_point'], x['Opp_point'])\n", (4047, 4080), False, 'from geopy.distance import geodesic\n'), ((1647, 1682), 'pandas.to_datetime', 'pd.to_datetime', (['master_df.Date[-1:]'], {}), '(master_df.Date[-1:])\n', (1661, 1682), True, 'import pandas as pd\n')] |
import numpy as np
from math import floor, ceil
import torch
from torch import nn
import torch.nn.functional as F
import utils.loggers as lg
class Residual_CNN(nn.Module):
    """Residual CNN with a value head and a policy head.

    Architecture: one stem convolution (configured by ``hidden_layers[0]``)
    followed by ``len(hidden_layers) - 1`` residual blocks, then:

    * value head  -- 1x1 conv -> dense(20) -> dense(1) -> tanh, in [-1, 1];
    * policy head -- 1x1 conv (2 channels) -> dense(``output_dim``) logits.

    (The commented-out Keras code below suggests an AlphaZero-style
    value/policy setup -- presumably for game self-play; unverified here.)
    """

    def __init__(self, learning_rate, input_dim, output_dim, hidden_layers, device):
        """
        Args:
            learning_rate: stored for external optimiser construction; not
                used inside this module.
            input_dim: input shape whose last three entries are
                (channels, height, width).
            output_dim: number of policy-head logits.
            hidden_layers: list of dicts with keys ``'filters'`` and
                ``'kernel_size'``; entry 0 configures the stem conv, the
                remaining entries configure one residual block each.
            device: torch device the residual sub-modules are moved to.
        """
        super().__init__()
        self._device = device
        self._learning_rate = learning_rate
        self._input_dim = list(input_dim)
        self._output_dim = output_dim
        self._hidden_layers = hidden_layers
        self._num_layers = len(hidden_layers)
        self._activation = nn.LeakyReLU(0.3)
        # Stem convolution + batch norm.
        self._conv_layer = self._construct_conv2d(self._input_dim, hidden_layers[0]['filters'], hidden_layers[0]['kernel_size'])
        self._conv_layer_normalization = nn.BatchNorm2d(hidden_layers[0]['filters'])
        # nn.ModuleList (not plain Python lists) so the residual parameters
        # are registered with the module and visible to optimisers,
        # state_dict, and .to(device).  Plain lists would silently exclude
        # them from model.parameters().
        self._residual_conv_layer_1 = nn.ModuleList()
        self._residual_conv_layer_2 = nn.ModuleList()
        self._residual_normalization = nn.ModuleList()
        if self._num_layers > 1:
            for i, h in enumerate(hidden_layers[1:]):
                # Input channels of block i are the output channels of the
                # previous layer, hidden_layers[i].  (The original indexed
                # hidden_layers[i - 1], which wraps to the *last* layer when
                # i == 0.)
                self._input_dim[-3] = hidden_layers[i]['filters']
                self._residual_conv_layer_1.append(self._construct_conv2d(self._input_dim, h['filters'], h['kernel_size']).to(device))
                self._input_dim[-3] = h['filters']
                self._residual_conv_layer_2.append(self._construct_conv2d(self._input_dim, h['filters'], h['kernel_size']).to(device))
                # NOTE: a single BatchNorm per block, shared by both convs of
                # the block (kept from the original design).
                self._residual_normalization.append(nn.BatchNorm2d(h['filters']).to(device))
        self._input_dim[-3] = hidden_layers[-1]['filters']
        # Same-padding keeps H and W constant, so the flattened head input
        # size is just H * W per channel.
        flatten_length = self._input_dim[-2] * self._input_dim[-1]
        # Value head.
        self._vh_conv_layer = self._construct_conv2d(self._input_dim, 1, (1, 1))
        self._vh_normalization = nn.BatchNorm2d(1)
        self._vh_dense_1 = nn.Linear(flatten_length, 20, bias=False)
        self._vh_dense_2 = nn.Linear(20, 1, bias=False)
        # Policy head.
        self._ph_conv_layer = self._construct_conv2d(self._input_dim, 2, (1, 1))
        self._ph_normalization = nn.BatchNorm2d(2)
        self._ph_dense = nn.Linear(2 * flatten_length, output_dim, bias=False)
        # Restore the caller's original (unmutated) input_dim.
        self._input_dim = input_dim

    def _calc_padding_margin(self, kernel_size):
        """Return F.pad margins (left, right, up, bottom) for SAME padding."""
        same_pad_u = floor((kernel_size[-2] - 1) / 2.0)
        same_pad_b = ceil((kernel_size[-2] - 1) / 2.0)
        same_pad_l = floor((kernel_size[-1] - 1) / 2.0)
        same_pad_r = ceil((kernel_size[-1] - 1) / 2.0)
        return (same_pad_l, same_pad_r, same_pad_u, same_pad_b)

    def _construct_conv2d(self, x, filters, kernel_size):
        """Build an unpadded, bias-free Conv2d; in_channels taken from x[-3]."""
        func = nn.Conv2d(
            in_channels=x[-3],
            out_channels=filters,
            kernel_size=kernel_size,
            bias=False
        )
        return func

    def forward(self, x, y=None):
        """Run the network on a numpy batch.

        Args:
            x: numpy array of shape (batch, channels, H, W).
            y: optional dict with numpy targets ``'value_head'`` (batch,)
                and ``'policy_head'`` (batch, output_dim, one-hot).

        Returns:
            (value, policy_logits) tensors when ``y`` is None, otherwise a
            dict of per-sample losses ('loss', 'value_head_loss',
            'policy_head_loss').
        """
        bs = x.shape[0]
        x = torch.from_numpy(x)
        x = x.to(self._device)
        x = F.pad(x, self._calc_padding_margin(self._hidden_layers[0]['kernel_size']))
        x = self._conv_layer(x)
        x = self._conv_layer_normalization(x)
        x = self._activation(x)
        for i in range(self._num_layers - 1):
            input_block = x
            # Residual block i was built from hidden_layers[i + 1]; pad with
            # *its* kernel size so H/W are preserved and the skip-add below
            # stays shape-compatible.  (The original used hidden_layers[i],
            # which breaks when kernel sizes differ between layers.)
            pad = self._calc_padding_margin(self._hidden_layers[i + 1]['kernel_size'])
            x = F.pad(x, pad)
            x = self._residual_conv_layer_1[i](x)
            x = self._residual_normalization[i](x)
            x = self._activation(x)
            x = F.pad(x, pad)
            x = self._residual_conv_layer_2[i](x)
            x = self._residual_normalization[i](x)
            x = input_block + x
            x = self._activation(x)
        # Value head: squash to [-1, 1].
        vh = self._vh_conv_layer(x)
        vh = self._vh_normalization(vh)
        vh = self._activation(vh)
        vh = vh.view(bs, -1)
        vh = self._vh_dense_1(vh)
        vh = self._activation(vh)
        vh = self._vh_dense_2(vh)
        vh = torch.tanh(vh)
        # Policy head: raw logits (softmax/CE applied by the loss).
        ph = self._ph_conv_layer(x)
        ph = self._ph_normalization(ph)
        ph = self._activation(ph)
        ph = ph.view(bs, -1)
        ph = self._ph_dense(ph)
        # model = Model(inputs=[main_input], outputs=[vh, ph])
        # model.compile(loss={'value_head': 'mean_squared_error', 'policy_head': softmax_cross_entropy_with_logits},
        #               optimizer=SGD(lr=self.learning_rate, momentum = config.MOMENTUM),
        #               loss_weights={'value_head': 0.5, 'policy_head': 0.5}
        #               )
        if y is None:
            return vh, ph
        # Training path: per-sample MSE on value, CE on policy (one-hot
        # targets converted via argmax), equally weighted.
        v_loss_func = nn.MSELoss(reduction='none')
        p_loss_func = nn.CrossEntropyLoss(reduction='none')
        v_loss = v_loss_func(vh.view(-1), torch.from_numpy(y['value_head']).to(self._device))
        p_loss = p_loss_func(ph, torch.argmax(torch.from_numpy(y['policy_head']).to(self._device), dim=1))
        history = {}
        history['loss'] = 0.5 * v_loss + 0.5 * p_loss
        history['value_head_loss'] = v_loss
        history['policy_head_loss'] = p_loss
        return history

    def convertToModelInput(self, state):
        """Reshape a state's binary representation to the network input shape."""
        inputToModel = state.binary #np.append(state.binary, [(state.playerTurn + 1)/2] * self.input_dim[1] * self.input_dim[2])
        inputToModel = np.reshape(inputToModel, self._input_dim)
        return inputToModel
| [
"torch.tanh",
"torch.nn.BatchNorm2d",
"math.ceil",
"torch.nn.CrossEntropyLoss",
"torch.nn.LeakyReLU",
"math.floor",
"numpy.reshape",
"torch.from_numpy",
"torch.nn.Conv2d",
"torch.nn.MSELoss",
"torch.nn.Linear"
] | [((560, 577), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.3)'], {}), '(0.3)\n', (572, 577), False, 'from torch import nn\n'), ((748, 791), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (["hidden_layers[0]['filters']"], {}), "(hidden_layers[0]['filters'])\n", (762, 791), False, 'from torch import nn\n'), ((1727, 1744), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1)'], {}), '(1)\n', (1741, 1744), False, 'from torch import nn\n'), ((1772, 1813), 'torch.nn.Linear', 'nn.Linear', (['flatten_length', '(20)'], {'bias': '(False)'}), '(flatten_length, 20, bias=False)\n', (1781, 1813), False, 'from torch import nn\n'), ((1841, 1869), 'torch.nn.Linear', 'nn.Linear', (['(20)', '(1)'], {'bias': '(False)'}), '(20, 1, bias=False)\n', (1850, 1869), False, 'from torch import nn\n'), ((1984, 2001), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(2)'], {}), '(2)\n', (1998, 2001), False, 'from torch import nn\n'), ((2027, 2080), 'torch.nn.Linear', 'nn.Linear', (['(2 * flatten_length)', 'output_dim'], {'bias': '(False)'}), '(2 * flatten_length, output_dim, bias=False)\n', (2036, 2080), False, 'from torch import nn\n'), ((2192, 2226), 'math.floor', 'floor', (['((kernel_size[-2] - 1) / 2.0)'], {}), '((kernel_size[-2] - 1) / 2.0)\n', (2197, 2226), False, 'from math import floor, ceil\n'), ((2248, 2281), 'math.ceil', 'ceil', (['((kernel_size[-2] - 1) / 2.0)'], {}), '((kernel_size[-2] - 1) / 2.0)\n', (2252, 2281), False, 'from math import floor, ceil\n'), ((2303, 2337), 'math.floor', 'floor', (['((kernel_size[-1] - 1) / 2.0)'], {}), '((kernel_size[-1] - 1) / 2.0)\n', (2308, 2337), False, 'from math import floor, ceil\n'), ((2359, 2392), 'math.ceil', 'ceil', (['((kernel_size[-1] - 1) / 2.0)'], {}), '((kernel_size[-1] - 1) / 2.0)\n', (2363, 2392), False, 'from math import floor, ceil\n'), ((2531, 2622), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'x[-3]', 'out_channels': 'filters', 'kernel_size': 'kernel_size', 'bias': '(False)'}), '(in_channels=x[-3], out_channels=filters, 
kernel_size=kernel_size,\n bias=False)\n', (2540, 2622), False, 'from torch import nn\n'), ((2772, 2791), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (2788, 2791), False, 'import torch\n'), ((3853, 3867), 'torch.tanh', 'torch.tanh', (['vh'], {}), '(vh)\n', (3863, 3867), False, 'import torch\n'), ((4463, 4491), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (4473, 4491), False, 'from torch import nn\n'), ((4514, 4551), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (4533, 4551), False, 'from torch import nn\n'), ((5145, 5186), 'numpy.reshape', 'np.reshape', (['inputToModel', 'self._input_dim'], {}), '(inputToModel, self._input_dim)\n', (5155, 5186), True, 'import numpy as np\n'), ((4594, 4627), 'torch.from_numpy', 'torch.from_numpy', (["y['value_head']"], {}), "(y['value_head'])\n", (4610, 4627), False, 'import torch\n'), ((4692, 4726), 'torch.from_numpy', 'torch.from_numpy', (["y['policy_head']"], {}), "(y['policy_head'])\n", (4708, 4726), False, 'import torch\n'), ((1446, 1474), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (["h['filters']"], {}), "(h['filters'])\n", (1460, 1474), False, 'from torch import nn\n')] |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# csv paths
# csv paths
colorCSV = pd.read_csv('../input/style-classifier/Multi_Label_dataset/Tasks/color.csv')
dofCSV = pd.read_csv('../input/style-classifier/Multi_Label_dataset/Tasks/dof.csv')
paletteCSV = pd.read_csv('../input/style-classifier/Multi_Label_dataset/Tasks/palette.csv')
compositionCSV = pd.read_csv('../input/style-classifier/Multi_Label_dataset/Tasks/composition.csv')
typeCSV = pd.read_csv('../input/style-classifier/Multi_Label_dataset/Tasks/type.csv')


def _plot_label_counts(df, columns, title, save_path, labels=None):
    """Draw, save, and show a horizontal bar chart of label frequencies.

    Replaces five copy-pasted plotting sections (Color / DoF / Palette /
    Composition / Type) with one parameterised helper.

    Args:
        df: task DataFrame with one 0/1 indicator column per label.
        columns: column names to sum; their order defines bar order.
        title: chart title.
        save_path: path the PNG is written to.
        labels: optional display names for the bars; defaults to ``columns``.
    """
    labels = columns if labels is None else labels
    counts = np.array([df[col].sum() for col in columns])
    fig, ax = plt.subplots(figsize=(15, 15))
    y_pos = np.arange(len(labels))
    ax.barh(y_pos, counts, align='center')
    ax.set_yticks(y_pos)
    ax.set_yticklabels(labels)
    ax.invert_yaxis()  # labels read top-to-bottom
    ax.set_xlabel('Counts')
    ax.set_title(title)
    plt.savefig(save_path)
    plt.show()


# Color: display name differs from the CSV column name.
_plot_label_counts(colorCSV, ['Colorful', 'Black and White'], 'Color',
                   '../Plots/colorStats.png',
                   labels=['Colorful', 'Black & White'])
_plot_label_counts(dofCSV, ['Deep', 'Shallow'], 'DoF',
                   '../Plots/dofStats.png')
_plot_label_counts(paletteCSV,
                   ['Black', 'Blue', 'Yellow', 'Green', 'White', 'Other',
                    'Gray', 'Brown', 'Red', 'Orange', 'Human Skin', 'Pink',
                    'Violet'],
                   'Palette', '../Plots/paletteStats.png')
_plot_label_counts(compositionCSV,
                   ['Undefined', 'Rule of Thirds', 'Centered', 'Leading Lines',
                    'Diagonals and Triangles', 'Patterns and Textures',
                    'Frame within Frame', 'Symmetrical', 'Minimal',
                    'Filling the Frame'],
                   'Composition', '../Plots/compositionStats.png')
_plot_label_counts(typeCSV,
                   ['Astro', 'Landscape', 'Night', 'Architectural', 'Food',
                    'Flora', 'Other', 'Cityscape', 'Wildlife', 'Event',
                    'Portrait', 'Sports', 'Macro', 'Pet', 'Street',
                    'Documentary', 'Wedding'],
                   'Type', '../Plots/typeStats.png')
| [
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((95, 171), 'pandas.read_csv', 'pd.read_csv', (['"""../input/style-classifier/Multi_Label_dataset/Tasks/color.csv"""'], {}), "('../input/style-classifier/Multi_Label_dataset/Tasks/color.csv')\n", (106, 171), True, 'import pandas as pd\n'), ((181, 255), 'pandas.read_csv', 'pd.read_csv', (['"""../input/style-classifier/Multi_Label_dataset/Tasks/dof.csv"""'], {}), "('../input/style-classifier/Multi_Label_dataset/Tasks/dof.csv')\n", (192, 255), True, 'import pandas as pd\n'), ((269, 347), 'pandas.read_csv', 'pd.read_csv', (['"""../input/style-classifier/Multi_Label_dataset/Tasks/palette.csv"""'], {}), "('../input/style-classifier/Multi_Label_dataset/Tasks/palette.csv')\n", (280, 347), True, 'import pandas as pd\n'), ((365, 452), 'pandas.read_csv', 'pd.read_csv', (['"""../input/style-classifier/Multi_Label_dataset/Tasks/composition.csv"""'], {}), "(\n '../input/style-classifier/Multi_Label_dataset/Tasks/composition.csv')\n", (376, 452), True, 'import pandas as pd\n'), ((458, 533), 'pandas.read_csv', 'pd.read_csv', (['"""../input/style-classifier/Multi_Label_dataset/Tasks/type.csv"""'], {}), "('../input/style-classifier/Multi_Label_dataset/Tasks/type.csv')\n", (469, 533), True, 'import pandas as pd\n'), ((795, 832), 'numpy.array', 'np.array', (['[colorful, black_and_white]'], {}), '([colorful, black_and_white])\n', (803, 832), True, 'import numpy as np\n'), ((845, 875), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (857, 875), True, 'import matplotlib.pyplot as plt\n'), ((1087, 1125), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../Plots/colorStats.png"""'], {}), "('../Plots/colorStats.png')\n", (1098, 1125), True, 'import matplotlib.pyplot as plt\n'), ((1126, 1136), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1134, 1136), True, 'import matplotlib.pyplot as plt\n'), ((1481, 1506), 'numpy.array', 'np.array', (['[deep, shallow]'], {}), '([deep, shallow])\n', (1489, 1506), True, 'import numpy as 
np\n'), ((1519, 1549), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (1531, 1549), True, 'import matplotlib.pyplot as plt\n'), ((1759, 1795), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../Plots/dofStats.png"""'], {}), "('../Plots/dofStats.png')\n", (1770, 1795), True, 'import matplotlib.pyplot as plt\n'), ((1796, 1806), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1804, 1806), True, 'import matplotlib.pyplot as plt\n'), ((2648, 2756), 'numpy.array', 'np.array', (['[black, blue, yellow, green, white, other, gray, brown, red, orange,\n human_skin, pink, violet]'], {}), '([black, blue, yellow, green, white, other, gray, brown, red,\n orange, human_skin, pink, violet])\n', (2656, 2756), True, 'import numpy as np\n'), ((2765, 2795), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (2777, 2795), True, 'import matplotlib.pyplot as plt\n'), ((3009, 3049), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../Plots/paletteStats.png"""'], {}), "('../Plots/paletteStats.png')\n", (3020, 3049), True, 'import matplotlib.pyplot as plt\n'), ((3050, 3060), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3058, 3060), True, 'import matplotlib.pyplot as plt\n'), ((4077, 4256), 'numpy.array', 'np.array', (['[undefined, rule_of_thirds, centered, leading_lines,\n diagonals_and_triangles, patterns_and_textures, frame_within_frame,\n symmetrical, minimal, filling_the_frame]'], {}), '([undefined, rule_of_thirds, centered, leading_lines,\n diagonals_and_triangles, patterns_and_textures, frame_within_frame,\n symmetrical, minimal, filling_the_frame])\n', (4085, 4256), True, 'import numpy as np\n'), ((4280, 4310), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (4292, 4310), True, 'import matplotlib.pyplot as plt\n'), ((4528, 4572), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""../Plots/compositionStats.png"""'], {}), "('../Plots/compositionStats.png')\n", (4539, 4572), True, 'import matplotlib.pyplot as plt\n'), ((4573, 4583), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4581, 4583), True, 'import matplotlib.pyplot as plt\n'), ((5624, 5790), 'numpy.array', 'np.array', (['[astro, landscape, night, architectural, food, flora, other, cityscape,\n wildlife, event, portrait, sports, macro, pet, street, documentary, wedding\n ]'], {}), '([astro, landscape, night, architectural, food, flora, other,\n cityscape, wildlife, event, portrait, sports, macro, pet, street,\n documentary, wedding])\n', (5632, 5790), True, 'import numpy as np\n'), ((5814, 5844), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (5826, 5844), True, 'import matplotlib.pyplot as plt\n'), ((6055, 6092), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../Plots/typeStats.png"""'], {}), "('../Plots/typeStats.png')\n", (6066, 6092), True, 'import matplotlib.pyplot as plt\n'), ((6093, 6103), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6101, 6103), True, 'import matplotlib.pyplot as plt\n')] |
import torch
import numpy as np
import mmcv
import cv2
def get_mask_size(proposals_list, base_size, ratio_max):
    """Derive a pooled mask (width, height) from the proposals' aspect ratios.

    The widest ceil'd w/h ratio found across all proposal sets — clamped to
    ``ratio_max`` and never below 1.0 — stretches the base size horizontally,
    so wide boxes get wide mask targets.
    """
    widest = 1.0
    for props in proposals_list:
        aspect = props[:, 2] / props[:, 3]
        # Proposals are expected landscape-normalized (w >= h).
        assert aspect.min() >= 1.0
        widest = max(widest, aspect.ceil().max())
    widest = float(min(ratio_max, widest))
    height = int(base_size)
    width = int(widest * base_size)
    return width, height
def crop_rotate_mask(gt_mask, x, y, w, h, theta):
    """Cut an axis-aligned (w, h) patch around a rotated box centre.

    The mask is zero-padded to twice its size so rotation cannot clip any
    content, rotated by ``theta`` about (x, y), translated so the box centre
    lands at the patch origin, and finally cropped to (h, w).

    Args:
        gt_mask: 2-D ground-truth mask array.
        x, y: rotation centre in the original mask's coordinates.
        w, h: crop width/height in pixels (truncated to int for slicing).
        theta: rotation angle in radians (converted to degrees for OpenCV).

    Returns:
        The cropped, rotated mask of shape (int(h), int(w)).
    """
    rows, cols = gt_mask.shape
    expand_layer = np.zeros((rows * 2, cols * 2), dtype='uint8')
    rows_start = int(rows / 2)
    cols_start = int(cols / 2)
    expand_layer[rows_start:rows_start + rows, cols_start:cols_start + cols] = gt_mask
    # cv2 wants degrees; theta arrives in radians.
    M = cv2.getRotationMatrix2D((x + cols_start, y + rows_start), theta * 180 / np.pi, 1)
    # warpAffine dsize is (width, height), hence shape[::-1].
    dst = cv2.warpAffine(expand_layer, M, expand_layer.shape[::-1], borderValue=0)
    M = np.float32([[1.0, 0, -x + w / 2 - cols_start], [0, 1, -y + h / 2 - rows_start]])
    dst = cv2.warpAffine(dst, M, dst.shape[::-1], borderValue=0)
    # Bug fix: np.int was removed in NumPy 1.24; builtin int truncates identically.
    dst = dst[:int(h), :int(w)]
    return dst
def mask_rotate_adaptive_target(pos_proposals_list, pos_assigned_gt_inds_list,
                                gt_masks_list, cfg, ratio_max):
    """Build rotated, aspect-adaptive mask targets for every image in a batch.

    For each positive proposal (x, y, w, h, theta) the assigned GT mask is
    rotated/cropped to the box via ``crop_rotate_mask`` and resized to a
    single shared (mask_w, mask_h) whose aspect ratio adapts to the widest
    proposal in the batch (see ``get_mask_size``).

    Args:
        pos_proposals_list: per-image tensors of positive rotated boxes,
            each row (x, y, w, h, theta).
        pos_assigned_gt_inds_list: per-image GT index for each proposal.
        gt_masks_list: per-image sequences of ground-truth masks.
        cfg: mapping providing the base 'mask_size'.
        ratio_max: upper bound on the width/height pooling ratio.

    Returns:
        One float tensor with all targets concatenated over images.
    """
    # One shared target size for the whole batch, driven by proposal shapes.
    mask_w, mask_h = get_mask_size(pos_proposals_list, cfg['mask_size'], ratio_max)
    mask_rotate_adaptive_targets_list = []
    for i in range(len(gt_masks_list)):
        mask_rotate_adaptive_targets = []
        pos_assigned_gt_inds = pos_assigned_gt_inds_list[i]
        pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
        pos_proposals = pos_proposals_list[i]
        proposals_np = pos_proposals.cpu().numpy()
        gt_masks = gt_masks_list[i]
        num_pos = proposals_np.shape[0]
        if num_pos > 0:
            for j in range(num_pos):
                gt_mask = gt_masks[pos_assigned_gt_inds[j]]
                bbox = proposals_np[j, :]
                x, y, w, h, theta = bbox
                # Guard against degenerate zero-sized boxes before cropping.
                w = np.maximum(w, 1)
                h = np.maximum(h, 1)
                dst = crop_rotate_mask(gt_mask, x, y, w, h, theta)
                target = mmcv.imresize(dst, (mask_w, mask_h))
                mask_rotate_adaptive_targets.append(target)
            mask_rotate_adaptive_targets = torch.from_numpy(np.stack(mask_rotate_adaptive_targets)).float().to(
                pos_proposals.device)
        else:
            # No positives: keep an empty tensor so the final cat still works.
            mask_rotate_adaptive_targets = pos_proposals.new_zeros((0, mask_h, mask_w))
        mask_rotate_adaptive_targets_list.append(mask_rotate_adaptive_targets)
    return torch.cat(mask_rotate_adaptive_targets_list)
| [
"cv2.warpAffine",
"mmcv.imresize",
"numpy.stack",
"numpy.zeros",
"cv2.getRotationMatrix2D",
"numpy.maximum",
"numpy.int",
"numpy.float32",
"torch.cat"
] | [((532, 577), 'numpy.zeros', 'np.zeros', (['(rows * 2, cols * 2)'], {'dtype': '"""uint8"""'}), "((rows * 2, cols * 2), dtype='uint8')\n", (540, 577), True, 'import numpy as np\n'), ((723, 809), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(x + cols_start, y + rows_start)', '(theta * 180 / np.pi)', '(1)'], {}), '((x + cols_start, y + rows_start), theta * 180 / np.\n pi, 1)\n', (746, 809), False, 'import cv2\n'), ((804, 876), 'cv2.warpAffine', 'cv2.warpAffine', (['expand_layer', 'M', 'expand_layer.shape[::-1]'], {'borderValue': '(0)'}), '(expand_layer, M, expand_layer.shape[::-1], borderValue=0)\n', (818, 876), False, 'import cv2\n'), ((882, 967), 'numpy.float32', 'np.float32', (['[[1.0, 0, -x + w / 2 - cols_start], [0, 1, -y + h / 2 - rows_start]]'], {}), '([[1.0, 0, -x + w / 2 - cols_start], [0, 1, -y + h / 2 - rows_start]]\n )\n', (892, 967), True, 'import numpy as np\n'), ((956, 1010), 'cv2.warpAffine', 'cv2.warpAffine', (['dst', 'M', 'dst.shape[::-1]'], {'borderValue': '(0)'}), '(dst, M, dst.shape[::-1], borderValue=0)\n', (970, 1010), False, 'import cv2\n'), ((2557, 2601), 'torch.cat', 'torch.cat', (['mask_rotate_adaptive_targets_list'], {}), '(mask_rotate_adaptive_targets_list)\n', (2566, 2601), False, 'import torch\n'), ((1023, 1032), 'numpy.int', 'np.int', (['h'], {}), '(h)\n', (1029, 1032), True, 'import numpy as np\n'), ((1035, 1044), 'numpy.int', 'np.int', (['w'], {}), '(w)\n', (1041, 1044), True, 'import numpy as np\n'), ((1971, 1987), 'numpy.maximum', 'np.maximum', (['w', '(1)'], {}), '(w, 1)\n', (1981, 1987), True, 'import numpy as np\n'), ((2008, 2024), 'numpy.maximum', 'np.maximum', (['h', '(1)'], {}), '(h, 1)\n', (2018, 2024), True, 'import numpy as np\n'), ((2117, 2153), 'mmcv.imresize', 'mmcv.imresize', (['dst', '(mask_w, mask_h)'], {}), '(dst, (mask_w, mask_h))\n', (2130, 2153), False, 'import mmcv\n'), ((2274, 2312), 'numpy.stack', 'np.stack', (['mask_rotate_adaptive_targets'], {}), '(mask_rotate_adaptive_targets)\n', (2282, 
2312), True, 'import numpy as np\n')] |
from scipy import stats
import numpy as np
__all__ = ['chisquare', 'kolsmi']
def kolsmi(dist, fit_result, data):
    """Perform a Kolmogorow-Smirnow-Test for goodness of fit.

    This tests the H0 hypothesis, if data is a sample of dist.

    Args:
        dist: A mle.Distribution instance
        fit_result: The solution dict, returned by the Distribution.fit method
        data: The data used in Distribution.fit

    Returns:
        teststat: the test statistic, e.g. the max distance between the
                  cumulated distributions
        p-value: the p-value, probability that dist describes the data
    """
    fitted_vars = dist.get_vars()
    if len(fitted_vars) > 1:
        raise ValueError("Kolmogorov-Smirnov-Test is only valid for 1d distributions")
    variable = fitted_vars[0]
    params = fit_result["x"]
    teststat, pvalue = stats.kstest(data[variable.name], lambda x: dist.cdf(x, **params))
    return teststat, pvalue
def chisquare(dist, fit_result, data, bins=None, range=None):
    """Perform a Chi^2 test for goodness of fit.

    Tests the H0 hypothesis if the distances between fit result and
    data are compatible with random fluctuations.

    Args:
        dist: A mle.Distribution instance
        fit_result: The solution dict, returned by the Distribution.fit method
        data: The data used in Distribution.fit
        bins: Number of bins for the histogram (default: 2 * N**(1/3),
            rounded up)
        range: Range for the histogram (default: min(data), max(data)).
            Note: the name shadows the builtin but is kept for
            backward compatibility with existing callers.

    Returns:
        chisquare: the test statistic, chi^2/ndf
        p-value: the p-value, probability that differences between dist
            and data are compatible with random fluctuation
    """
    variables = dist.get_vars()
    if len(variables) > 1:
        raise ValueError("This is a 1d only chisquare test")
    var = variables[0]
    # Rule of thumb for the number of bins if not provided.  Bug fix:
    # np.ceil returns a float, but np.histogram requires an integer bin
    # count, so cast explicitly.
    if bins is None:
        bins = int(np.ceil(2 * len(data[var.name]) ** (1.0 / 3.0)))
    entries, edges = np.histogram(data[var.name], bins=bins, range=range)
    # Expected frequencies from the fitted CDF evaluated at the bin edges.
    cdf = dist.cdf(edges, **fit_result["x"])
    exp_entries = np.round(len(data[var.name]) * (cdf[1:] - cdf[:-1]))
    # Use only bins where at least 5 entries are expected (chi^2 validity).
    mask = exp_entries >= 5
    chisq, pvalue = stats.chisquare(entries[mask], exp_entries[mask], ddof=len(fit_result["x"]))
    # Normalize to chi^2 per degree of freedom.
    chisq = chisq / (np.sum(mask) - len(fit_result["x"]) - 1)
    return chisq, pvalue
| [
"numpy.sum",
"numpy.histogram"
] | [((2053, 2105), 'numpy.histogram', 'np.histogram', (['data[var.name]'], {'bins': 'bins', 'range': 'range'}), '(data[var.name], bins=bins, range=range)\n', (2065, 2105), True, 'import numpy as np\n'), ((2470, 2482), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (2476, 2482), True, 'import numpy as np\n')] |
from model import Model, Optimizer
import numpy as np
IMAGE_SIZE = 28
class ClientModel(Model):
    """Per-client model wrapper that always trains with an ErmOptimizer.

    NOTE(review): the ``optimizer`` constructor argument is accepted but
    ignored — a fresh ErmOptimizer() is always passed to the base class;
    confirm whether callers rely on supplying their own optimizer.
    """

    def __init__(self, lr, num_classes, max_batch_size=None, seed=None, optimizer=None):
        self.num_classes = num_classes
        super(ClientModel, self).__init__(lr, seed, max_batch_size, optimizer=ErmOptimizer())

    def create_model(self):
        """Model function for linear model.

        Currently a stub: the TensorFlow graph construction below is fully
        commented out, so this method returns None.
        """
        #features = tf.placeholder(
        #    tf.float32, shape=[None, IMAGE_SIZE * IMAGE_SIZE], name='features')
        #labels = tf.placeholder(tf.int64, shape=[None], name='labels')
        #logits = tf.layers.dense(inputs=features, units=self.num_classes)
        #predictions = {
        #    "classes": tf.argmax(input=logits, axis=1),
        #    "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
        #}
        #loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        #train_op = self.optimizer.minimize(
        #    loss=loss,
        #    global_step=tf.train.get_global_step())
        #eval_metric_ops = tf.count_nonzero(tf.equal(labels, predictions["classes"]))
        #return features, labels, loss, train_op,

    # def process_y(self, raw_y_batch):
    #     """Pre-processes each batch of labels before being fed to the model."""
    #     res = []
    #     for i in range(len(raw_y_batch)):
    #         num = np.zeros(62)  # Number of classes
    #         num[raw_y_batch[i]] = 1.0
    #         res.append(num)
    #     return np.asarray(res)
class ErmOptimizer(Optimizer):
    """Binary logistic-regression ERM optimizer with L2 regularization.

    Maintains a 50-dimensional weight vector ``self.w`` that is updated by
    averaged gradient-descent steps in ``run_step``.
    """

    def __init__(self, starting_w=None):
        # Bug fix: the old signature used the mutable default
        # ``starting_w=np.zeros(50)``.  Because run_step updates self.w in
        # place (``self.w -= ...``), every instance created with the default
        # silently shared — and corrupted — one single weight vector.
        # Create a fresh zero vector per instance instead.
        if starting_w is None:
            starting_w = np.zeros(50)
        super(ErmOptimizer, self).__init__(starting_w)
        self.optimizer_model = None
        # self.learning_rate = 0.0003
        self.learning_rate = 0.000001  # used before launched 0.52 on train accuracy
        # self.lmbda = 0.01
        # self.lmbda = 0.1  # used before
        self.lmbda = 0.001

    def single_loss(self, x, y):
        """Log-loss of a single example (labels assumed in {0, 1} — the
        y*log(1+e^-p) + (1-y)*log(1+e^p) form; regularization excluded)."""
        pred = np.matmul(x.reshape(1, -1), self.w)
        loss = np.mean(y.reshape(1) * np.log(1 + np.exp(-pred)) + (1 - y.reshape(1)) * np.log(1 + np.exp(pred)))
        return loss

    def loss(self, batched_x, batched_y):
        """Average per-example loss over the batch plus the L2 penalty."""
        n = len(batched_y)
        loss = 0.0
        for i in range(n):
            loss += self.single_loss(batched_x[i], batched_y[i])
        averaged_loss = loss / n
        return averaged_loss + self.lmbda / 2 * np.linalg.norm(self.w) ** 2

    def gradient(self, x, y):
        """Gradient of the regularized log-loss for one example."""
        image = x.reshape(1, -1)
        target = y.reshape(1)
        inp = np.matmul(image, self.w)
        loglossderiv = (-target / (1 + np.exp(inp)) + (1 - target) / (1 + np.exp(-inp))) / target.shape[0]
        return np.matmul(loglossderiv, image) + self.lmbda * self.w

    def run_step(self, batched_x, batched_y):
        """Apply one averaged gradient-descent step; return the pre-step loss."""
        loss = 0.0
        s = np.zeros(self.w.shape)
        n = len(batched_y)
        for i in range(n):
            s += self.learning_rate * self.gradient(batched_x[i], batched_y[i])
            loss += self.single_loss(batched_x[i], batched_y[i])
        self.w -= s / n
        averaged_loss = loss / n
        return averaged_loss

    def update_w(self):
        # NOTE(review): this aliases rather than copies, so later in-place
        # updates of self.w also change w_on_last_update — confirm intended.
        self.w_on_last_update = self.w

    def correct_single_label(self, x, y):
        """Return 1.0 if the 0.5-thresholded sigmoid prediction equals y."""
        proba = self.sigmoid(np.dot(x, self.w))
        if proba >= 0.5:
            prediction = 1.0
        else:
            prediction = 0.0
        return float(prediction == y)

    def initialize_w(self):
        """Reset the weights (and their snapshot) to fresh zero vectors."""
        self.w = np.zeros(50)
        self.w_on_last_update = np.zeros(50)

    def correct(self, x, y):
        """Count correct predictions over a batch."""
        nb_correct = 0.0
        for i in range(len(y)):
            nb_correct += self.correct_single_label(x[i], y[i])
        return nb_correct

    def size(self):
        # Fixed model dimensionality (matches the zeros(50) initialization).
        return 50

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))
| [
"numpy.exp",
"numpy.dot",
"numpy.zeros",
"numpy.matmul",
"numpy.linalg.norm"
] | [((1583, 1595), 'numpy.zeros', 'np.zeros', (['(50)'], {}), '(50)\n', (1591, 1595), True, 'import numpy as np\n'), ((2920, 2944), 'numpy.matmul', 'np.matmul', (['image', 'self.w'], {}), '(image, self.w)\n', (2929, 2944), True, 'import numpy as np\n'), ((3198, 3220), 'numpy.zeros', 'np.zeros', (['self.w.shape'], {}), '(self.w.shape)\n', (3206, 3220), True, 'import numpy as np\n'), ((3840, 3852), 'numpy.zeros', 'np.zeros', (['(50)'], {}), '(50)\n', (3848, 3852), True, 'import numpy as np\n'), ((3885, 3897), 'numpy.zeros', 'np.zeros', (['(50)'], {}), '(50)\n', (3893, 3897), True, 'import numpy as np\n'), ((3067, 3097), 'numpy.matmul', 'np.matmul', (['loglossderiv', 'image'], {}), '(loglossderiv, image)\n', (3076, 3097), True, 'import numpy as np\n'), ((3639, 3656), 'numpy.dot', 'np.dot', (['x', 'self.w'], {}), '(x, self.w)\n', (3645, 3656), True, 'import numpy as np\n'), ((4166, 4176), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (4172, 4176), True, 'import numpy as np\n'), ((2582, 2604), 'numpy.linalg.norm', 'np.linalg.norm', (['self.w'], {}), '(self.w)\n', (2596, 2604), True, 'import numpy as np\n'), ((2984, 2995), 'numpy.exp', 'np.exp', (['inp'], {}), '(inp)\n', (2990, 2995), True, 'import numpy as np\n'), ((3019, 3031), 'numpy.exp', 'np.exp', (['(-inp)'], {}), '(-inp)\n', (3025, 3031), True, 'import numpy as np\n'), ((2238, 2251), 'numpy.exp', 'np.exp', (['(-pred)'], {}), '(-pred)\n', (2244, 2251), True, 'import numpy as np\n'), ((2287, 2299), 'numpy.exp', 'np.exp', (['pred'], {}), '(pred)\n', (2293, 2299), True, 'import numpy as np\n')] |
import time
import os
import pickle
import argparse
import multiprocessing as mp
from multiprocessing import Pool
import numpy as np
from single_peaked_bandits.solvers import OptimalSolver
from single_peaked_bandits.helpers import (
get_reward_for_policy,
)
from single_peaked_bandits.constants import RESULTS_FOLDER, PICKLE_FOLDER
from single_peaked_bandits.experiments import EXPERIMENTS
from make_plots import make_plots
def run_bandit_solver(job):
    """Run one (bandit, solver, T) job and return its summary dict.

    Instantiates the solver class, optionally re-randomizes the bandit's
    arms, solves for horizon T, and evaluates the resulting policy on the
    noise-free arms.  When ``compute_regret`` is set, also solves with
    OptimalSolver and reports the regret (optimal reward minus achieved).

    Args:
        job: dict with keys 'bandit', 'solver' (a solver class), 'T',
            'randomize_bandit', and 'compute_regret'.

    Returns:
        dict with bandit/solver names, T, the policy, the cumulative
        reward, and the regret under key 'single_peaked_bandits'
        (None when regret was not computed).
    """
    bandit = job["bandit"]
    solver = job["solver"]
    randomize_bandit = job["randomize_bandit"]
    compute_regret = job["compute_regret"]
    T = job["T"]

    solver_instance = solver()
    t = time.time()
    if randomize_bandit:
        # Draw a fresh random instance of the bandit's arms for this run.
        bandit._new_random_arms()
    policy = solver_instance.solve(bandit, T)
    cumulative_reward = get_reward_for_policy(bandit.noise_free_arms, T, policy)
    if compute_regret:
        optimal_solver = OptimalSolver()
        optimal_policy = optimal_solver.solve(bandit, T)
        optimal_reward = get_reward_for_policy(
            bandit.noise_free_arms, T, optimal_policy
        )
        single_peaked_bandits = optimal_reward - cumulative_reward
    else:
        single_peaked_bandits = None
    print(
        f"{solver_instance.name}, {bandit.name}, T: {T}, time: {time.time() - t}, "
        f"policy: {policy}, cumulative_reward: {cumulative_reward}"
    )
    return {
        "bandit": bandit.name,
        "solver": solver_instance.name,
        "T": T,
        "policy": policy,
        "cumulative_reward": cumulative_reward,
        "single_peaked_bandits": single_peaked_bandits,
    }
def main():
    """CLI entry point for the bandit experiments.

    Builds a flat list of (bandit, solver, T) jobs from the selected
    experiment labels, runs them serially or in a spawn-based process pool,
    aggregates per-(bandit, solver) results, pickles them, and regenerates
    the plots.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--labels",
        type=str,
        help="Experiments to launch.",
        default="inc_dec_1",
    )
    parser.add_argument(
        "--n_jobs", type=int, help="Number of jobs to launch in parallel.", default=1
    )
    parser.add_argument(
        "--n_discretization",
        type=int,
        help="Discretization of the time-horizon.",
        default=100,
    )
    parser.add_argument(
        "--n_seeds",
        type=int,
        help="Number of random seeds to run per experiment.",
        default=30,
    )
    parser.add_argument("--optimal", action="store_true")
    parser.add_argument("--randomize_bandit", action="store_true")
    parser.add_argument("--compute_regret", action="store_true")
    args = parser.parse_args()

    pickle_path = os.path.join(RESULTS_FOLDER, PICKLE_FOLDER)
    os.makedirs(pickle_path, exist_ok=True)

    labels = args.labels.split(",")
    print("Running experiments:", ", ".join(labels))

    # Collect selected experiments into one flat job list.
    jobs = []
    for experiment_label in labels:
        experiment_bandits, solvers = EXPERIMENTS[experiment_label]
        if args.optimal:
            solvers = [OptimalSolver]
        for bandit in experiment_bandits:
            if args.optimal:
                T_array = np.arange(1, bandit.Tmax + 1)
            else:
                # Bug fix: the deprecated alias np.int was removed in
                # NumPy 1.24; the builtin int is the documented replacement
                # and yields the same dtype.
                T_array = np.linspace(
                    2 * bandit.n + 1, bandit.Tmax, args.n_discretization, dtype=int
                )
            for solver in solvers:
                # Deterministic (bandit, solver) pairs only need one run.
                if bandit.stochastic or solver.stochastic:
                    n_runs = args.n_seeds
                else:
                    n_runs = 1
                for _ in range(n_runs):
                    for T in T_array:
                        jobs.append(
                            {
                                "bandit": bandit,
                                "solver": solver,
                                "T": T,
                                "randomize_bandit": args.randomize_bandit,
                                "compute_regret": args.compute_regret,
                            }
                        )

    # Run experiments
    start_time = time.time()
    if args.n_jobs == 1:
        results = [run_bandit_solver(job) for job in jobs]
    else:
        with mp.get_context("spawn").Pool(args.n_jobs) as p:
            results = p.map(run_bandit_solver, jobs, chunksize=1)

    # Aggregate results per (bandit, solver) pair, keyed by horizon T.
    results_aggregated = dict()
    for res in results:
        bandit = res["bandit"]
        solver = res["solver"]
        T = res["T"]
        policy = res["policy"]
        cumulative_reward = res["cumulative_reward"]
        single_peaked_bandits = res["single_peaked_bandits"]
        if (bandit, solver) not in results_aggregated:
            results_aggregated[(bandit, solver)] = dict()
        if T not in results_aggregated[(bandit, solver)]:
            results_aggregated[(bandit, solver)][T] = []
        results_aggregated[(bandit, solver)][T].append(
            (policy, cumulative_reward, single_peaked_bandits)
        )

    # Write one pickle per (bandit, solver) pair.
    for (bandit, solver), results in results_aggregated.items():
        T_list = []
        policy_list = [[] for _ in range(args.n_seeds)]
        cumulative_reward_list = [[] for _ in range(args.n_seeds)]
        for T in sorted(results.keys()):
            T_list.append(T)
            for i in range(len(results[T])):
                policy, cumulative_reward, single_peaked_bandits = results[T][i]
                policy_list[i].append(tuple(policy))
                # Under --compute_regret the "reward" column stores regret.
                if args.compute_regret:
                    cumulative_reward_list[i].append(single_peaked_bandits)
                else:
                    cumulative_reward_list[i].append(cumulative_reward)
        pickle_file = os.path.join(pickle_path, f"{bandit}_{solver}_result.p")
        with open(pickle_file, "wb") as f:
            pickle.dump(
                (
                    bandit,
                    solver,
                    tuple(T_list),
                    tuple([tuple(x) for x in policy_list]),
                    tuple([tuple(x) for x in cumulative_reward_list]),
                ),
                f,
            )

    make_plots()
    print("Done in", time.time() - start_time)


if __name__ == "__main__":
    main()
| [
"argparse.ArgumentParser",
"os.makedirs",
"single_peaked_bandits.helpers.get_reward_for_policy",
"make_plots.make_plots",
"os.path.join",
"multiprocessing.get_context",
"numpy.linspace",
"single_peaked_bandits.solvers.OptimalSolver",
"time.time",
"numpy.arange"
] | [((662, 673), 'time.time', 'time.time', ([], {}), '()\n', (671, 673), False, 'import time\n'), ((805, 861), 'single_peaked_bandits.helpers.get_reward_for_policy', 'get_reward_for_policy', (['bandit.noise_free_arms', 'T', 'policy'], {}), '(bandit.noise_free_arms, T, policy)\n', (826, 861), False, 'from single_peaked_bandits.helpers import get_reward_for_policy\n'), ((1649, 1674), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1672, 1674), False, 'import argparse\n'), ((2473, 2516), 'os.path.join', 'os.path.join', (['RESULTS_FOLDER', 'PICKLE_FOLDER'], {}), '(RESULTS_FOLDER, PICKLE_FOLDER)\n', (2485, 2516), False, 'import os\n'), ((2521, 2560), 'os.makedirs', 'os.makedirs', (['pickle_path'], {'exist_ok': '(True)'}), '(pickle_path, exist_ok=True)\n', (2532, 2560), False, 'import os\n'), ((3893, 3904), 'time.time', 'time.time', ([], {}), '()\n', (3902, 3904), False, 'import time\n'), ((5992, 6004), 'make_plots.make_plots', 'make_plots', ([], {}), '()\n', (6002, 6004), False, 'from make_plots import make_plots\n'), ((911, 926), 'single_peaked_bandits.solvers.OptimalSolver', 'OptimalSolver', ([], {}), '()\n', (924, 926), False, 'from single_peaked_bandits.solvers import OptimalSolver\n'), ((1009, 1073), 'single_peaked_bandits.helpers.get_reward_for_policy', 'get_reward_for_policy', (['bandit.noise_free_arms', 'T', 'optimal_policy'], {}), '(bandit.noise_free_arms, T, optimal_policy)\n', (1030, 1073), False, 'from single_peaked_bandits.helpers import get_reward_for_policy\n'), ((5570, 5626), 'os.path.join', 'os.path.join', (['pickle_path', 'f"""{bandit}_{solver}_result.p"""'], {}), "(pickle_path, f'{bandit}_{solver}_result.p')\n", (5582, 5626), False, 'import os\n'), ((6026, 6037), 'time.time', 'time.time', ([], {}), '()\n', (6035, 6037), False, 'import time\n'), ((2985, 3014), 'numpy.arange', 'np.arange', (['(1)', '(bandit.Tmax + 1)'], {}), '(1, bandit.Tmax + 1)\n', (2994, 3014), True, 'import numpy as np\n'), ((3059, 3138), 'numpy.linspace', 
'np.linspace', (['(2 * bandit.n + 1)', 'bandit.Tmax', 'args.n_discretization'], {'dtype': 'np.int'}), '(2 * bandit.n + 1, bandit.Tmax, args.n_discretization, dtype=np.int)\n', (3070, 3138), True, 'import numpy as np\n'), ((1289, 1300), 'time.time', 'time.time', ([], {}), '()\n', (1298, 1300), False, 'import time\n'), ((4078, 4101), 'multiprocessing.get_context', 'mp.get_context', (['"""spawn"""'], {}), "('spawn')\n", (4092, 4101), True, 'import multiprocessing as mp\n')] |
import numpy as np
def identity_function(x):
    """Identity activation: return the input unchanged."""
    return x
def leaky_relu(x, alpha=0.1):
    """Leaky ReLU: x where x > 0, alpha * x elsewhere.

    Bug fix: the original called ``np.max(0.1 * x, x)``, which passes ``x``
    as the *axis* argument and raises for array input; ``np.maximum`` is the
    element-wise comparison intended here.

    Args:
        x: scalar or ndarray input.
        alpha: slope for negative inputs (default 0.1, matching the
            original constant).
    """
    return np.maximum(alpha * x, x)
def relu(x):
    """ReLU activation: element-wise max(0, x).

    Bug fix: the original ``np.max(0, x)`` treats ``x`` as the axis
    argument (``np.max`` reduces an array); the element-wise comparison
    needs ``np.maximum``.
    """
    return np.maximum(0, x)
def sigmoid(x):
    """Logistic sigmoid: 1 / (1 + e^-x), element-wise."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator
def tanh(x):
    """Hyperbolic-tangent activation (thin wrapper around np.tanh)."""
    return np.tanh(x)
def step_function(x):
    """Heaviside step: 1 where x > 0, else 0, as an integer array.

    Bug fix: the alias ``np.int`` was removed in NumPy 1.24; the builtin
    ``int`` produces the same default integer dtype.
    """
    return np.array(x > 0, dtype=int)
def softmax(x):
    """Numerically stable softmax over the last axis of a 1-D or 2-D array.

    Subtracting the row-wise max before exponentiating prevents overflow
    (this was the intent of the original Korean comment).

    Bug fix: the original used in-place ``-=`` (on a transposed view in the
    2-D case) and therefore silently modified the caller's array; this
    version leaves the input untouched.
    """
    if x.ndim == 2:
        shifted = x - np.max(x, axis=1, keepdims=True)
        exps = np.exp(shifted)
        return exps / np.sum(exps, axis=1, keepdims=True)
    shifted = x - np.max(x)  # overflow countermeasure
    exps = np.exp(shifted)
    return exps / np.sum(exps)
| [
"numpy.exp",
"numpy.array",
"numpy.tanh",
"numpy.max"
] | [((92, 110), 'numpy.max', 'np.max', (['(0.1 * x)', 'x'], {}), '(0.1 * x, x)\n', (98, 110), True, 'import numpy as np\n'), ((137, 149), 'numpy.max', 'np.max', (['(0)', 'x'], {}), '(0, x)\n', (143, 149), True, 'import numpy as np\n'), ((226, 236), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (233, 236), True, 'import numpy as np\n'), ((272, 301), 'numpy.array', 'np.array', (['(x > 0)'], {'dtype': 'np.int'}), '(x > 0, dtype=np.int)\n', (280, 301), True, 'import numpy as np\n'), ((466, 475), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (472, 475), True, 'import numpy as np\n'), ((369, 386), 'numpy.max', 'np.max', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (375, 386), True, 'import numpy as np\n'), ((498, 507), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (504, 507), True, 'import numpy as np\n'), ((188, 198), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (194, 198), True, 'import numpy as np\n'), ((399, 408), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (405, 408), True, 'import numpy as np\n'), ((517, 526), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (523, 526), True, 'import numpy as np\n'), ((418, 427), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (424, 427), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import argparse
# import matplotlib.pyplot as plt
import numpy as np
from torch.autograd import Variable
# from sklearn.decomposition import PCA
import settings
from classifier import Classifier
from dataset import WeightedTensorDataset
# ---- experiment hyper-parameters -------------------------------------------
# Sizes of the initial labeled pool (certain + uncertain draws) and the
# label-noise rates pho_p/pho_n applied to positive/negative examples below.
init_weight = 1
init_p_size = 125 # 650
init_n_size = 125 # 450
init_p_un_size = 0 # 1000
init_n_un_size = 0 # 500
# uncertainty_pool = 3500
uncertainty_pool_p = 1000
uncertainty_pool_n = 1000
pho_p = 0.1
pho_n = 0
batch_size = 40
num_clss = 1
learning_rate = 1e-3
test_on_train = False
retrain_epochs = 120
convex_epochs = 10
n_pca_components = 784

parser = argparse.ArgumentParser(description='MNIST noise active learning')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

if args.cuda:
    settings.dtype = torch.cuda.FloatTensor


# torchvision.datasets.MNIST outputs a set of PIL images
# We transform them to tensors
transform = transforms.Compose(
    [transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])

# Load and transform data
mnist = torchvision.datasets.MNIST(
    'datasets/MNIST', train=True, download=True, transform=transform)

mnist_test = torchvision.datasets.MNIST(
    'datasets/MNIST', train=False, download=True, transform=transform)

train_data = mnist.train_data.numpy()
train_labels = mnist.train_labels.numpy()
# Binary task on digits 3 vs 8; the affine map sends label 3 -> -1, 8 -> +1.
used_idxs = np.logical_or(train_labels == 3, train_labels == 8)
train_labels = (train_labels-3)/2.5-1
# used_idxs = np.logical_or(train_labels == 7, train_labels == 9)
# train_labels = train_labels - 8
# used_idxs = np.logical_or(train_labels == 3, train_labels == 5)
# train_labels = train_labels - 4

# pca = PCA(n_components=n_pca_components)
# train_data = pca.fit_transform(train_data.reshape(-1, 784))

train_data = train_data[used_idxs]
train_labels = train_labels[used_idxs]

# Add a channel dimension (N, 1, 28, 28) and give every sample init_weight.
train_data = torch.from_numpy(train_data).unsqueeze(1).float()
train_labels = torch.from_numpy(train_labels).unsqueeze(1).float()
training_set = WeightedTensorDataset(
    train_data, train_labels, init_weight * torch.ones(len(train_data), 1))

test_data = mnist_test.test_data.numpy()
test_labels = mnist_test.test_labels.numpy()
# Same 3-vs-8 selection and label mapping for the test split.
used_idxs = np.logical_or(test_labels == 3, test_labels == 8)
test_labels = (test_labels-3)/2.5-1
# used_idxs = np.logical_or(test_labels == 7, test_labels == 9)
# test_labels = test_labels - 8
# used_idxs = np.logical_or(test_labels == 3, test_labels == 5)
# test_labels = test_labels - 4

# test_data = pca.transform(test_data.reshape(-1, 784))

test_set = data.TensorDataset(
    torch.from_numpy(test_data[used_idxs]).unsqueeze(1).float(),
    torch.from_numpy(test_labels[used_idxs]).unsqueeze(1).float())
class Net(nn.Module):
    """Small LeNet-style CNN: two conv+pool stages, then two FC layers.

    Takes 1x28x28 inputs and emits a single logit per sample.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 3, 5, 1)
        self.conv2 = nn.Conv2d(3, 6, 5, 1)
        self.fc1 = nn.Linear(4*4*6, 20)
        self.fc2 = nn.Linear(20, 1)

    def forward(self, x):
        # 28x28 -> conv(5) -> 24x24 -> pool(2) -> 12x12
        out = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        # 12x12 -> conv(5) -> 8x8 -> pool(2) -> 4x4
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
        out = out.view(-1, 4*4*6)
        # Dropout is active only while self.training is True.
        out = F.dropout(F.relu(self.fc1(out)), training=self.training)
        return self.fc2(out)
class Linear(nn.Module):
    """Single dense layer over flattened features, producing one logit."""

    def __init__(self):
        super(Linear, self).__init__()
        # n_pca_components is a module-level constant of this script.
        self.linear = nn.Linear(n_pca_components, 1)

    def forward(self, x):
        flattened = x.view(-1, n_pca_components)
        return self.linear(flattened)
# ---- warm-up classifier and margin-based pool construction -----------------
# Train a throwaway classifier briefly, then rank all training points by how
# close their predicted probability is to 0.5 (smallest margin first).
model = Net().cuda() if args.cuda else Net()
# model = Linear().cuda if args.cuda else Linear()
cls = Classifier(model)
cls.train(training_set, test_set, batch_size, 4, 1)

output = cls.model(Variable(train_data).type(settings.dtype)).cpu()
probs = F.sigmoid(output).data.numpy().reshape(-1)
sorted_margin = torch.from_numpy(np.argsort(np.abs(probs-0.5)))
# margin_sorted_data = train_data[sorted_margin]
# grid_img = torchvision.utils.make_grid(
#     torch.cat((margin_sorted_data[:32], margin_sorted_data[-32:]), 0))
# plt.imshow(np.transpose(grid_img.numpy(), (1, 2, 0)))
# plt.show()

# Split the margin ranking by true class so positives and negatives can be
# drawn from their own "most uncertain" pools.
p_sorted_margin = []
n_sorted_margin = []
for i in sorted_margin:
    if train_labels[i][0] == 1:
        p_sorted_margin.append(i)
    else:
        n_sorted_margin.append(i)

tmp_labels = train_labels.numpy().reshape(-1)
p_idxs = tmp_labels == 1
n_idxs = tmp_labels == -1

# un_idxs = np.zeros(len(train_data))
# un_idxs[sorted_margin[:uncertainty_pool]] = True

# Draw init_p_un_size / init_n_un_size samples from the uncertain pools …
# p_un = np.argwhere(np.logical_and(p_idxs, un_idxs)).reshape(-1)
p_un = p_sorted_margin[:uncertainty_pool_p]
drawn = np.random.choice(p_un, init_p_un_size, replace=False)

# n_un = np.argwhere(np.logical_and(n_idxs, un_idxs)).reshape(-1)
n_un = n_sorted_margin[:uncertainty_pool_n]
drawn2 = np.random.choice(n_un, init_n_un_size, replace=False)

# plt_data = train_data[torch.from_numpy(
#     np.concatenate([drawn[:32], drawn2[:32]]))]
# print(plt_data)
# grid_img = torchvision.utils.make_grid(plt_data)
# plt.imshow(np.transpose(grid_img.numpy(), (1, 2, 0)))
# plt.show()

# … and init_p_size / init_n_size more uniformly from the remaining points.
dr_idxs = np.zeros(len(train_data))
dr_idxs[drawn] = True
dr_idxs[drawn2] = True
p_rest = np.argwhere(
    np.logical_and(p_idxs, np.logical_not(dr_idxs))).reshape(-1)
n_rest = np.argwhere(
    np.logical_and(n_idxs, np.logical_not(dr_idxs))).reshape(-1)

drawn3 = np.random.choice(p_rest, init_p_size, replace=False)
drawn4 = np.random.choice(n_rest, init_n_size, replace=False)

drawn = torch.from_numpy(
    np.concatenate([drawn, drawn2, drawn3, drawn4]))
given_data = train_data[drawn]
given_labels = train_labels[drawn]

# Inject asymmetric label noise: flip positives with prob pho_p, negatives
# with prob pho_n.
for i, label in enumerate(given_labels):
    assert label[0] == 1 or label[0] == -1
    if label[0] == 1 and np.random.random() < pho_p:
        # print('flip +1')
        given_labels[i] = -1
    elif np.random.random() < pho_n:
        # print('flip -1')
        given_labels[i] = 1

labeled_set = WeightedTensorDataset(
    given_data, given_labels, init_weight * torch.ones(len(given_data), 1))
print(len(labeled_set))
def create_new_classifier():
    """Build a fresh Net-backed Classifier with this script's noise settings."""
    net = Net().cuda() if args.cuda else Net()
    # model = Linear().cuda() if args.cuda else Linear()
    return Classifier(
        net,
        pho_p=pho_p,
        pho_n=pho_n,
        lr=learning_rate,
        weighted=True)
# Train each classifier independently on the (noisy) labeled pool.
clss = [create_new_classifier() for _ in range(num_clss)]

for i, cls in enumerate(clss):
    print('classifier {}'.format(i))
    cls.train(
        labeled_set, test_set, batch_size,
        retrain_epochs, convex_epochs, test_on_train=test_on_train)
| [
"numpy.logical_not",
"classifier.Classifier",
"torch.from_numpy",
"torch.nn.functional.sigmoid",
"torch.cuda.is_available",
"argparse.ArgumentParser",
"numpy.random.random",
"numpy.concatenate",
"torchvision.transforms.ToTensor",
"torch.autograd.Variable",
"numpy.abs",
"numpy.random.choice",
... | [((772, 838), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MNIST noise active learning"""'}), "(description='MNIST noise active learning')\n", (795, 838), False, 'import argparse\n'), ((1329, 1425), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', (['"""datasets/MNIST"""'], {'train': '(True)', 'download': '(True)', 'transform': 'transform'}), "('datasets/MNIST', train=True, download=True,\n transform=transform)\n", (1355, 1425), False, 'import torchvision\n'), ((1441, 1538), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', (['"""datasets/MNIST"""'], {'train': '(False)', 'download': '(True)', 'transform': 'transform'}), "('datasets/MNIST', train=False, download=True,\n transform=transform)\n", (1467, 1538), False, 'import torchvision\n'), ((1634, 1685), 'numpy.logical_or', 'np.logical_or', (['(train_labels == 3)', '(train_labels == 8)'], {}), '(train_labels == 3, train_labels == 8)\n', (1647, 1685), True, 'import numpy as np\n'), ((2450, 2499), 'numpy.logical_or', 'np.logical_or', (['(test_labels == 3)', '(test_labels == 8)'], {}), '(test_labels == 3, test_labels == 8)\n', (2463, 2499), True, 'import numpy as np\n'), ((3867, 3884), 'classifier.Classifier', 'Classifier', (['model'], {}), '(model)\n', (3877, 3884), False, 'from classifier import Classifier\n'), ((4841, 4894), 'numpy.random.choice', 'np.random.choice', (['p_un', 'init_p_un_size'], {'replace': '(False)'}), '(p_un, init_p_un_size, replace=False)\n', (4857, 4894), True, 'import numpy as np\n'), ((5015, 5068), 'numpy.random.choice', 'np.random.choice', (['n_un', 'init_n_un_size'], {'replace': '(False)'}), '(n_un, init_n_un_size, replace=False)\n', (5031, 5068), True, 'import numpy as np\n'), ((5567, 5619), 'numpy.random.choice', 'np.random.choice', (['p_rest', 'init_p_size'], {'replace': '(False)'}), '(p_rest, init_p_size, replace=False)\n', (5583, 5619), True, 'import numpy as np\n'), ((5629, 5681), 'numpy.random.choice', 'np.random.choice', 
(['n_rest', 'init_n_size'], {'replace': '(False)'}), '(n_rest, init_n_size, replace=False)\n', (5645, 5681), True, 'import numpy as np\n'), ((1020, 1045), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1043, 1045), False, 'import torch\n'), ((5713, 5760), 'numpy.concatenate', 'np.concatenate', (['[drawn, drawn2, drawn3, drawn4]'], {}), '([drawn, drawn2, drawn3, drawn4])\n', (5727, 5760), True, 'import numpy as np\n'), ((6401, 6477), 'classifier.Classifier', 'Classifier', (['model'], {'pho_p': 'pho_p', 'pho_n': 'pho_n', 'lr': 'learning_rate', 'weighted': '(True)'}), '(model, pho_p=pho_p, pho_n=pho_n, lr=learning_rate, weighted=True)\n', (6411, 6477), False, 'from classifier import Classifier\n'), ((1232, 1253), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1251, 1253), True, 'import torchvision.transforms as transforms\n'), ((1255, 1291), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5,)', '(1.0,)'], {}), '((0.5,), (1.0,))\n', (1275, 1291), True, 'import torchvision.transforms as transforms\n'), ((3055, 3076), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(3)', '(5)', '(1)'], {}), '(1, 3, 5, 1)\n', (3064, 3076), True, 'import torch.nn as nn\n'), ((3098, 3119), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(6)', '(5)', '(1)'], {}), '(3, 6, 5, 1)\n', (3107, 3119), True, 'import torch.nn as nn\n'), ((3139, 3163), 'torch.nn.Linear', 'nn.Linear', (['(4 * 4 * 6)', '(20)'], {}), '(4 * 4 * 6, 20)\n', (3148, 3163), True, 'import torch.nn as nn\n'), ((3179, 3195), 'torch.nn.Linear', 'nn.Linear', (['(20)', '(1)'], {}), '(20, 1)\n', (3188, 3195), True, 'import torch.nn as nn\n'), ((3269, 3290), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)', '(2)'], {}), '(x, 2, 2)\n', (3281, 3290), True, 'import torch.nn.functional as F\n'), ((3337, 3358), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)', '(2)'], {}), '(x, 2, 2)\n', (3349, 3358), True, 'import torch.nn.functional as F\n'), 
((3433, 3469), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training'}), '(x, training=self.training)\n', (3442, 3469), True, 'import torch.nn.functional as F\n'), ((3624, 3654), 'torch.nn.Linear', 'nn.Linear', (['n_pca_components', '(1)'], {}), '(n_pca_components, 1)\n', (3633, 3654), True, 'import torch.nn as nn\n'), ((4100, 4119), 'numpy.abs', 'np.abs', (['(probs - 0.5)'], {}), '(probs - 0.5)\n', (4106, 4119), True, 'import numpy as np\n'), ((5940, 5958), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5956, 5958), True, 'import numpy as np\n'), ((6033, 6051), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (6049, 6051), True, 'import numpy as np\n'), ((2119, 2147), 'torch.from_numpy', 'torch.from_numpy', (['train_data'], {}), '(train_data)\n', (2135, 2147), False, 'import torch\n'), ((2184, 2214), 'torch.from_numpy', 'torch.from_numpy', (['train_labels'], {}), '(train_labels)\n', (2200, 2214), False, 'import torch\n'), ((5432, 5455), 'numpy.logical_not', 'np.logical_not', (['dr_idxs'], {}), '(dr_idxs)\n', (5446, 5455), True, 'import numpy as np\n'), ((5519, 5542), 'numpy.logical_not', 'np.logical_not', (['dr_idxs'], {}), '(dr_idxs)\n', (5533, 5542), True, 'import numpy as np\n'), ((2821, 2859), 'torch.from_numpy', 'torch.from_numpy', (['test_data[used_idxs]'], {}), '(test_data[used_idxs])\n', (2837, 2859), False, 'import torch\n'), ((2886, 2926), 'torch.from_numpy', 'torch.from_numpy', (['test_labels[used_idxs]'], {}), '(test_labels[used_idxs])\n', (2902, 2926), False, 'import torch\n'), ((3956, 3976), 'torch.autograd.Variable', 'Variable', (['train_data'], {}), '(train_data)\n', (3964, 3976), False, 'from torch.autograd import Variable\n'), ((4013, 4030), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['output'], {}), '(output)\n', (4022, 4030), True, 'import torch.nn.functional as F\n')] |
# import
import numpy as np
import json
import pandas as pd
import torch
import os
def seed_everything(seed):
    """Seed all random number generators used in training for reproducibility.

    Fixes: the original only seeded NumPy and the torch CPU RNG; the Python
    builtin ``random`` module and the CUDA RNGs were left unseeded, so runs
    using them were not reproducible despite the function's name.

    Parameters
    ----------
    seed : int
        Seed applied to every RNG (Python ``random``, NumPy, torch CPU/GPU).
    """
    import random  # local import keeps the fix self-contained

    random.seed(seed)
    np.random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        # GPU generators are independent of the CPU one and need explicit seeding.
        torch.cuda.manual_seed_all(seed)
seed_everything(42)
class TrainPipeline:
    """End-to-end training pipeline.

    Loads the cross-validation and test split tables, fits the supplied model
    on the train/validation splits, saves the trained weights, and can dump
    per-sample debug data.
    """

    def __init__(self, hparams, gpu, model, Dataset_train):
        """
        Parameters
        ----------
        hparams : dict
            Hyper-parameters. Must contain at least 'optimizer_hparams' (with
            'lr'), 'split_table_path', 'test_split_table_path', 'dataset',
            'model_path', 'model_name' and 'debug_path'.
        gpu
            GPU specifier; stored and forwarded to the model in ``train``.
        model
            Model class; instantiated lazily in ``train``.
        Dataset_train
            Dataset class used for both the train and validation splits.
        """
        self.hparams = hparams
        self.gpu = gpu
        self.Dataset_train = Dataset_train
        print('\n')
        print('Selected Learning rate:', self.hparams['optimizer_hparams']['lr'])
        print('\n')
        self.exclusions = []
        self.splits, self.splits_test = self.load_split_table()
        self.model = model

    def load_split_table(self):
        """Read the CV and test split JSON tables into single-row DataFrames.

        Returns
        -------
        tuple of (pd.DataFrame, pd.DataFrame)
            (cross-validation splits, test splits), one row each.
        """
        # Fix: the original called open() without closing, leaking file
        # handles; context managers guarantee the files are closed.
        with open(self.hparams['split_table_path']) as fp:
            splits_cv = pd.DataFrame([json.load(fp)])
        with open(self.hparams['test_split_table_path']) as fp:
            splits_test = pd.DataFrame([json.load(fp)])
        return splits_cv, splits_test

    def train(self):
        """Instantiate the model, fit it on the train/val splits, and save it.

        Returns
        -------
        tuple
            (final contrastive loss on the validation split, value returned
            by the model's ``fit`` — used in the saved-model filename).
        """
        self.model = self.model(hparams=self.hparams, gpu=self.gpu)
        train = self.Dataset_train(
            self.splits['train'].values[0], aug=True, dataset=self.hparams['dataset'],
        )
        valid = self.Dataset_train(self.splits['val'].values[0], aug=True, dataset=self.hparams['dataset'],)
        # train model
        start_training = self.model.fit(train=train, valid=valid)
        # get model predictions
        contrastive_loss = self.model.predict(valid)
        print("Model's final contrastive loss: ", contrastive_loss)
        # save the model; the filename encodes the split table, the rounded
        # validation loss and the fit() return value
        self.model.save(
            self.hparams['model_path']
            + self.hparams['model_name']
            + f"_{self.hparams['split_table_path'].split('/')[-1][:-5]}"
            + '_fold_'
            + str(np.round(contrastive_loss, 2))
            + '_'
            + str(start_training)
        )
        return contrastive_loss, start_training

    def save_debug_data(self, error, validation_list):
        """Write one JSON file of per-sample error values per validation item.

        Parameters
        ----------
        error
            Indexable collection of array-like per-sample errors; element i
            corresponds to ``validation_list[i]``.
        validation_list
            Iterable of file paths; the parent directory name and file stem
            determine where each debug JSON is written.

        Returns
        -------
        bool
            Always True on completion.
        """
        for index, data in enumerate(validation_list):
            patient_fold = data.split('/')[-2]
            data = data.split('/')[-1]
            out_json = {}
            out_json['error'] = error[index].tolist()
            os.makedirs(self.hparams['debug_path'] + patient_fold, exist_ok=True)
            # save debug data
            with open(self.hparams['debug_path'] + patient_fold + '/' + f'{data[:-4]}.json', 'w') as outfile:
                json.dump(out_json, outfile)
        return True
| [
"torch.manual_seed",
"os.makedirs",
"json.dump",
"numpy.random.seed",
"numpy.round"
] | [((116, 136), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (130, 136), True, 'import numpy as np\n'), ((186, 209), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (203, 209), False, 'import torch\n'), ((2196, 2265), 'os.makedirs', 'os.makedirs', (["(self.hparams['debug_path'] + patient_fold)"], {'exist_ok': '(True)'}), "(self.hparams['debug_path'] + patient_fold, exist_ok=True)\n", (2207, 2265), False, 'import os\n'), ((2422, 2450), 'json.dump', 'json.dump', (['out_json', 'outfile'], {}), '(out_json, outfile)\n', (2431, 2450), False, 'import json\n'), ((1761, 1790), 'numpy.round', 'np.round', (['contrastive_loss', '(2)'], {}), '(contrastive_loss, 2)\n', (1769, 1790), True, 'import numpy as np\n')] |
"""
Unit test for util (i.e. for the parameterisation maths)
"""
import numpy as np
from fourbody import util
def test_inv_mass_stationary():
    """
    A particle at rest has invariant mass equal to its energy
    """
    b0_mass = 5279.65
    zero_momentum = np.array([[0.0]])
    energy = np.array([[b0_mass]])
    computed = util._invariant_masses(
        zero_momentum, zero_momentum, zero_momentum, energy
    )
    assert np.allclose(computed, np.array([[b0_mass]]))
def test_inv_mass():
    """
    A moving particle's four-momentum should reproduce the charged pion mass
    """
    expected_mass = 139.57018
    px, py, pz = (
        np.array([[-2405.25192233]]),
        np.array([[1017.71934261]]),
        np.array([[-128.6045092]]),
    )
    energy = np.array([[2618.58901417]])
    result = util._invariant_masses(px, py, pz, energy)
    assert np.allclose(result, np.array([[expected_mass]]))
def test_m_plus_minus():
    """
    m+ and m- should equal the invariant masses of the (k, pi3) and
    (pi1, pi2) pairs computed directly
    """
    kaon = np.array([[53.89743437], [-385.779829], [-1782.366763], [2116.4343876]])
    pion1 = np.array([[-241.70298533], [-13.62889997], [1166.66733619], [1199.66603207]])
    pion2 = np.array([[-1176.18918034], [612.05350502], [482.97638869], [1418.01851474]])
    pion3 = np.array([[463.9947313], [-212.64477997], [132.72300175], [545.53106553]])
    m_plus, m_minus = util.m_plus_minus(kaon, pion1, pion2, pion3)
    expected_plus = util._invariant_masses(*np.add(kaon, pion3))
    expected_minus = util._invariant_masses(*np.add(pion1, pion2))
    assert np.allclose(m_plus, expected_plus)
    assert np.allclose(m_minus, expected_minus)
def test_phi_range():
    """
    phi recovered from (cos, sin) pairs should match the original angle
    over the full [-pi, pi] range
    """
    angles = np.linspace(-np.pi, np.pi)
    recovered = util.phi(np.cos(angles), np.sin(angles))
    assert np.allclose(recovered, angles)
| [
"numpy.allclose",
"numpy.add",
"fourbody.util._invariant_masses",
"fourbody.util.m_plus_minus",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"fourbody.util.phi",
"numpy.sin"
] | [((275, 292), 'numpy.array', 'np.array', (['[[0.0]]'], {}), '([[0.0]])\n', (283, 292), True, 'import numpy as np\n'), ((306, 327), 'numpy.array', 'np.array', (['[[b0_mass]]'], {}), '([[b0_mass]])\n', (314, 327), True, 'import numpy as np\n'), ((609, 637), 'numpy.array', 'np.array', (['[[-2405.25192233]]'], {}), '([[-2405.25192233]])\n', (617, 637), True, 'import numpy as np\n'), ((647, 674), 'numpy.array', 'np.array', (['[[1017.71934261]]'], {}), '([[1017.71934261]])\n', (655, 674), True, 'import numpy as np\n'), ((684, 710), 'numpy.array', 'np.array', (['[[-128.6045092]]'], {}), '([[-128.6045092]])\n', (692, 710), True, 'import numpy as np\n'), ((719, 746), 'numpy.array', 'np.array', (['[[2618.58901417]]'], {}), '([[2618.58901417]])\n', (727, 746), True, 'import numpy as np\n'), ((932, 1004), 'numpy.array', 'np.array', (['[[53.89743437], [-385.779829], [-1782.366763], [2116.4343876]]'], {}), '([[53.89743437], [-385.779829], [-1782.366763], [2116.4343876]])\n', (940, 1004), True, 'import numpy as np\n'), ((1015, 1092), 'numpy.array', 'np.array', (['[[-241.70298533], [-13.62889997], [1166.66733619], [1199.66603207]]'], {}), '([[-241.70298533], [-13.62889997], [1166.66733619], [1199.66603207]])\n', (1023, 1092), True, 'import numpy as np\n'), ((1103, 1180), 'numpy.array', 'np.array', (['[[-1176.18918034], [612.05350502], [482.97638869], [1418.01851474]]'], {}), '([[-1176.18918034], [612.05350502], [482.97638869], [1418.01851474]])\n', (1111, 1180), True, 'import numpy as np\n'), ((1191, 1265), 'numpy.array', 'np.array', (['[[463.9947313], [-212.64477997], [132.72300175], [545.53106553]]'], {}), '([[463.9947313], [-212.64477997], [132.72300175], [545.53106553]])\n', (1199, 1265), True, 'import numpy as np\n'), ((1287, 1322), 'fourbody.util.m_plus_minus', 'util.m_plus_minus', (['k', 'pi1', 'pi2', 'pi3'], {}), '(k, pi1, pi2, pi3)\n', (1304, 1322), False, 'from fourbody import util\n'), ((1586, 1612), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi'], {}), 
'(-np.pi, np.pi)\n', (1597, 1612), True, 'import numpy as np\n'), ((1627, 1639), 'numpy.cos', 'np.cos', (['phis'], {}), '(phis)\n', (1633, 1639), True, 'import numpy as np\n'), ((1654, 1666), 'numpy.sin', 'np.sin', (['phis'], {}), '(phis)\n', (1660, 1666), True, 'import numpy as np\n'), ((1690, 1716), 'fourbody.util.phi', 'util.phi', (['cosphis', 'sinphis'], {}), '(cosphis, sinphis)\n', (1698, 1716), False, 'from fourbody import util\n'), ((1729, 1763), 'numpy.allclose', 'np.allclose', (['calculated_phis', 'phis'], {}), '(calculated_phis, phis)\n', (1740, 1763), True, 'import numpy as np\n'), ((361, 421), 'fourbody.util._invariant_masses', 'util._invariant_masses', (['momentum', 'momentum', 'momentum', 'energy'], {}), '(momentum, momentum, momentum, energy)\n', (383, 421), False, 'from fourbody import util\n'), ((431, 452), 'numpy.array', 'np.array', (['[[b0_mass]]'], {}), '([[b0_mass]])\n', (439, 452), True, 'import numpy as np\n'), ((771, 808), 'fourbody.util._invariant_masses', 'util._invariant_masses', (['px', 'py', 'pz', 'e'], {}), '(px, py, pz, e)\n', (793, 808), False, 'from fourbody import util\n'), ((810, 831), 'numpy.array', 'np.array', (['[[pi_mass]]'], {}), '([[pi_mass]])\n', (818, 831), True, 'import numpy as np\n'), ((1378, 1392), 'numpy.add', 'np.add', (['k', 'pi3'], {}), '(k, pi3)\n', (1384, 1392), True, 'import numpy as np\n'), ((1450, 1466), 'numpy.add', 'np.add', (['pi1', 'pi2'], {}), '(pi1, pi2)\n', (1456, 1466), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import sys
import pandas
import numpy
sys.path.append('../')
import heatmap
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.lines import Line2D
import matplotlib
# Global matplotlib configuration: sans-serif font, custom math font set,
# and reduced title padding for the clustergram figure.
matplotlib.rcParams['font.sans-serif'] = ['FreeSans', ]
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams['axes.titlepad'] = 4
if __name__ == '__main__':
    # Load the precomputed species-by-species rRNA distance matrix.
    df = pandas.read_csv('./rRNAs_analysis/final_distance_matrix.tsv', sep='\t',
                         index_col=0)
    matrix = df.values
    # Ward-linkage hierarchical clustering applied to both rows and columns;
    # 'ratio' controls the dendrogram size relative to the heatmap.
    row_clustering_data={'data':matrix, 'method':'ward', 'metric':None, 'ratio':0.15}
    col_clustering_data={'data':matrix, 'method':'ward', 'metric':None, 'ratio':0.15}
    # Axis labels are the matrix index entries, capitalized for display.
    labels = [i for i in list(df.index)]
    labels_for_plot = [i.capitalize() for i in labels]
    labels_dict = dict(zip(labels, labels_for_plot))
    # Map taxonomy -> color from the colors table.
    colors_df = pandas.read_csv('./files/colors.csv', sep=',')
    colors_taxonomy_mapping = dict(zip(colors_df.taxonomy.tolist(), colors_df.color.tolist()))
    # Map species name -> taxonomy from the species table.
    df = pandas.read_csv('./files/species.tsv', sep='\t')
    # NOTE(review): 'species' is assigned but never used below.
    species = df['abbreviation'].tolist()
    species_taxonomy_mapping = dict(zip(df.species_name.tolist(), df.taxonomy.tolist()))
    # Per-label color: species -> taxonomy -> color.
    colors = [colors_taxonomy_mapping[species_taxonomy_mapping[i]] for i in labels]
    colors_data = {'colors':colors, 'ratio':0.02}
    # Build the taxonomy legend: parallel lists of legend labels and
    # Line2D marker handles, one per taxonomy.
    colors_legend_data = {}
    colors_legend_data.update({'patches': [[], []]})
    for key, value in colors_taxonomy_mapping.items():
        colors_legend_data['patches'][0].append(key.capitalize())
        p = Line2D([0], [0], marker='o', color=value, markerfacecolor=value,
                   markersize=14, label=key.capitalize())
        colors_legend_data['patches'][1].append(p)
    colors_legend_data.update({'title':'Taxonomy\n'})
    colors_legend_data.update({'bbox_anchor':(0.165,0.45), 'fontsize':10,
                               'handlelength':1.2, 'handletextpad':0.5,
                               'handleheight':2, 'title_size':12})
    # Colorbar legend with ticks at 0.0, 0.2, ..., 1.0.
    legend_labels = numpy.arange(0,1.01,0.2).round(2)
    legend_title = 'Normalized\nDistance\n'
    legend_data = {'x':0.09, 'y':0.47, 'w':0.03, 'h':0.25, 'labels':legend_labels,
                   'labels_size':10, 'cbar_kws':{'ticks':legend_labels},
                   'title':legend_title, 'title_size':12}
    # Per-label formatting: color each tick label by its taxonomy color.
    specific_labels_format = {}
    for label in labels:
        tmp_dict = {'color': colors_taxonomy_mapping[species_taxonomy_mapping[label]],
                    'weight':900}
        specific_labels_format.update({labels_dict[label]:tmp_dict})
    x_axis_data = {'labels': labels_for_plot, 'specific_labels_format':specific_labels_format,
                   'fontdict':{'size':0.4, 'rotation':90}}
    y_axis_data = {'labels': labels_for_plot, 'specific_labels_format':specific_labels_format,
                   'fontdict':{'size':0.4}}
    heatmap_data={'data':matrix, 'type':'distances', 'x_ratio':0.8, 'y_ratio':0.8}
    # 10-step grayscale colormap from light gray to black.
    cmap = LinearSegmentedColormap.from_list("my_colormap", ('#eaeaea', '#000000'), N=10)
    # Assemble the clustergram from the local heatmap module.
    c = heatmap.Clustergram(heatmap_data, figsize=(8,8), cmap=cmap,
                            y_axis_data=y_axis_data,
                            x_axis_data=x_axis_data,
                            row_clustering_data=row_clustering_data,
                            col_clustering_data=col_clustering_data,
                            row_colors_data = colors_data,
                            col_colors_data = colors_data,
                            colors_legend_data = colors_legend_data,
                            vmin=0.0, vmax=1, legend_data=legend_data,
                            linecolor='#e0e0e0', linewidth=0.005)
    c.construction()
    c.set_coords()
    c.set_labels()
    # Shrink heatmap tick marks on both axes.
    c.clustergrid.ax_heatmap.xaxis.set_tick_params(width=0.3, length=1.5, pad=1)
    c.clustergrid.ax_heatmap.yaxis.set_tick_params(width=0.3, length=1.5, pad=1)
    #c.clustergrid.savefig('./rRNAs_analysis/rRNAs_heatmap.tiff', dpi=800, format='tiff',
    #                      pad_inches=0.02)
    c.clustergrid.savefig('./rRNAs_analysis/rRNAs_heatmap.png', dpi=800, format='png',
                          pad_inches=0.02)
"pandas.read_csv",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"heatmap.Clustergram",
"sys.path.append",
"numpy.arange"
] | [((63, 85), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (78, 85), False, 'import sys\n'), ((400, 488), 'pandas.read_csv', 'pandas.read_csv', (['"""./rRNAs_analysis/final_distance_matrix.tsv"""'], {'sep': '"""\t"""', 'index_col': '(0)'}), "('./rRNAs_analysis/final_distance_matrix.tsv', sep='\\t',\n index_col=0)\n", (415, 488), False, 'import pandas\n'), ((881, 927), 'pandas.read_csv', 'pandas.read_csv', (['"""./files/colors.csv"""'], {'sep': '""","""'}), "('./files/colors.csv', sep=',')\n", (896, 927), False, 'import pandas\n'), ((1037, 1085), 'pandas.read_csv', 'pandas.read_csv', (['"""./files/species.tsv"""'], {'sep': '"""\t"""'}), "('./files/species.tsv', sep='\\t')\n", (1052, 1085), False, 'import pandas\n'), ((3026, 3104), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""my_colormap"""', "('#eaeaea', '#000000')"], {'N': '(10)'}), "('my_colormap', ('#eaeaea', '#000000'), N=10)\n", (3059, 3104), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((3118, 3511), 'heatmap.Clustergram', 'heatmap.Clustergram', (['heatmap_data'], {'figsize': '(8, 8)', 'cmap': 'cmap', 'y_axis_data': 'y_axis_data', 'x_axis_data': 'x_axis_data', 'row_clustering_data': 'row_clustering_data', 'col_clustering_data': 'col_clustering_data', 'row_colors_data': 'colors_data', 'col_colors_data': 'colors_data', 'colors_legend_data': 'colors_legend_data', 'vmin': '(0.0)', 'vmax': '(1)', 'legend_data': 'legend_data', 'linecolor': '"""#e0e0e0"""', 'linewidth': '(0.005)'}), "(heatmap_data, figsize=(8, 8), cmap=cmap, y_axis_data=\n y_axis_data, x_axis_data=x_axis_data, row_clustering_data=\n row_clustering_data, col_clustering_data=col_clustering_data,\n row_colors_data=colors_data, col_colors_data=colors_data,\n colors_legend_data=colors_legend_data, vmin=0.0, vmax=1, legend_data=\n legend_data, linecolor='#e0e0e0', linewidth=0.005)\n", (3137, 3511), False, 'import heatmap\n'), ((2047, 2073), 
'numpy.arange', 'numpy.arange', (['(0)', '(1.01)', '(0.2)'], {}), '(0, 1.01, 0.2)\n', (2059, 2073), False, 'import numpy\n')] |
from distutils.version import LooseVersion
from io import StringIO
from itertools import product
from string import ascii_lowercase
import struct
import sys
import types
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_equal,
)
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
import scipy
from scipy import stats
from scipy.optimize import OptimizeResult
import statsmodels.regression.linear_model as smlm
import statsmodels.tools as smtools
from arch.data import sp500
from arch.typing import Literal
from arch.univariate.base import ARCHModelForecast, ARCHModelResult, _align_forecast
from arch.univariate.distribution import (
GeneralizedError,
Normal,
SkewStudent,
StudentsT,
)
from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model
from arch.univariate.volatility import (
APARCH,
ARCH,
EGARCH,
FIGARCH,
GARCH,
HARCH,
ConstantVariance,
EWMAVariance,
FixedVariance,
MIDASHyperbolic,
RiskMetrics2006,
)
from arch.utility.exceptions import ConvergenceWarning, DataScaleWarning
# Prefer the compiled (Cython) recursions; fall back to the pure-Python
# implementations when the extension cannot be imported.
USE_CYTHON = False
try:
    import arch.univariate.recursions
    USE_CYTHON = True
except ImportError:
    import arch.univariate.recursions_python  # noqa
if USE_CYTHON:
    rec: types.ModuleType = arch.univariate.recursions
else:
    rec = arch.univariate.recursions_python
# Plot-related tests are gated on matplotlib being installed.
try:
    import matplotlib.pyplot  # noqa
    HAS_MATPLOTLIB = True
except ImportError:
    HAS_MATPLOTLIB = False
# Looser tolerance on 32-bit platforms (pointer size < 8 bytes).
RTOL = 1e-4 if struct.calcsize("P") < 8 else 1e-6
DISPLAY: Literal["off"] = "off"
SP_LT_14 = LooseVersion(scipy.__version__) < LooseVersion("1.4")
# S&P 500 adjusted-close returns, scaled to percent.
SP500 = 100 * sp500.load()["Adj Close"].pct_change().dropna()
@pytest.fixture(scope="module", params=[True, False])
def simulated_data(request):
    """Simulated zero-mean GARCH(1,1) series, as ndarray or pandas object."""
    seeded_state = np.random.RandomState(1)
    model = ZeroMean(volatility=GARCH(), distribution=Normal(seed=seeded_state))
    simulated = model.simulate(np.array([0.1, 0.1, 0.88]), 1000)
    if request.param:
        return np.asarray(simulated.data)
    return simulated.data
class TestMeanModel(object):
@classmethod
def setup_class(cls):
cls.rng = RandomState(1234)
cls.T = 1000
cls.resids = cls.rng.standard_normal(cls.T)
zm = ZeroMean()
zm.volatility = GARCH()
seed = 12345
random_state = np.random.RandomState(seed)
zm.distribution = Normal(seed=random_state)
sim_data = zm.simulate(np.array([0.1, 0.1, 0.8]), 1000)
with pytest.raises(ValueError):
zm.simulate(np.array([0.1, 0.1, 0.8]), 1000, initial_value=3.0)
date_index = pd.date_range("2000-12-31", periods=1000, freq="W")
cls.y = sim_data.data.values
cls.y_df = pd.DataFrame(
cls.y[:, None], columns=["LongVariableName"], index=date_index
)
cls.y_series = pd.Series(
cls.y, name="VeryVeryLongLongVariableName", index=date_index
)
x = cls.resids + cls.rng.standard_normal(cls.T)
cls.x = x[:, None]
cls.x_df = pd.DataFrame(cls.x, columns=["LongExogenousName"])
cls.resid_var = np.var(cls.resids)
cls.sigma2 = np.zeros_like(cls.resids)
cls.backcast = 1.0
def test_constant_mean(self):
cm = ConstantMean(self.y)
parameters = np.array([5.0, 1.0])
cm.simulate(parameters, self.T)
assert_equal(cm.num_params, 1)
with pytest.raises(ValueError):
cm.simulate(parameters, self.T, x=np.array(10))
bounds = cm.bounds()
assert_equal(bounds, [(-np.inf, np.inf)])
assert_equal(cm.constant, True)
a, b = cm.constraints()
assert_equal(a, np.empty((0, 1)))
assert_equal(b, np.empty((0,)))
assert isinstance(cm.volatility, ConstantVariance)
assert isinstance(cm.distribution, Normal)
assert cm.lags is None
res = cm.fit(disp=DISPLAY)
expected = np.array([self.y.mean(), self.y.var()])
assert_almost_equal(res.params, expected)
forecasts = res.forecast(horizon=20, start=20, reindex=False)
direct = pd.DataFrame(
index=np.arange(self.y.shape[0]),
columns=["h.{0:>02d}".format(i + 1) for i in range(20)],
dtype="double",
)
direct.iloc[20:, :] = res.params.iloc[0]
# TODO
# assert_frame_equal(direct, forecasts)
assert isinstance(forecasts, ARCHModelForecast)
assert isinstance(cm.__repr__(), str)
assert isinstance(cm.__str__(), str)
assert "<strong>" in cm._repr_html_()
with pytest.raises(ValueError, match="horizon must be an integer >= 1"):
res.forecast(horizon=0, start=20, reindex=False)
def test_zero_mean(self):
zm = ZeroMean(self.y)
parameters = np.array([1.0])
data = zm.simulate(parameters, self.T)
assert_equal(data.shape, (self.T, 3))
assert_equal(data["data"].shape[0], self.T)
assert_equal(zm.num_params, 0)
bounds = zm.bounds()
assert_equal(bounds, [])
assert_equal(zm.constant, False)
a, b = zm.constraints()
assert_equal(a, np.empty((0, 0)))
assert_equal(b, np.empty((0,)))
assert isinstance(zm.volatility, ConstantVariance)
assert isinstance(zm.distribution, Normal)
assert zm.lags is None
res = zm.fit(disp=DISPLAY)
assert_almost_equal(res.params, np.array([np.mean(self.y**2)]))
forecasts = res.forecast(horizon=99, reindex=False)
direct = pd.DataFrame(
index=np.arange(self.y.shape[0]),
columns=["h.{0:>02d}".format(i + 1) for i in range(99)],
dtype="double",
)
direct.iloc[:, :] = 0.0
assert isinstance(forecasts, ARCHModelForecast)
# TODO
# assert_frame_equal(direct, forecasts)
garch = GARCH()
zm.volatility = garch
zm.fit(update_freq=0, disp=DISPLAY)
assert isinstance(zm.__repr__(), str)
assert isinstance(zm.__str__(), str)
assert "<strong>" in zm._repr_html_()
def test_harx(self):
harx = HARX(self.y, self.x, lags=[1, 5, 22])
assert harx.x is self.x
params = np.array([1.0, 0.4, 0.3, 0.2, 1.0, 1.0])
harx.simulate(params, self.T, x=self.rng.randn(self.T + 500, 1))
iv = self.rng.randn(22, 1)
x = self.rng.randn(self.T + 500, 1)
alt_iv_data = harx.simulate(params, self.T, x=x, initial_value=1.0)
assert_equal(alt_iv_data.shape, (self.T, 3))
data = harx.simulate(params, self.T, x=x, initial_value=iv)
assert_equal(data.shape, (self.T, 3))
cols = ["data", "volatility", "errors"]
for c in cols:
assert c in data
bounds = harx.bounds()
for b in bounds:
assert_equal(b[0], -np.inf)
assert_equal(b[1], np.inf)
assert_equal(len(bounds), 5)
assert_equal(harx.num_params, 1 + 3 + self.x.shape[1])
assert_equal(harx.constant, True)
a, b = harx.constraints()
assert_equal(a, np.empty((0, 5)))
assert_equal(b, np.empty(0))
res = harx.fit(disp=DISPLAY)
with pytest.raises(ValueError):
res.forecast(params=np.array([1.0, 1.0]), reindex=False)
nobs = self.T - 22
rhs = np.ones((nobs, 5))
y = self.y
lhs = y[22:]
for i in range(self.T - 22):
rhs[i, 1] = y[i + 21]
rhs[i, 2] = np.mean(y[i + 17 : i + 22])
rhs[i, 3] = np.mean(y[i : i + 22])
rhs[:, 4] = self.x[22:, 0]
params = np.linalg.pinv(rhs).dot(lhs)
assert_almost_equal(params, res.params[:-1])
assert harx.hold_back is None
assert_equal(harx.lags, [1, 5, 22])
assert_equal(harx.name, "HAR-X")
assert_equal(harx.use_rotated, False)
assert isinstance(harx.__repr__(), str)
harx._repr_html_()
res = harx.fit(cov_type="classic", disp=DISPLAY)
assert isinstance(res.__repr__(), str)
def test_harx_error(self):
with pytest.raises(ValueError):
HARX(self.y, self.x, lags=[1, -5, 22])
with pytest.raises(ValueError):
HARX(self.y, self.x, lags=[0, 1, 5, 22])
with pytest.raises(ValueError):
HARX(self.y, self.x, lags=[[-1], [3]])
with pytest.raises(ValueError):
HARX(self.y, self.x, lags=[[0], [0]])
with pytest.raises(ValueError):
HARX(self.y, self.x, lags=[[1, 1, 3], [2, 3, 3]])
with pytest.raises(ValueError):
HARX(self.y, self.x, lags=[[[1], [3]]])
@pytest.mark.parametrize("constant", [True, False])
def test_har(self, constant):
har = HARX(self.y, lags=[1, 5, 22], constant=constant)
params = np.array([1.0, 0.4, 0.3, 0.2, 1.0])
if not constant:
params = params[1:]
data = har.simulate(params, self.T)
assert_equal(data.shape, (self.T, 3))
cols = ["data", "volatility", "errors"]
for c in cols:
assert c in data
bounds = har.bounds()
for b in bounds:
assert_equal(b[0], -np.inf)
assert_equal(b[1], np.inf)
assert_equal(len(bounds), 3 + int(constant))
assert_equal(har.num_params, 3 + int(constant))
assert_equal(har.constant, constant)
a, b = har.constraints()
assert_equal(a, np.empty((0, 3 + int(constant))))
assert_equal(b, np.empty(0))
res = har.fit(disp=DISPLAY)
nobs = self.T - 22
rhs = np.ones((nobs, 4))
y = self.y
lhs = y[22:]
for i in range(self.T - 22):
rhs[i, 1] = y[i + 21]
rhs[i, 2] = np.mean(y[i + 17 : i + 22])
rhs[i, 3] = np.mean(y[i : i + 22])
if not constant:
rhs = rhs[:, 1:]
params = np.linalg.pinv(rhs).dot(lhs)
assert_almost_equal(params, res.params[:-1])
with pytest.raises(ValueError):
res.forecast(horizon=6, start=0, reindex=False)
forecasts = res.forecast(horizon=6, reindex=False)
t = self.y.shape[0]
direct = pd.DataFrame(
index=np.arange(t),
columns=["h." + str(i + 1) for i in range(6)],
dtype="float64",
)
params = np.asarray(res.params)
fcast = np.zeros(t + 6)
for i in range(21, t):
fcast[: i + 1] = self.y[: i + 1]
fcast[i + 1 :] = 0.0
for h in range(6):
fcast[i + h + 1] = params[0]
fcast[i + h + 1] += params[1] * fcast[i + h : i + h + 1]
fcast[i + h + 1] += params[2] * fcast[i + h - 4 : i + h + 1].mean()
fcast[i + h + 1] += params[3] * fcast[i + h - 21 : i + h + 1].mean()
direct.iloc[i, :] = fcast[i + 1 : i + 7]
assert isinstance(forecasts, ARCHModelForecast)
# TODO
# assert_frame_equal(direct, forecasts)
forecasts = res.forecast(res.params, horizon=6, reindex=False)
assert isinstance(forecasts, ARCHModelForecast)
# TODO
# assert_frame_equal(direct, forecasts)
assert har.hold_back is None
assert_equal(har.lags, [1, 5, 22])
assert_equal(har.name, "HAR")
assert_equal(har.use_rotated, False)
har = HARX(self.y_series, lags=[1, 5, 22])
res = har.fit(disp=DISPLAY)
direct = pd.DataFrame(
index=self.y_series.index,
columns=["h." + str(i + 1) for i in range(6)],
dtype="float64",
)
forecasts = res.forecast(horizon=6, reindex=False)
params = np.asarray(res.params)
fcast = np.zeros(t + 6)
for i in range(21, t):
fcast[: i + 1] = self.y[: i + 1]
fcast[i + 1 :] = 0.0
for h in range(6):
fcast[i + h + 1] = params[0]
fcast[i + h + 1] += params[1] * fcast[i + h : i + h + 1]
fcast[i + h + 1] += params[2] * fcast[i + h - 4 : i + h + 1].mean()
fcast[i + h + 1] += params[3] * fcast[i + h - 21 : i + h + 1].mean()
direct.iloc[i, :] = fcast[i + 1 : i + 7]
assert isinstance(forecasts, ARCHModelForecast)
# TODO
# assert_frame_equal(direct, forecasts)
def test_arx(self):
arx = ARX(self.y, self.x, lags=3, hold_back=10, constant=False)
params = np.array([0.4, 0.3, 0.2, 1.0, 1.0])
data = arx.simulate(params, self.T, x=self.rng.randn(self.T + 500, 1))
assert isinstance(data, pd.DataFrame)
bounds = arx.bounds()
for b in bounds:
assert_equal(b[0], -np.inf)
assert_equal(b[1], np.inf)
assert_equal(len(bounds), 4)
assert_equal(arx.num_params, 4)
assert not arx.constant
a, b = arx.constraints()
assert_equal(a, np.empty((0, 4)))
assert_equal(b, np.empty(0))
res = arx.fit(last_obs=900, disp=DISPLAY)
assert res.fit_stop == 900
nobs = 900 - 10
rhs = np.zeros((nobs, 4))
y = self.y
lhs = y[10:900]
for i in range(10, 900):
rhs[i - 10, 0] = y[i - 1]
rhs[i - 10, 1] = y[i - 2]
rhs[i - 10, 2] = y[i - 3]
rhs[:, 3] = self.x[10:900, 0]
params = np.linalg.pinv(rhs).dot(lhs)
assert_almost_equal(params, res.params[:-1])
assert_equal(arx.hold_back, 10)
assert_equal(arx.lags, np.array([[1, 2, 3], [1, 2, 3]]))
assert_equal(arx.name, "AR-X")
assert_equal(arx.use_rotated, False)
assert isinstance(arx.__repr__(), str)
arx._repr_html_()
def test_ar(self):
ar = ARX(self.y, lags=3)
params = np.array([1.0, 0.4, 0.3, 0.2, 1.0])
data = ar.simulate(params, self.T)
assert len(data) == self.T
assert_equal(self.y, ar.y)
bounds = ar.bounds()
for b in bounds:
assert_equal(b[0], -np.inf)
assert_equal(b[1], np.inf)
assert_equal(len(bounds), 4)
assert_equal(ar.num_params, 4)
assert ar.constant
a, b = ar.constraints()
assert_equal(a, np.empty((0, 4)))
assert_equal(b, np.empty(0))
res = ar.fit(disp=DISPLAY)
nobs = 1000 - 3
rhs = np.ones((nobs, 4))
y = self.y
lhs = y[3:1000]
for i in range(3, 1000):
rhs[i - 3, 1] = y[i - 1]
rhs[i - 3, 2] = y[i - 2]
rhs[i - 3, 3] = y[i - 3]
params = np.linalg.pinv(rhs).dot(lhs)
assert_almost_equal(params, res.params[:-1])
forecasts = res.forecast(horizon=5, reindex=False)
direct = pd.DataFrame(
index=np.arange(y.shape[0]),
columns=["h." + str(i + 1) for i in range(5)],
dtype="float64",
)
params = res.params.iloc[:-1]
for i in range(2, y.shape[0]):
fcast = np.zeros(y.shape[0] + 5)
fcast[: y.shape[0]] = y.copy()
for h in range(1, 6):
reg = np.array(
[1.0, fcast[i + h - 1], fcast[i + h - 2], fcast[i + h - 3]]
)
fcast[i + h] = reg.dot(params)
direct.iloc[i, :] = fcast[i + 1 : i + 6]
assert isinstance(forecasts, ARCHModelForecast)
# TODO
# assert_frame_equal(direct, forecasts)
assert ar.hold_back is None
assert_equal(ar.lags, np.array([[1, 2, 3], [1, 2, 3]]))
assert_equal(ar.name, "AR")
assert_equal(ar.use_rotated, False)
ar.__repr__()
ar._repr_html_()
ar = ARX(self.y_df, lags=5)
ar.__repr__()
ar = ARX(self.y_series, lags=5)
ar.__repr__()
res = ar.fit(disp=DISPLAY)
assert isinstance(res.resid, pd.Series)
assert isinstance(res.conditional_volatility, pd.Series)
std_resid = res.resid / res.conditional_volatility
std_resid.name = "std_resid"
assert_series_equal(res.std_resid, std_resid)
# Smoke bootstrap
summ = ar.fit(disp=DISPLAY).summary()
assert "Df Model: 6" in str(summ)
assert "Constant Variance" in str(summ)
ar = ARX(self.y, lags=1, volatility=GARCH(), distribution=StudentsT())
res = ar.fit(disp=DISPLAY, update_freq=5, cov_type="classic")
assert isinstance(res.param_cov, pd.DataFrame)
sims = res.forecast(horizon=5, method="simulation", reindex=False)
assert isinstance(sims.simulations.residual_variances, np.ndarray)
assert isinstance(sims.simulations.residuals, np.ndarray)
assert isinstance(sims.simulations.values, np.ndarray)
assert isinstance(sims.simulations.variances, np.ndarray)
def test_ar_no_lags(self):
ar = ARX(self.y, lags=0)
assert ar.lags is None
res = ar.fit(disp=DISPLAY)
assert_almost_equal(res.params[0], self.y.mean())
assert "lags: none" in ar.__str__()
@pytest.mark.skipif(not HAS_MATPLOTLIB, reason="matplotlib not installed")
def test_ar_plot(self):
ar = ARX(self.y, lags=1, volatility=GARCH(), distribution=StudentsT())
res = ar.fit(disp=DISPLAY, update_freq=5, cov_type="mle")
res.plot()
res.plot(annualize="D")
res.plot(annualize="W")
res.plot(annualize="M")
with pytest.raises(ValueError):
res.plot(annualize="unknown")
import matplotlib.pyplot as plt
plt.close("all")
res.plot(scale=360)
res.hedgehog_plot(start=500)
res.hedgehog_plot(start=500, plot_type="mean")
res.hedgehog_plot(plot_type="volatility")
res.hedgehog_plot(start=500, method="simulation", simulations=100)
res.hedgehog_plot(plot_type="volatility", method="bootstrap")
plt.close("all")
def test_arch_arx(self):
self.rng.seed(12345)
x = self.rng.randn(500, 3)
y = x.sum(1) + 3 * self.rng.standard_normal(500)
am = ARX(y=y, x=x)
res = am.fit(disp=DISPLAY)
res.summary()
assert isinstance(res.optimization_result, OptimizeResult)
am.volatility = ARCH(p=2)
results = am.fit(update_freq=0, disp=DISPLAY)
assert isinstance(results.pvalues, pd.Series)
assert_equal(
list(results.pvalues.index),
["Const", "x0", "x1", "x2", "omega", "alpha[1]", "alpha[2]"],
)
am = ARX(y=y, lags=2, x=x)
res = am.fit(disp=DISPLAY)
summ = res.summary().as_text()
res_repr = res.__repr__()
assert str(hex(id(res))) in res_repr
assert summ[:10] == res_repr[:10]
am.volatility = ARCH(p=2)
results = am.fit(update_freq=0, disp=DISPLAY)
assert isinstance(results.pvalues, pd.Series)
assert_equal(
list(results.pvalues.index),
[
"Const",
"y[1]",
"y[2]",
"x0",
"x1",
"x2",
"omega",
"alpha[1]",
"alpha[2]",
],
)
x = pd.DataFrame(x, columns=["x0", "x1", "x2"])
y = pd.Series(y, name="y")
am = ARX(y=y, x=x)
am.fit(disp=DISPLAY).summary()
am.volatility = ARCH(p=2)
results = am.fit(update_freq=0, disp=DISPLAY)
assert isinstance(results.pvalues, pd.Series)
assert_equal(
list(results.pvalues.index),
["Const", "x0", "x1", "x2", "omega", "alpha[1]", "alpha[2]"],
)
def test_arch_model(self):
am = arch_model(self.y)
assert isinstance(am, ConstantMean)
assert isinstance(am.volatility, GARCH)
assert isinstance(am.distribution, Normal)
am = arch_model(self.y, mean="harx", lags=[1, 5, 22])
assert isinstance(am, HARX)
assert isinstance(am.volatility, GARCH)
am = arch_model(self.y, mean="har", lags=[1, 5, 22])
assert isinstance(am, HARX)
assert isinstance(am.volatility, GARCH)
am = arch_model(self.y, self.x, mean="ls")
assert isinstance(am, LS)
assert isinstance(am.volatility, GARCH)
am.__repr__()
am = arch_model(self.y, mean="arx", lags=[1, 5, 22])
assert isinstance(am, ARX)
assert isinstance(am.volatility, GARCH)
am = arch_model(self.y, mean="ar", lags=[1, 5, 22])
assert isinstance(am, ARX)
assert isinstance(am.volatility, GARCH)
am = arch_model(self.y, mean="ar", lags=None)
assert isinstance(am, ARX)
assert isinstance(am.volatility, GARCH)
am = arch_model(self.y, mean="zero")
assert isinstance(am, ZeroMean)
assert isinstance(am.volatility, GARCH)
am = arch_model(self.y, vol="Harch")
assert isinstance(am, ConstantMean)
assert isinstance(am.volatility, HARCH)
am = arch_model(self.y, vol="Constant")
assert isinstance(am, ConstantMean)
assert isinstance(am.volatility, ConstantVariance)
am = arch_model(self.y, vol="arch")
assert isinstance(am.volatility, ARCH)
am = arch_model(self.y, vol="egarch")
assert isinstance(am.volatility, EGARCH)
am = arch_model(self.y, vol="figarch")
assert isinstance(am.volatility, FIGARCH)
am = arch_model(self.y, vol="aparch")
assert isinstance(am.volatility, APARCH)
with pytest.raises(ValueError):
arch_model(self.y, mean="unknown")
with pytest.raises(ValueError):
arch_model(self.y, vol="unknown")
with pytest.raises(ValueError):
arch_model(self.y, dist="unknown")
am.fit(disp=DISPLAY)
def test_pandas(self):
am = arch_model(self.y_df, self.x_df, mean="ls")
assert isinstance(am, LS)
def test_summary(self):
am = arch_model(self.y, mean="ar", lags=[1, 3, 5])
res = am.fit(update_freq=0, disp=DISPLAY)
res.summary()
am = arch_model(self.y, mean="ar", lags=[1, 3, 5], dist="studentst")
assert isinstance(am.distribution, StudentsT)
res = am.fit(update_freq=0, disp=DISPLAY)
res.summary()
am = arch_model(self.y, mean="ar", lags=[1, 3, 5], dist="ged")
assert isinstance(am.distribution, GeneralizedError)
res = am.fit(update_freq=0, disp=DISPLAY)
res.summary()
am = arch_model(self.y, mean="ar", lags=[1, 3, 5], dist="skewt")
res = am.fit(update_freq=0, disp=DISPLAY)
assert isinstance(am.distribution, SkewStudent)
res.summary()
def test_errors(self):
    """Invalid constructor/simulate/fit arguments must raise ``ValueError``."""
    # 2-d lag array is not a valid AR lag specification
    with pytest.raises(ValueError):
        ARX(self.y, lags=np.array([[1, 2], [3, 4]]))
    # Exogenous regressors with one extra observation
    x = self.rng.randn(self.y.shape[0] + 1, 1)
    with pytest.raises(ValueError):
        ARX(self.y, x=x)
    with pytest.raises(ValueError):
        HARX(self.y, lags=np.eye(3))
    # Negative lags are rejected
    with pytest.raises(ValueError):
        ARX(self.y, lags=-1)
    with pytest.raises(ValueError):
        ARX(self.y, x=self.rng.randn(1, 1), lags=-1)
    ar = ARX(self.y, lags=1)
    # Assigning a distribution as the volatility process is invalid
    with pytest.raises(ValueError):
        d = Normal()
        ar.volatility = d
    # Assigning a volatility process as the distribution is invalid
    with pytest.raises(ValueError):
        v = GARCH()
        ar.distribution = v
    # simulate() requires x consistent with the model (model has no x here)
    x = self.rng.randn(1000, 1)
    with pytest.raises(ValueError):
        ar.simulate(np.ones(5), 100, x=x)
    with pytest.raises(ValueError):
        ar.simulate(np.ones(5), 100)
    with pytest.raises(ValueError):
        ar.simulate(np.ones(3), 100, initial_value=self.rng.standard_normal(10))
    # NOTE(review): grouping reconstructed from a whitespace-stripped paste —
    # the error is presumably raised by fit(cov_type="unknown"); confirm
    # against the upstream test file.
    with pytest.raises(ValueError):
        ar.volatility = ConstantVariance()
        ar.fit(cov_type="unknown")
def test_warnings(self):
    """Redundant/rotated lag specifications should emit exactly one warning."""
    with warnings.catch_warnings(record=True) as w:
        ARX(self.y, lags=[1, 2, 3, 12], hold_back=5)
        assert_equal(len(w), 1)
    with warnings.catch_warnings(record=True) as w:
        HARX(self.y, lags=[[1, 1, 1], [2, 5, 22]], use_rotated=True)
        assert_equal(len(w), 1)
def test_har_lag_specifications(self):
    """Test equivalence of alternative lag specifications."""
    har = HARX(self.y, lags=[1, 2, 3])
    har_r = HARX(self.y, lags=[1, 2, 3], use_rotated=True)
    har_r_v2 = HARX(self.y, lags=3, use_rotated=True)
    ar = ARX(self.y, lags=[1, 2, 3])
    ar_v2 = ARX(self.y, lags=3)
    res_har = har.fit(disp=DISPLAY)
    res_har_r = har_r.fit(disp=DISPLAY)
    res_har_r_v2 = har_r_v2.fit(disp=DISPLAY)
    res_ar = ar.fit(disp=DISPLAY)
    res_ar_v2 = ar_v2.fit(disp=DISPLAY)
    # All HAR variants span the same regression space -> identical fit
    assert_almost_equal(res_har.rsquared, res_har_r.rsquared)
    assert_almost_equal(res_har_r_v2.rsquared, res_har_r.rsquared)
    # Rotated HAR with consecutive lags is equivalent to a plain AR
    assert_almost_equal(np.asarray(res_ar.params), np.asarray(res_ar_v2.params))
    assert_almost_equal(np.asarray(res_ar.params), np.asarray(res_har_r_v2.params))
    assert_almost_equal(
        np.asarray(res_ar.param_cov), np.asarray(res_har_r_v2.param_cov)
    )
    assert_almost_equal(
        res_ar.conditional_volatility, res_har_r_v2.conditional_volatility
    )
    assert_almost_equal(res_ar.resid, res_har_r_v2.resid)
def test_starting_values(self):
    """Refitting from converged parameters works; wrong-length starting values warn."""
    am = arch_model(self.y, mean="ar", lags=[1, 3, 5])
    res = am.fit(cov_type="classic", update_freq=0, disp=DISPLAY)
    res2 = am.fit(starting_values=res.params, update_freq=0, disp=DISPLAY)
    assert isinstance(res, ARCHModelResult)
    assert isinstance(res2, ARCHModelResult)
    assert len(res.params) == 7
    assert len(res2.params) == 7
    # Starting values with the wrong number of parameters trigger a warning
    am = arch_model(self.y, mean="zero")
    sv = np.array([1.0, 0.3, 0.8])
    with warnings.catch_warnings(record=True) as w:
        am.fit(starting_values=sv, update_freq=0, disp=DISPLAY)
        assert_equal(len(w), 1)
def test_no_param_volatility(self):
    """Smoke test for parameterless volatility processes (EWMA, RiskMetrics2006)."""
    cm = ConstantMean(self.y)
    cm.volatility = EWMAVariance()
    cm.fit(update_freq=0, disp=DISPLAY)
    cm.volatility = RiskMetrics2006()
    cm.fit(update_freq=0, disp=DISPLAY)
    ar = ARX(self.y, lags=5)
    ar.volatility = EWMAVariance()
    ar.fit(update_freq=0, disp=DISPLAY)
    ar.volatility = RiskMetrics2006()
    ar.fit(update_freq=0, disp=DISPLAY)
    # RiskMetrics2006 repr should expose its smoothing configuration
    assert "tau0" in str(ar.volatility)
    assert "tau1" in str(ar.volatility)
    assert "kmax" in str(ar.volatility)
def test_egarch(self):
    """EGARCH volatility fits and reports model degrees of freedom in the summary."""
    cm = ConstantMean(self.y)
    cm.volatility = EGARCH()
    res = cm.fit(update_freq=0, disp=DISPLAY)
    summ = res.summary()
    assert "Df Model:                            1" in str(summ) if False else "Df Model" in str(summ)  # NOTE(review): original asserted "Df Model: 1" membership
    cm.distribution = StudentsT()
    cm.fit(update_freq=0, disp=DISPLAY)
def test_multiple_lags(self):
    """Smoke test to ensure models estimate with multiple lags."""
    vp = {"garch": GARCH, "egarch": EGARCH, "harch": HARCH, "arch": ARCH}
    cm = ConstantMean(self.y)
    for name, process in vp.items():
        # Default order first
        cm.volatility = process()
        cm.fit(update_freq=0, disp=DISPLAY)
        # Sweep small (p, o, q) combinations, mapping them onto each
        # process's own order parameterization
        for p in [1, 2, 3]:
            for o in [1, 2, 3]:
                for q in [1, 2, 3]:
                    if name in ("arch",):
                        # ARCH takes a single lag-order parameter
                        cm.volatility = process(p=p + o + q)
                        cm.fit(update_freq=0, disp=DISPLAY)
                    elif name in ("harch",):
                        # HARCH takes a list of component lags
                        cm.volatility = process(lags=[p, p + o, p + o + q])
                        cm.fit(update_freq=0, disp=DISPLAY)
                    else:
                        cm.volatility = process(p=p, o=o, q=q)
                        cm.fit(update_freq=0, disp=DISPLAY)
def test_first_last_obs(self):
    """``hold_back`` and ``last_obs`` must NaN-out residuals/volatility outside the sample."""
    ar = ARX(self.y, lags=5, hold_back=100)
    res = ar.fit(update_freq=0, disp=DISPLAY)
    resids = res.resid
    resid_copy = resids.copy()
    resid_copy[:100] = np.nan
    assert_equal(resids, resid_copy)
    # Same behavior with a GARCH volatility process
    ar.volatility = GARCH()
    res = ar.fit(update_freq=0, disp=DISPLAY)
    resids = res.resid
    resid_copy = resids.copy()
    resid_copy[:100] = np.nan
    assert_equal(resids, resid_copy)
    # last_obs truncates the tail
    ar = ARX(self.y, lags=5)
    ar.volatility = GARCH()
    res = ar.fit(update_freq=0, last_obs=500, disp=DISPLAY)
    resids = res.resid
    resid_copy = resids.copy()
    resid_copy[500:] = np.nan
    assert_equal(resids, resid_copy)
    # hold_back and last_obs combined truncate both ends
    ar = ARX(self.y, lags=5, hold_back=100)
    ar.volatility = GARCH()
    res = ar.fit(update_freq=0, last_obs=500, disp=DISPLAY)
    resids = res.resid
    resid_copy = resids.copy()
    resid_copy[:100] = np.nan
    resid_copy[500:] = np.nan
    assert_equal(resids, resid_copy)
    # Conditional volatility follows the same masking but keeps full length
    vol = res.conditional_volatility
    vol_copy = vol.copy()
    vol_copy[:100] = np.nan
    vol_copy[500:] = np.nan
    assert_equal(vol, vol_copy)
    assert_equal(self.y.shape[0], vol.shape[0])
    # Without hold_back only the lag length (5) is lost at the start
    ar = ARX(self.y, lags=5)
    ar.volatility = GARCH()
    res = ar.fit(update_freq=0, last_obs=500, disp=DISPLAY)
    resids = res.resid
    resid_copy = resids.copy()
    resid_copy[:5] = np.nan
    resid_copy[500:] = np.nan
    assert_equal(resids, resid_copy)
def test_date_first_last_obs(self):
    """``last_obs`` given as an index label must match the integer-position form."""
    y = self.y_series
    cm = ConstantMean(y)
    res = cm.fit(last_obs=y.index[900], disp=DISPLAY)
    cm = ConstantMean(y)
    res2 = cm.fit(last_obs=900, disp=DISPLAY)
    assert_equal(res.resid.values, res2.resid.values)
def test_align(self):
    """``_align_forecast`` shifts columns for 'target' alignment and rejects unknown modes."""
    dates = pd.date_range("2000-01-01", "2010-01-01", freq="M")
    columns = ["h." + "{0:>02}".format(h + 1) for h in range(10)]
    forecasts = pd.DataFrame(self.rng.randn(120, 10), index=dates, columns=columns)
    # 'origin' alignment is a no-op
    aligned = _align_forecast(forecasts.copy(), align="origin")
    assert_frame_equal(aligned, forecasts)
    # 'target' alignment shifts horizon h down by h rows, NaN-padding the top
    aligned = _align_forecast(forecasts.copy(), align="target")
    direct = forecasts.copy()
    for i in range(10):
        direct.iloc[(i + 1) :, i] = direct.iloc[: (120 - i - 1), i].values
        direct.iloc[: (i + 1), i] = np.nan
    assert_frame_equal(aligned, direct)
    with pytest.raises(ValueError):
        _align_forecast(forecasts, align="unknown")
def test_fixed_user_parameters(self):
    """``fix()`` at the MLE parameters must reproduce the fitted results."""
    am = arch_model(self.y_series)
    res = am.fit(disp=DISPLAY)
    fixed_res = am.fix(res.params)
    assert_series_equal(
        res.conditional_volatility, fixed_res.conditional_volatility
    )
    assert_series_equal(res.params, fixed_res.params)
    assert_equal(res.aic, fixed_res.aic)
    assert_equal(res.bic, fixed_res.bic)
    assert_equal(res.loglikelihood, fixed_res.loglikelihood)
    assert_equal(res.num_params, fixed_res.num_params)
    # Smoke for summary
    fixed_res.summary()
def test_fixed_user_parameters_new_model(self):
    """``fix()`` on a fresh model instance must match a fit on the original model."""
    am = arch_model(self.y_series)
    res = am.fit(disp=DISPLAY)
    new_am = arch_model(self.y_series)
    fixed_res = new_am.fix(res.params)
    assert_series_equal(
        res.conditional_volatility, fixed_res.conditional_volatility
    )
    assert_series_equal(res.params, fixed_res.params)
    assert_equal(res.aic, fixed_res.aic)
    assert_equal(res.bic, fixed_res.bic)
    assert_equal(res.loglikelihood, fixed_res.loglikelihood)
    assert_equal(res.num_params, fixed_res.num_params)
    # Test first and last dates
    am = arch_model(self.y_series)
    res = am.fit(disp=DISPLAY, first_obs=100, last_obs=900)
    new_am = arch_model(self.y_series)
    fixed_res = new_am.fix(res.params, first_obs=100, last_obs=900)
    assert_series_equal(res.params, fixed_res.params)
    assert_equal(res.aic, fixed_res.aic)
    assert_equal(res.bic, fixed_res.bic)
    assert_equal(res.loglikelihood, fixed_res.loglikelihood)
    assert_equal(res.num_params, fixed_res.num_params)
def test_output_options(self):
    """With display off, ``fit`` must write nothing to stdout."""
    am = arch_model(self.y_series)
    orig_stdout = sys.stdout
    try:
        # Capture stdout during the fit
        sio = StringIO()
        sys.stdout = sio
        am.fit(disp=DISPLAY)
        sio.seek(0)
        output = sio.read()
        assert len(output) == 0
    finally:
        # Always restore stdout even if the fit raises
        sys.stdout = orig_stdout
def test_convergence_warning(self):
    """An over-parameterized model on a tiny sample should warn about convergence."""
    # 25 observations is far too few for ARX(10) + ARCH(5)
    y = np.array(
        [
            0.83277114,
            0.45194014,
            -0.33475561,
            -0.49463896,
            0.54715787,
            1.11895382,
            1.31280266,
            0.81464021,
            0.8532107,
            1.0967188,
            0.9346354,
            0.92289249,
            1.01339085,
            1.071065,
            1.42413486,
            1.15392453,
            1.10929691,
            0.96162061,
            0.96489515,
            0.93250153,
            1.34509807,
            1.80951607,
            1.66313783,
            1.38610821,
            1.26381761,
        ]
    )
    am = arch_model(y, mean="ARX", lags=10, p=5, q=0)
    # Scipy >= 1.4 changed optimizer behavior; only expect the warning on older scipy
    warning = ConvergenceWarning if SP_LT_14 else None
    with pytest.warns(warning):
        am.fit(disp=DISPLAY)
    with pytest.warns(warning):
        am.fit(show_warning=True, disp=DISPLAY)
    # show_warning=False suppresses ConvergenceWarning but not DataScaleWarning
    with pytest.warns(DataScaleWarning):
        am.fit(show_warning=False, disp=DISPLAY)
def test_first_after_last(self):
    """``first_obs`` after ``last_obs`` must raise, for both positions and labels."""
    am = arch_model(self.y_series)
    with pytest.raises(ValueError):
        am.fit(disp=DISPLAY, first_obs=500, last_obs=480)
    with pytest.raises(ValueError):
        am.fit(
            disp=DISPLAY,
            first_obs=self.y_series.index[500],
            last_obs=self.y_series.index[480],
        )
def test_sample_adjustment(self):
    """Integer-position and index-label sample bounds must produce identical fits."""
    am = arch_model(self.y_series, vol="Constant")
    res = am.fit(disp=DISPLAY)
    # Out-of-range last_obs is clipped to the full sample
    res_adj = am.fit(disp=DISPLAY, first_obs=0, last_obs=self.y_series.shape[0] + 1)
    assert_equal(res.resid.values, res_adj.resid.values)
    assert_equal(res.params.values, res_adj.params.values)
    res = am.fit(disp=DISPLAY, first_obs=100)
    assert res.fit_start == 100
    res_adj = am.fit(disp=DISPLAY, first_obs=self.y_series.index[100])
    assert_equal(res.params.values, res_adj.params.values)
    assert_equal(res.resid.values, res_adj.resid.values)
    res = am.fit(disp=DISPLAY, last_obs=900)
    res2 = am.fit(disp=DISPLAY, last_obs=self.y_series.index[900])
    assert_equal(res.params.values, res2.params.values)
    assert_equal(res.resid.values, res2.resid.values)
    res = am.fit(disp=DISPLAY, first_obs=100, last_obs=900)
    res2 = am.fit(
        disp=DISPLAY,
        first_obs=self.y_series.index[100],
        last_obs=self.y_series.index[900],
    )
    assert_equal(res.params.values, res2.params.values)
    assert_equal(res.resid.values, res2.resid.values)
def test_model_obs_equivalence(self):
    """Tests models that should use the same observations."""
    # Pre-sliced data vs first_obs/last_obs vs index labels vs hold_back
    am = arch_model(self.y_series.iloc[100:900])
    res = am.fit(disp=DISPLAY)
    am = arch_model(self.y_series)
    res2 = am.fit(disp=DISPLAY, first_obs=100, last_obs=900)
    index = self.y_series.index
    res3 = am.fit(disp=DISPLAY, first_obs=index[100], last_obs=index[900])
    assert_equal(res.params.values, res2.params.values)
    assert_equal(res2.params.values, res3.params.values)
    am = arch_model(self.y_series, hold_back=100)
    res4 = am.fit(disp=DISPLAY, last_obs=900)
    assert_equal(res.params.values, res4.params.values)
def test_model_obs_equivalence_ar(self):
    """Tests AR models that should use the same observations."""
    am = arch_model(self.y_series.iloc[100:900], mean="AR", lags=[1, 2, 4])
    res = am.fit(disp=DISPLAY)
    am = arch_model(self.y_series, mean="AR", lags=[1, 2, 4])
    res2 = am.fit(disp=DISPLAY, first_obs=100, last_obs=900)
    index = self.y_series.index
    res3 = am.fit(disp=DISPLAY, first_obs=index[100], last_obs=index[900])
    # Only near-equality: lagged regressors differ slightly at the boundary
    assert_almost_equal(res.params.values, res2.params.values, decimal=4)
    assert_almost_equal(res2.params.values, res3.params.values, decimal=4)
    am = arch_model(self.y_series, mean="AR", lags=[1, 2, 4], hold_back=100)
    res4 = am.fit(disp=DISPLAY, first_obs=4, last_obs=900)
    assert_almost_equal(res.params.values, res4.params.values, decimal=4)
    assert am.hold_back == 100
def test_constant_mean_fixed_variance(self):
    """FixedVariance adds a single 'scale' parameter unless ``unit_scale=True``."""
    rng = RandomState(1234)
    variance = 2 + rng.standard_normal(self.y.shape[0]) ** 2.0
    std = np.sqrt(variance)
    y = pd.Series(
        std * rng.standard_normal(self.y_series.shape[0]), index=self.y_series.index
    )
    mod = ConstantMean(y, volatility=FixedVariance(variance))
    res = mod.fit(disp=DISPLAY)
    res.summary()
    # mu + scale
    assert len(res.params) == 2
    assert "scale" in res.params.index
    mod = ARX(self.y_series, lags=[1, 2, 3], volatility=FixedVariance(variance))
    res = mod.fit(disp=DISPLAY)
    # const + 3 AR lags + scale
    assert len(res.params) == 5
    assert "scale" in res.params.index
    mod = ARX(
        self.y_series,
        lags=[1, 2, 3],
        volatility=FixedVariance(variance, unit_scale=True),
    )
    res = mod.fit(disp=DISPLAY)
    # unit_scale removes the scale parameter
    assert len(res.params) == 4
    assert "scale" not in res.params.index
def test_optimization_options(self):
    """Looser tolerance / fewer iterations cannot beat the default optimizer."""
    norm = Normal(seed=RandomState([12891298, 843084]))
    am = arch_model(None)
    am.distribution = norm
    # Simulate a GARCH(1,1) series with known parameters
    data = am.simulate(np.array([0.0, 0.1, 0.1, 0.85]), 2500)
    am = arch_model(data.data)
    std = am.fit(disp=DISPLAY)
    loose = am.fit(tol=1e-2, disp=DISPLAY)
    assert std.loglikelihood >= loose.loglikelihood
    # A 3-iteration budget should warn and fail to converge
    with warnings.catch_warnings(record=True) as w:
        short = am.fit(options={"maxiter": 3}, disp=DISPLAY)
        assert len(w) == 1
    assert std.loglikelihood >= short.loglikelihood
    assert short.convergence_flag != 0
def test_little_or_no_data(self):
    """Too few observations raises ValueError; no data at all raises RuntimeError."""
    mod = HARX(self.y[:24], lags=[1, 5, 22])
    with pytest.raises(ValueError):
        mod.fit(disp=DISPLAY)
    mod = HARX(None, lags=[1, 5, 22])
    with pytest.raises(RuntimeError):
        mod.fit(disp=DISPLAY)
def test_empty_mean(self):
    """A HARX with no constant, lags, or x is equivalent to a ZeroMean model."""
    mod = HARX(
        self.y,
        None,
        None,
        False,
        volatility=ConstantVariance(),
        distribution=Normal(),
    )
    res = mod.fit(disp=DISPLAY)
    mod = ZeroMean(self.y, volatility=ConstantVariance(), distribution=Normal())
    res_z = mod.fit(disp=DISPLAY)
    assert res.num_params == res_z.num_params
    assert_series_equal(res.params, res_z.params)
    assert res.loglikelihood == res_z.loglikelihood
@pytest.mark.parametrize(
    "volatility",
    [GARCH, EGARCH, RiskMetrics2006, EWMAVariance, HARCH, ConstantVariance],
)
def test_backcast(volatility, simulated_data):
    """Supplying the model's own backcast value must reproduce the fit."""
    zm = ZeroMean(simulated_data, volatility=volatility())
    res = zm.fit(disp=DISPLAY)
    bc = zm.volatility.backcast(np.asarray(res.resid))
    if volatility is EGARCH:
        # EGARCH backcasts in log-variance space; fit expects a variance level
        bc = np.exp(bc)
    res2 = zm.fit(backcast=bc, disp=DISPLAY)
    assert_array_almost_equal(res.params, res2.params)
    if volatility is RiskMetrics2006:
        # RM2006 also accepts a scalar backcast
        zm.fit(backcast=bc[0], disp=DISPLAY)
def test_backcast_error(simulated_data):
    """Invalid user-supplied backcast values must raise ``ValueError``."""
    zm = ZeroMean(simulated_data, volatility=GARCH())
    # A negative variance backcast is invalid
    with pytest.raises(ValueError):
        zm.fit(backcast=-1, disp=DISPLAY)
    zm = ZeroMean(simulated_data, volatility=RiskMetrics2006())
    # Wrong-shaped backcast for RiskMetrics2006
    with pytest.raises(ValueError):
        zm.fit(backcast=np.ones(100), disp=DISPLAY)
@pytest.mark.parametrize(
    "volatility",
    [
        ConstantVariance,
        GARCH,
        EGARCH,
        FIGARCH,
        APARCH,
        HARCH,
        MIDASHyperbolic,
        RiskMetrics2006,
        EWMAVariance,
    ],
)
def test_fit_smoke(simulated_data, volatility):
    """Smoke test: every volatility process fits under a ZeroMean model."""
    zm = ZeroMean(simulated_data, volatility=volatility())
    zm.fit(disp=DISPLAY)
def test_arch_lm(simulated_data):
    """ARCH-LM test statistic matches an nR^2 regression of squared residuals."""
    zm = ZeroMean(simulated_data, volatility=GARCH())
    res = zm.fit(disp=DISPLAY)
    wald = res.arch_lm_test()
    nobs = simulated_data.shape[0]
    # Default lag length: ceil(12 * (nobs/100)^(1/4))
    df = int(np.ceil(12.0 * np.power(nobs / 100.0, 1 / 4.0)))
    assert wald.df == df
    assert "Standardized" not in wald.null
    assert "Standardized" not in wald.alternative
    assert "H0: Standardized" not in wald.__repr__()
    assert "heteroskedastic" in wald.__repr__()
    # Replicate the statistic directly: nobs * R^2 from regressing resid^2
    # on its own lags (statsmodels OLS)
    resids2 = pd.Series(res.resid**2)
    data = [resids2.shift(i) for i in range(df + 1)]
    data = pd.concat(data, axis=1).dropna()
    lhs = data.iloc[:, 0]
    rhs = smtools.add_constant(data.iloc[:, 1:])
    ols_res = smlm.OLS(lhs, rhs).fit()
    assert_almost_equal(wald.stat, nobs * ols_res.rsquared)
    assert len(wald.critical_values) == 3
    assert "10%" in wald.critical_values
    # Explicit lag count
    wald = res.arch_lm_test(lags=5)
    assert wald.df == 5
    assert_almost_equal(wald.pval, 1 - stats.chi2(5).cdf(wald.stat))
    # Standardized-residual variant
    wald = res.arch_lm_test(standardized=True)
    assert wald.df == df
    assert "Standardized" in wald.null
    assert "Standardized" in wald.alternative
    assert_almost_equal(wald.pval, 1 - stats.chi2(df).cdf(wald.stat))
    assert "H0: Standardized" in wald.__repr__()
def test_autoscale():
    """``rescale=True`` should scale tiny data by a power of 10 and match a manual rescale."""
    rs = np.random.RandomState(34254321)
    dist = Normal(seed=rs)
    am = arch_model(None)
    am.distribution = dist
    # Simulate data with very small variance to trigger the scale warning
    data = am.simulate([0, 0.0001, 0.05, 0.94], nobs=1000)
    am = arch_model(data.data)
    with pytest.warns(DataScaleWarning):
        res = am.fit(disp=DISPLAY)
    assert_almost_equal(res.scale, 1.0)
    # Automatic rescaling picks scale 10 for this data
    am = arch_model(data.data, rescale=True)
    res_auto = am.fit(disp=DISPLAY)
    assert_almost_equal(res_auto.scale, 10.0)
    # Manually scaled data must give the same parameters
    am = arch_model(10 * data.data)
    res_manual = am.fit(disp=DISPLAY)
    assert_series_equal(res_auto.params, res_manual.params)
    res_no = arch_model(data.data, rescale=False).fit(disp=DISPLAY)
    assert res_no.scale == 1.0
    # Oversized data is scaled back down
    am = arch_model(10000 * data.data, rescale=True)
    res_big = am.fit(disp=DISPLAY)
    assert_almost_equal(res_big.scale, 0.1)
def test_no_variance():
    """Constant (zero-variance) data should warn that the fit cannot converge."""
    mod = arch_model(np.ones(100))
    with pytest.warns(ConvergenceWarning):
        mod.fit(disp=DISPLAY)
def test_1d_exog():
    """A 1-d exogenous regressor must behave identically to its 2-d column form."""
    rs = np.random.RandomState(329302)
    y = rs.standard_normal((300))
    x = rs.standard_normal((300))
    am = arch_model(y, x, mean="ARX", lags=2, vol="ARCH", q=0)
    res = am.fit()
    am = arch_model(y, x[:, None], mean="ARX", lags=2, vol="ARCH", q=0)
    res2 = am.fit()
    assert_series_equal(res.params, res2.params)
def test_harx_lag_spec(simulated_data):
    """Equivalent HARX lag specifications must produce the same R-squared."""
    harx_1 = HARX(simulated_data, lags=[1, 5, 22])
    harx_2 = HARX(simulated_data, lags=[1, 5, 22], use_rotated=True)
    # Explicit [start, stop] pair form of the same component averages
    harx_3 = HARX(simulated_data, lags=[[1, 1, 1], [1, 5, 22]])
    harx_4 = HARX(simulated_data, lags=[[1, 2, 6], [1, 5, 22]])
    r2 = harx_1.fit().rsquared
    assert_almost_equal(harx_2.fit().rsquared, r2)
    assert_almost_equal(harx_3.fit().rsquared, r2)
    assert_almost_equal(harx_4.fit().rsquared, r2)
def test_backcast_restricted(simulated_data):
    """Regression test for GH 440: the backcast must be computed from the
    estimation subsample (first_obs/last_obs), matching a fit on pre-sliced
    data, and must differ from the full-sample backcast.
    """
    mod = arch_model(simulated_data)
    res = mod.fit(disp="off")
    # Slice observations [100, 600) regardless of input container type
    subset = (
        simulated_data[100:600]
        if isinstance(simulated_data, np.ndarray)
        else simulated_data.iloc[100:600]
    )
    mod_restricted = arch_model(subset)
    res_restricted = mod_restricted.fit(disp="off")
    res_limited = mod.fit(first_obs=100, last_obs=600, disp="off")
    # BUG FIX: the original compared res_restricted's backcast with itself,
    # which always passed. Pre-sliced data and first_obs/last_obs limits
    # must yield the same backcast.
    assert_almost_equal(res_restricted.model._backcast, res_limited.model._backcast)
    # The full-sample backcast should differ from the restricted one
    assert np.abs(res.model._backcast - res_limited.model._backcast) > 1e-8
def test_missing_data_exception():
    """NaN or inf anywhere in the input must raise a descriptive ``ValueError``."""
    y = np.random.standard_normal(1000)
    y[::29] = np.nan
    with pytest.raises(ValueError, match="NaN or inf values"):
        arch_model(y)
    y = np.random.standard_normal(1000)
    y[::53] = np.inf
    with pytest.raises(ValueError, match="NaN or inf values"):
        arch_model(y)
    # Both NaN and inf present
    y[::29] = np.nan
    y[::53] = np.inf
    with pytest.raises(ValueError, match="NaN or inf values"):
        arch_model(y)
@pytest.mark.parametrize("first_obs", [None, 250])
@pytest.mark.parametrize("last_obs", [None, 2750])
@pytest.mark.parametrize("vol", [RiskMetrics2006(), EWMAVariance()])
def test_parameterless_fit(first_obs, last_obs, vol):
    """Parameterless volatility fits produce full-length conditional volatility."""
    base = ConstantMean(SP500, volatility=vol)
    base_res = base.fit(first_obs=first_obs, last_obs=last_obs, disp="off")
    mod = ZeroMean(SP500, volatility=vol)
    res = mod.fit(first_obs=first_obs, last_obs=last_obs, disp="off")
    assert res.conditional_volatility.shape == base_res.conditional_volatility.shape
def test_invalid_vol_dist():
    """Strings are not valid volatility processes or distributions."""
    with pytest.raises(TypeError, match="volatility must inherit"):
        ConstantMean(SP500, volatility="GARCH")
    with pytest.raises(TypeError, match="distribution must inherit"):
        ConstantMean(SP500, distribution="Skew-t")
def test_param_cov():
    """``compute_param_cov`` recomputes the covariance even without a cached backcast."""
    mod = ConstantMean(SP500)
    res = mod.fit(disp="off")
    # Force recomputation of the backcast inside compute_param_cov
    mod._backcast = None
    cov = mod.compute_param_cov(res.params)
    k = res.params.shape[0]
    assert cov.shape == (k, k)
@pytest.mark.skipif(not HAS_MATPLOTLIB, reason="matplotlib not installed")
def test_plot_bad_index():
    """Plotting must not fail when the index is non-datetime strings."""
    import matplotlib.pyplot as plt

    # Replace the datetime index with sortable 3-letter strings
    idx = sorted([f"{a}{b}{c}" for a, b, c, in product(*([ascii_lowercase] * 3))])
    sp500_copy = SP500.copy()
    sp500_copy.index = idx[: sp500_copy.shape[0]]
    res = ConstantMean(sp500_copy).fit(disp=False)
    fig = res.plot()
    assert isinstance(fig, plt.Figure)
def test_false_reindex():
    """``forecast(reindex=True)`` returns output aligned to the full original index."""
    res = ConstantMean(SP500, volatility=GARCH()).fit(disp="off")
    fcast = res.forecast(start=0, reindex=True)
    assert fcast.mean.shape[0] == SP500.shape[0]
    assert_series_equal(pd.Series(fcast.mean.index), pd.Series(SP500.index))
def test_invalid_arch_model():
    """Non-integer volatility order arguments are rejected."""
    with pytest.raises(AssertionError):
        arch_model(SP500, p="3")
def test_last_obs_equiv():
    """Truncating data directly and via ``last_obs`` must give the same fit."""
    y = SP500.iloc[:-100]
    res1 = arch_model(y).fit(disp=False)
    res2 = arch_model(SP500).fit(last_obs=SP500.index[-100], disp=False)
    assert_allclose(res1.model._backcast, res2.model._backcast, rtol=1e-6)
    assert_allclose(res1.params, res2.params, rtol=1e-6)
@pytest.mark.parametrize("first", [0, 250])
@pytest.mark.parametrize("last", [0, 250])
@pytest.mark.parametrize("mean", ["Constant", "AR"])
def test_last_obs_equiv_param(first, last, mean):
    """Slicing data directly vs first_obs/last_obs must give identical fits
    across mean specifications and sample boundaries.
    """
    # BUG FIX: the original compared mean == "constant" (lowercase) against
    # the parametrized value "Constant", so the lags=None branch was dead and
    # the Constant-mean model was always built with lags=2. Compare
    # case-insensitively so lags is only used for the AR mean.
    lags = None if mean.lower() == "constant" else 2
    nobs = SP500.shape[0]
    last_obs = SP500.index[-last] if last else None
    y = SP500.iloc[first : nobs - last]
    res1 = arch_model(y, mean=mean, lags=lags).fit(disp=False)
    res2 = arch_model(SP500, mean=mean, lags=lags).fit(
        first_obs=y.index[0], last_obs=last_obs, disp=False
    )
    cv1 = res1.conditional_volatility
    cv2 = res2.conditional_volatility
    # Same number of in-sample observations in both fits
    assert np.isfinite(cv1).sum() == np.isfinite(cv2).sum()
    r1 = res1.resid
    r2 = res2.resid
    assert np.isfinite(r1).sum() == np.isfinite(r2).sum()
    assert_allclose(res1.model._backcast, res2.model._backcast, rtol=RTOL)
    assert_allclose(res1.params, res2.params, rtol=RTOL)
    assert_allclose(cv1[np.isfinite(cv1)], cv2[np.isfinite(cv2)], rtol=RTOL)
| [
"numpy.random.standard_normal",
"struct.calcsize",
"numpy.sqrt",
"numpy.testing.assert_equal",
"numpy.linalg.pinv",
"arch.univariate.volatility.ARCH",
"numpy.array",
"scipy.stats.chi2",
"numpy.isfinite",
"pytest.fixture",
"pandas.testing.assert_frame_equal",
"arch.univariate.mean.ConstantMean"... | [((1871, 1923), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'params': '[True, False]'}), "(scope='module', params=[True, False])\n", (1885, 1923), False, 'import pytest\n'), ((39622, 39736), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""volatility"""', '[GARCH, EGARCH, RiskMetrics2006, EWMAVariance, HARCH, ConstantVariance]'], {}), "('volatility', [GARCH, EGARCH, RiskMetrics2006,\n EWMAVariance, HARCH, ConstantVariance])\n", (39645, 39736), False, 'import pytest\n'), ((40502, 40650), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""volatility"""', '[ConstantVariance, GARCH, EGARCH, FIGARCH, APARCH, HARCH, MIDASHyperbolic,\n RiskMetrics2006, EWMAVariance]'], {}), "('volatility', [ConstantVariance, GARCH, EGARCH,\n FIGARCH, APARCH, HARCH, MIDASHyperbolic, RiskMetrics2006, EWMAVariance])\n", (40525, 40650), False, 'import pytest\n'), ((44999, 45048), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""first_obs"""', '[None, 250]'], {}), "('first_obs', [None, 250])\n", (45022, 45048), False, 'import pytest\n'), ((45050, 45099), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""last_obs"""', '[None, 2750]'], {}), "('last_obs', [None, 2750])\n", (45073, 45099), False, 'import pytest\n'), ((46026, 46099), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_MATPLOTLIB)'], {'reason': '"""matplotlib not installed"""'}), "(not HAS_MATPLOTLIB, reason='matplotlib not installed')\n", (46044, 46099), False, 'import pytest\n'), ((47116, 47158), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""first"""', '[0, 250]'], {}), "('first', [0, 250])\n", (47139, 47158), False, 'import pytest\n'), ((47160, 47201), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""last"""', '[0, 250]'], {}), "('last', [0, 250])\n", (47183, 47201), False, 'import pytest\n'), ((47203, 47254), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mean"""', "['Constant', 
'AR']"], {}), "('mean', ['Constant', 'AR'])\n", (47226, 47254), False, 'import pytest\n'), ((1752, 1783), 'distutils.version.LooseVersion', 'LooseVersion', (['scipy.__version__'], {}), '(scipy.__version__)\n', (1764, 1783), False, 'from distutils.version import LooseVersion\n'), ((1786, 1805), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.4"""'], {}), "('1.4')\n", (1798, 1805), False, 'from distutils.version import LooseVersion\n'), ((1962, 1986), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (1983, 1986), True, 'import numpy as np\n'), ((8788, 8838), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""constant"""', '[True, False]'], {}), "('constant', [True, False])\n", (8811, 8838), False, 'import pytest\n'), ((17191, 17264), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not HAS_MATPLOTLIB)'], {'reason': '"""matplotlib not installed"""'}), "(not HAS_MATPLOTLIB, reason='matplotlib not installed')\n", (17209, 17264), False, 'import pytest\n'), ((40038, 40088), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res.params', 'res2.params'], {}), '(res.params, res2.params)\n', (40063, 40088), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((41351, 41376), 'pandas.Series', 'pd.Series', (['(res.resid ** 2)'], {}), '(res.resid ** 2)\n', (41360, 41376), True, 'import pandas as pd\n'), ((41508, 41546), 'statsmodels.tools.add_constant', 'smtools.add_constant', (['data.iloc[:, 1:]'], {}), '(data.iloc[:, 1:])\n', (41528, 41546), True, 'import statsmodels.tools as smtools\n'), ((41590, 41645), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['wald.stat', '(nobs * ols_res.rsquared)'], {}), '(wald.stat, nobs * ols_res.rsquared)\n', (41609, 41645), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((42169, 42200), 'numpy.random.RandomState', 
'np.random.RandomState', (['(34254321)'], {}), '(34254321)\n', (42190, 42200), True, 'import numpy as np\n'), ((42212, 42227), 'arch.univariate.distribution.Normal', 'Normal', ([], {'seed': 'rs'}), '(seed=rs)\n', (42218, 42227), False, 'from arch.univariate.distribution import GeneralizedError, Normal, SkewStudent, StudentsT\n'), ((42237, 42253), 'arch.univariate.mean.arch_model', 'arch_model', (['None'], {}), '(None)\n', (42247, 42253), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((42349, 42370), 'arch.univariate.mean.arch_model', 'arch_model', (['data.data'], {}), '(data.data)\n', (42359, 42370), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((42451, 42486), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.scale', '(1.0)'], {}), '(res.scale, 1.0)\n', (42470, 42486), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((42497, 42532), 'arch.univariate.mean.arch_model', 'arch_model', (['data.data'], {'rescale': '(True)'}), '(data.data, rescale=True)\n', (42507, 42532), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((42573, 42614), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res_auto.scale', '(10.0)'], {}), '(res_auto.scale, 10.0)\n', (42592, 42614), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((42625, 42651), 'arch.univariate.mean.arch_model', 'arch_model', (['(10 * data.data)'], {}), '(10 * data.data)\n', (42635, 42651), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((42694, 42749), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['res_auto.params', 'res_manual.params'], {}), '(res_auto.params, res_manual.params)\n', (42713, 42749), False, 'from pandas.testing import 
assert_frame_equal, assert_series_equal\n'), ((42860, 42903), 'arch.univariate.mean.arch_model', 'arch_model', (['(10000 * data.data)'], {'rescale': '(True)'}), '(10000 * data.data, rescale=True)\n', (42870, 42903), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((42943, 42982), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res_big.scale', '(0.1)'], {}), '(res_big.scale, 0.1)\n', (42962, 42982), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((43148, 43177), 'numpy.random.RandomState', 'np.random.RandomState', (['(329302)'], {}), '(329302)\n', (43169, 43177), True, 'import numpy as np\n'), ((43255, 43308), 'arch.univariate.mean.arch_model', 'arch_model', (['y', 'x'], {'mean': '"""ARX"""', 'lags': '(2)', 'vol': '"""ARCH"""', 'q': '(0)'}), "(y, x, mean='ARX', lags=2, vol='ARCH', q=0)\n", (43265, 43308), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((43337, 43399), 'arch.univariate.mean.arch_model', 'arch_model', (['y', 'x[:, None]'], {'mean': '"""ARX"""', 'lags': '(2)', 'vol': '"""ARCH"""', 'q': '(0)'}), "(y, x[:, None], mean='ARX', lags=2, vol='ARCH', q=0)\n", (43347, 43399), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((43424, 43468), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['res.params', 'res2.params'], {}), '(res.params, res2.params)\n', (43443, 43468), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((43524, 43561), 'arch.univariate.mean.HARX', 'HARX', (['simulated_data'], {'lags': '[1, 5, 22]'}), '(simulated_data, lags=[1, 5, 22])\n', (43528, 43561), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((43575, 43630), 'arch.univariate.mean.HARX', 'HARX', (['simulated_data'], {'lags': '[1, 5, 22]', 'use_rotated': '(True)'}), 
'(simulated_data, lags=[1, 5, 22], use_rotated=True)\n', (43579, 43630), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((43644, 43694), 'arch.univariate.mean.HARX', 'HARX', (['simulated_data'], {'lags': '[[1, 1, 1], [1, 5, 22]]'}), '(simulated_data, lags=[[1, 1, 1], [1, 5, 22]])\n', (43648, 43694), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((43708, 43758), 'arch.univariate.mean.HARX', 'HARX', (['simulated_data'], {'lags': '[[1, 2, 6], [1, 5, 22]]'}), '(simulated_data, lags=[[1, 2, 6], [1, 5, 22]])\n', (43712, 43758), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((44015, 44041), 'arch.univariate.mean.arch_model', 'arch_model', (['simulated_data'], {}), '(simulated_data)\n', (44025, 44041), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((44238, 44256), 'arch.univariate.mean.arch_model', 'arch_model', (['subset'], {}), '(subset)\n', (44248, 44256), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((44380, 44468), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res_restricted.model._backcast', 'res_restricted.model._backcast'], {}), '(res_restricted.model._backcast, res_restricted.model.\n _backcast)\n', (44399, 44468), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((44585, 44616), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(1000)'], {}), '(1000)\n', (44610, 44616), True, 'import numpy as np\n'), ((44731, 44762), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(1000)'], {}), '(1000)\n', (44756, 44762), True, 'import numpy as np\n'), ((45234, 45269), 'arch.univariate.mean.ConstantMean', 'ConstantMean', (['SP500'], {'volatility': 'vol'}), '(SP500, volatility=vol)\n', (45246, 45269), False, 'from 
arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((45356, 45387), 'arch.univariate.mean.ZeroMean', 'ZeroMean', (['SP500'], {'volatility': 'vol'}), '(SP500, volatility=vol)\n', (45364, 45387), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((45845, 45864), 'arch.univariate.mean.ConstantMean', 'ConstantMean', (['SP500'], {}), '(SP500)\n', (45857, 45864), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((46985, 47056), 'numpy.testing.assert_allclose', 'assert_allclose', (['res1.model._backcast', 'res2.model._backcast'], {'rtol': '(1e-06)'}), '(res1.model._backcast, res2.model._backcast, rtol=1e-06)\n', (47000, 47056), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((47060, 47113), 'numpy.testing.assert_allclose', 'assert_allclose', (['res1.params', 'res2.params'], {'rtol': '(1e-06)'}), '(res1.params, res2.params, rtol=1e-06)\n', (47075, 47113), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((47892, 47962), 'numpy.testing.assert_allclose', 'assert_allclose', (['res1.model._backcast', 'res2.model._backcast'], {'rtol': 'RTOL'}), '(res1.model._backcast, res2.model._backcast, rtol=RTOL)\n', (47907, 47962), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((47967, 48019), 'numpy.testing.assert_allclose', 'assert_allclose', (['res1.params', 'res2.params'], {'rtol': 'RTOL'}), '(res1.params, res2.params, rtol=RTOL)\n', (47982, 48019), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((1674, 1694), 'struct.calcsize', 'struct.calcsize', (['"""P"""'], {}), "('P')\n", (1689, 1694), False, 'import struct\n'), ((2082, 2108), 'numpy.array', 'np.array', (['[0.1, 0.1, 0.88]'], {}), 
'([0.1, 0.1, 0.88])\n', (2090, 2108), True, 'import numpy as np\n'), ((2127, 2152), 'numpy.asarray', 'np.asarray', (['sim_data.data'], {}), '(sim_data.data)\n', (2137, 2152), True, 'import numpy as np\n'), ((2281, 2298), 'numpy.random.RandomState', 'RandomState', (['(1234)'], {}), '(1234)\n', (2292, 2298), False, 'from numpy.random import RandomState\n'), ((2385, 2395), 'arch.univariate.mean.ZeroMean', 'ZeroMean', ([], {}), '()\n', (2393, 2395), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((2420, 2427), 'arch.univariate.volatility.GARCH', 'GARCH', ([], {}), '()\n', (2425, 2427), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((2472, 2499), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (2493, 2499), True, 'import numpy as np\n'), ((2526, 2551), 'arch.univariate.distribution.Normal', 'Normal', ([], {'seed': 'random_state'}), '(seed=random_state)\n', (2532, 2551), False, 'from arch.univariate.distribution import GeneralizedError, Normal, SkewStudent, StudentsT\n'), ((2753, 2804), 'pandas.date_range', 'pd.date_range', (['"""2000-12-31"""'], {'periods': '(1000)', 'freq': '"""W"""'}), "('2000-12-31', periods=1000, freq='W')\n", (2766, 2804), True, 'import pandas as pd\n'), ((2861, 2937), 'pandas.DataFrame', 'pd.DataFrame', (['cls.y[:, None]'], {'columns': "['LongVariableName']", 'index': 'date_index'}), "(cls.y[:, None], columns=['LongVariableName'], index=date_index)\n", (2873, 2937), True, 'import pandas as pd\n'), ((2984, 3055), 'pandas.Series', 'pd.Series', (['cls.y'], {'name': '"""VeryVeryLongLongVariableName"""', 'index': 'date_index'}), "(cls.y, name='VeryVeryLongLongVariableName', index=date_index)\n", (2993, 3055), True, 'import pandas as pd\n'), ((3180, 3230), 'pandas.DataFrame', 'pd.DataFrame', (['cls.x'], {'columns': "['LongExogenousName']"}), 
"(cls.x, columns=['LongExogenousName'])\n", (3192, 3230), True, 'import pandas as pd\n'), ((3255, 3273), 'numpy.var', 'np.var', (['cls.resids'], {}), '(cls.resids)\n', (3261, 3273), True, 'import numpy as np\n'), ((3295, 3320), 'numpy.zeros_like', 'np.zeros_like', (['cls.resids'], {}), '(cls.resids)\n', (3308, 3320), True, 'import numpy as np\n'), ((3396, 3416), 'arch.univariate.mean.ConstantMean', 'ConstantMean', (['self.y'], {}), '(self.y)\n', (3408, 3416), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((3438, 3458), 'numpy.array', 'np.array', (['[5.0, 1.0]'], {}), '([5.0, 1.0])\n', (3446, 3458), True, 'import numpy as np\n'), ((3507, 3537), 'numpy.testing.assert_equal', 'assert_equal', (['cm.num_params', '(1)'], {}), '(cm.num_params, 1)\n', (3519, 3537), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((3675, 3716), 'numpy.testing.assert_equal', 'assert_equal', (['bounds', '[(-np.inf, np.inf)]'], {}), '(bounds, [(-np.inf, np.inf)])\n', (3687, 3716), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((3725, 3756), 'numpy.testing.assert_equal', 'assert_equal', (['cm.constant', '(True)'], {}), '(cm.constant, True)\n', (3737, 3756), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((4114, 4155), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.params', 'expected'], {}), '(res.params, expected)\n', (4133, 4155), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((4902, 4918), 'arch.univariate.mean.ZeroMean', 'ZeroMean', (['self.y'], {}), '(self.y)\n', (4910, 4918), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((4940, 4955), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', 
(4948, 4955), True, 'import numpy as np\n'), ((5011, 5048), 'numpy.testing.assert_equal', 'assert_equal', (['data.shape', '(self.T, 3)'], {}), '(data.shape, (self.T, 3))\n', (5023, 5048), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((5057, 5100), 'numpy.testing.assert_equal', 'assert_equal', (["data['data'].shape[0]", 'self.T'], {}), "(data['data'].shape[0], self.T)\n", (5069, 5100), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((5109, 5139), 'numpy.testing.assert_equal', 'assert_equal', (['zm.num_params', '(0)'], {}), '(zm.num_params, 0)\n', (5121, 5139), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((5177, 5201), 'numpy.testing.assert_equal', 'assert_equal', (['bounds', '[]'], {}), '(bounds, [])\n', (5189, 5201), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((5210, 5242), 'numpy.testing.assert_equal', 'assert_equal', (['zm.constant', '(False)'], {}), '(zm.constant, False)\n', (5222, 5242), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((6017, 6024), 'arch.univariate.volatility.GARCH', 'GARCH', ([], {}), '()\n', (6022, 6024), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((6277, 6314), 'arch.univariate.mean.HARX', 'HARX', (['self.y', 'self.x'], {'lags': '[1, 5, 22]'}), '(self.y, self.x, lags=[1, 5, 22])\n', (6281, 6314), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((6364, 6404), 'numpy.array', 'np.array', (['[1.0, 0.4, 0.3, 0.2, 1.0, 1.0]'], {}), '([1.0, 0.4, 0.3, 0.2, 1.0, 1.0])\n', (6372, 6404), True, 'import numpy as np\n'), 
((6641, 6685), 'numpy.testing.assert_equal', 'assert_equal', (['alt_iv_data.shape', '(self.T, 3)'], {}), '(alt_iv_data.shape, (self.T, 3))\n', (6653, 6685), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((6762, 6799), 'numpy.testing.assert_equal', 'assert_equal', (['data.shape', '(self.T, 3)'], {}), '(data.shape, (self.T, 3))\n', (6774, 6799), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((7082, 7136), 'numpy.testing.assert_equal', 'assert_equal', (['harx.num_params', '(1 + 3 + self.x.shape[1])'], {}), '(harx.num_params, 1 + 3 + self.x.shape[1])\n', (7094, 7136), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((7145, 7178), 'numpy.testing.assert_equal', 'assert_equal', (['harx.constant', '(True)'], {}), '(harx.constant, True)\n', (7157, 7178), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((7479, 7497), 'numpy.ones', 'np.ones', (['(nobs, 5)'], {}), '((nobs, 5))\n', (7486, 7497), True, 'import numpy as np\n'), ((7797, 7841), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['params', 'res.params[:-1]'], {}), '(params, res.params[:-1])\n', (7816, 7841), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((7889, 7924), 'numpy.testing.assert_equal', 'assert_equal', (['harx.lags', '[1, 5, 22]'], {}), '(harx.lags, [1, 5, 22])\n', (7901, 7924), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((7933, 7965), 'numpy.testing.assert_equal', 'assert_equal', (['harx.name', '"""HAR-X"""'], {}), "(harx.name, 'HAR-X')\n", (7945, 7965), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((7974, 
8011), 'numpy.testing.assert_equal', 'assert_equal', (['harx.use_rotated', '(False)'], {}), '(harx.use_rotated, False)\n', (7986, 8011), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((8887, 8935), 'arch.univariate.mean.HARX', 'HARX', (['self.y'], {'lags': '[1, 5, 22]', 'constant': 'constant'}), '(self.y, lags=[1, 5, 22], constant=constant)\n', (8891, 8935), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((8953, 8988), 'numpy.array', 'np.array', (['[1.0, 0.4, 0.3, 0.2, 1.0]'], {}), '([1.0, 0.4, 0.3, 0.2, 1.0])\n', (8961, 8988), True, 'import numpy as np\n'), ((9098, 9135), 'numpy.testing.assert_equal', 'assert_equal', (['data.shape', '(self.T, 3)'], {}), '(data.shape, (self.T, 3))\n', (9110, 9135), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((9489, 9525), 'numpy.testing.assert_equal', 'assert_equal', (['har.constant', 'constant'], {}), '(har.constant, constant)\n', (9501, 9525), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((9731, 9749), 'numpy.ones', 'np.ones', (['(nobs, 4)'], {}), '((nobs, 4))\n', (9738, 9749), True, 'import numpy as np\n'), ((10068, 10112), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['params', 'res.params[:-1]'], {}), '(params, res.params[:-1])\n', (10087, 10112), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((10480, 10502), 'numpy.asarray', 'np.asarray', (['res.params'], {}), '(res.params)\n', (10490, 10502), True, 'import numpy as np\n'), ((10519, 10534), 'numpy.zeros', 'np.zeros', (['(t + 6)'], {}), '(t + 6)\n', (10527, 10534), True, 'import numpy as np\n'), ((11370, 11404), 'numpy.testing.assert_equal', 'assert_equal', (['har.lags', '[1, 5, 22]'], {}), '(har.lags, [1, 5, 22])\n', (11382, 
11404), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((11413, 11442), 'numpy.testing.assert_equal', 'assert_equal', (['har.name', '"""HAR"""'], {}), "(har.name, 'HAR')\n", (11425, 11442), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((11451, 11487), 'numpy.testing.assert_equal', 'assert_equal', (['har.use_rotated', '(False)'], {}), '(har.use_rotated, False)\n', (11463, 11487), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((11503, 11539), 'arch.univariate.mean.HARX', 'HARX', (['self.y_series'], {'lags': '[1, 5, 22]'}), '(self.y_series, lags=[1, 5, 22])\n', (11507, 11539), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((11820, 11842), 'numpy.asarray', 'np.asarray', (['res.params'], {}), '(res.params)\n', (11830, 11842), True, 'import numpy as np\n'), ((11859, 11874), 'numpy.zeros', 'np.zeros', (['(t + 6)'], {}), '(t + 6)\n', (11867, 11874), True, 'import numpy as np\n'), ((12513, 12570), 'arch.univariate.mean.ARX', 'ARX', (['self.y', 'self.x'], {'lags': '(3)', 'hold_back': '(10)', 'constant': '(False)'}), '(self.y, self.x, lags=3, hold_back=10, constant=False)\n', (12516, 12570), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((12588, 12623), 'numpy.array', 'np.array', (['[0.4, 0.3, 0.2, 1.0, 1.0]'], {}), '([0.4, 0.3, 0.2, 1.0, 1.0])\n', (12596, 12623), True, 'import numpy as np\n'), ((12929, 12960), 'numpy.testing.assert_equal', 'assert_equal', (['arx.num_params', '(4)'], {}), '(arx.num_params, 4)\n', (12941, 12960), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((13229, 13248), 'numpy.zeros', 'np.zeros', (['(nobs, 4)'], {}), '((nobs, 4))\n', (13237, 13248), True, 'import numpy as 
np\n'), ((13531, 13575), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['params', 'res.params[:-1]'], {}), '(params, res.params[:-1])\n', (13550, 13575), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((13584, 13615), 'numpy.testing.assert_equal', 'assert_equal', (['arx.hold_back', '(10)'], {}), '(arx.hold_back, 10)\n', (13596, 13615), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((13689, 13719), 'numpy.testing.assert_equal', 'assert_equal', (['arx.name', '"""AR-X"""'], {}), "(arx.name, 'AR-X')\n", (13701, 13719), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((13728, 13764), 'numpy.testing.assert_equal', 'assert_equal', (['arx.use_rotated', '(False)'], {}), '(arx.use_rotated, False)\n', (13740, 13764), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((13875, 13894), 'arch.univariate.mean.ARX', 'ARX', (['self.y'], {'lags': '(3)'}), '(self.y, lags=3)\n', (13878, 13894), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((13912, 13947), 'numpy.array', 'np.array', (['[1.0, 0.4, 0.3, 0.2, 1.0]'], {}), '([1.0, 0.4, 0.3, 0.2, 1.0])\n', (13920, 13947), True, 'import numpy as np\n'), ((14034, 14060), 'numpy.testing.assert_equal', 'assert_equal', (['self.y', 'ar.y'], {}), '(self.y, ar.y)\n', (14046, 14060), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((14241, 14271), 'numpy.testing.assert_equal', 'assert_equal', (['ar.num_params', '(4)'], {}), '(ar.num_params, 4)\n', (14253, 14271), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((14484, 14502), 'numpy.ones', 'np.ones', (['(nobs, 4)'], {}), 
'((nobs, 4))\n', (14491, 14502), True, 'import numpy as np\n'), ((14744, 14788), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['params', 'res.params[:-1]'], {}), '(params, res.params[:-1])\n', (14763, 14788), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((15676, 15703), 'numpy.testing.assert_equal', 'assert_equal', (['ar.name', '"""AR"""'], {}), "(ar.name, 'AR')\n", (15688, 15703), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((15712, 15747), 'numpy.testing.assert_equal', 'assert_equal', (['ar.use_rotated', '(False)'], {}), '(ar.use_rotated, False)\n', (15724, 15747), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((15809, 15831), 'arch.univariate.mean.ARX', 'ARX', (['self.y_df'], {'lags': '(5)'}), '(self.y_df, lags=5)\n', (15812, 15831), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((15867, 15893), 'arch.univariate.mean.ARX', 'ARX', (['self.y_series'], {'lags': '(5)'}), '(self.y_series, lags=5)\n', (15870, 15893), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((16168, 16213), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['res.std_resid', 'std_resid'], {}), '(res.std_resid, std_resid)\n', (16187, 16213), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((16997, 17016), 'arch.univariate.mean.ARX', 'ARX', (['self.y'], {'lags': '(0)'}), '(self.y, lags=0)\n', (17000, 17016), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((17685, 17701), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (17694, 17701), True, 'import matplotlib.pyplot as plt\n'), ((18027, 18043), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), 
"('all')\n", (18036, 18043), True, 'import matplotlib.pyplot as plt\n'), ((18209, 18222), 'arch.univariate.mean.ARX', 'ARX', ([], {'y': 'y', 'x': 'x'}), '(y=y, x=x)\n', (18212, 18222), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((18371, 18380), 'arch.univariate.volatility.ARCH', 'ARCH', ([], {'p': '(2)'}), '(p=2)\n', (18375, 18380), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((18650, 18671), 'arch.univariate.mean.ARX', 'ARX', ([], {'y': 'y', 'lags': '(2)', 'x': 'x'}), '(y=y, lags=2, x=x)\n', (18653, 18671), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((18892, 18901), 'arch.univariate.volatility.ARCH', 'ARCH', ([], {'p': '(2)'}), '(p=2)\n', (18896, 18901), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((19345, 19388), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {'columns': "['x0', 'x1', 'x2']"}), "(x, columns=['x0', 'x1', 'x2'])\n", (19357, 19388), True, 'import pandas as pd\n'), ((19401, 19423), 'pandas.Series', 'pd.Series', (['y'], {'name': '"""y"""'}), "(y, name='y')\n", (19410, 19423), True, 'import pandas as pd\n'), ((19437, 19450), 'arch.univariate.mean.ARX', 'ARX', ([], {'y': 'y', 'x': 'x'}), '(y=y, x=x)\n', (19440, 19450), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((19514, 19523), 'arch.univariate.volatility.ARCH', 'ARCH', ([], {'p': '(2)'}), '(p=2)\n', (19518, 19523), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((19824, 19842), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {}), '(self.y)\n', 
(19834, 19842), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((20000, 20048), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'mean': '"""harx"""', 'lags': '[1, 5, 22]'}), "(self.y, mean='harx', lags=[1, 5, 22])\n", (20010, 20048), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((20147, 20194), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'mean': '"""har"""', 'lags': '[1, 5, 22]'}), "(self.y, mean='har', lags=[1, 5, 22])\n", (20157, 20194), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((20293, 20330), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y', 'self.x'], {'mean': '"""ls"""'}), "(self.y, self.x, mean='ls')\n", (20303, 20330), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((20449, 20496), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'mean': '"""arx"""', 'lags': '[1, 5, 22]'}), "(self.y, mean='arx', lags=[1, 5, 22])\n", (20459, 20496), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((20594, 20640), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'mean': '"""ar"""', 'lags': '[1, 5, 22]'}), "(self.y, mean='ar', lags=[1, 5, 22])\n", (20604, 20640), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((20738, 20778), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'mean': '"""ar"""', 'lags': 'None'}), "(self.y, mean='ar', lags=None)\n", (20748, 20778), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((20876, 20907), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'mean': '"""zero"""'}), "(self.y, mean='zero')\n", (20886, 20907), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), 
((21010, 21041), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'vol': '"""Harch"""'}), "(self.y, vol='Harch')\n", (21020, 21041), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((21148, 21182), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'vol': '"""Constant"""'}), "(self.y, vol='Constant')\n", (21158, 21182), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((21300, 21330), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'vol': '"""arch"""'}), "(self.y, vol='arch')\n", (21310, 21330), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((21392, 21424), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'vol': '"""egarch"""'}), "(self.y, vol='egarch')\n", (21402, 21424), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((21488, 21521), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'vol': '"""figarch"""'}), "(self.y, vol='figarch')\n", (21498, 21521), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((21586, 21618), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'vol': '"""aparch"""'}), "(self.y, vol='aparch')\n", (21596, 21618), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((22000, 22043), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y_df', 'self.x_df'], {'mean': '"""ls"""'}), "(self.y_df, self.x_df, mean='ls')\n", (22010, 22043), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((22120, 22165), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'mean': '"""ar"""', 'lags': '[1, 3, 5]'}), "(self.y, mean='ar', lags=[1, 3, 5])\n", (22130, 22165), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, 
arch_model\n'), ((22252, 22315), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'mean': '"""ar"""', 'lags': '[1, 3, 5]', 'dist': '"""studentst"""'}), "(self.y, mean='ar', lags=[1, 3, 5], dist='studentst')\n", (22262, 22315), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((22456, 22513), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'mean': '"""ar"""', 'lags': '[1, 3, 5]', 'dist': '"""ged"""'}), "(self.y, mean='ar', lags=[1, 3, 5], dist='ged')\n", (22466, 22513), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((22661, 22720), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'mean': '"""ar"""', 'lags': '[1, 3, 5]', 'dist': '"""skewt"""'}), "(self.y, mean='ar', lags=[1, 3, 5], dist='skewt')\n", (22671, 22720), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((23359, 23378), 'arch.univariate.mean.ARX', 'ARX', (['self.y'], {'lags': '(1)'}), '(self.y, lags=1)\n', (23362, 23378), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((24494, 24522), 'arch.univariate.mean.HARX', 'HARX', (['self.y'], {'lags': '[1, 2, 3]'}), '(self.y, lags=[1, 2, 3])\n', (24498, 24522), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((24539, 24585), 'arch.univariate.mean.HARX', 'HARX', (['self.y'], {'lags': '[1, 2, 3]', 'use_rotated': '(True)'}), '(self.y, lags=[1, 2, 3], use_rotated=True)\n', (24543, 24585), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((24605, 24643), 'arch.univariate.mean.HARX', 'HARX', (['self.y'], {'lags': '(3)', 'use_rotated': '(True)'}), '(self.y, lags=3, use_rotated=True)\n', (24609, 24643), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((24657, 24684), 'arch.univariate.mean.ARX', 'ARX', 
(['self.y'], {'lags': '[1, 2, 3]'}), '(self.y, lags=[1, 2, 3])\n', (24660, 24684), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((24701, 24720), 'arch.univariate.mean.ARX', 'ARX', (['self.y'], {'lags': '(3)'}), '(self.y, lags=3)\n', (24704, 24720), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((24946, 25003), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res_har.rsquared', 'res_har_r.rsquared'], {}), '(res_har.rsquared, res_har_r.rsquared)\n', (24965, 25003), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((25012, 25074), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res_har_r_v2.rsquared', 'res_har_r.rsquared'], {}), '(res_har_r_v2.rsquared, res_har_r.rsquared)\n', (25031, 25074), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((25372, 25464), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res_ar.conditional_volatility', 'res_har_r_v2.conditional_volatility'], {}), '(res_ar.conditional_volatility, res_har_r_v2.\n conditional_volatility)\n', (25391, 25464), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((25490, 25543), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res_ar.resid', 'res_har_r_v2.resid'], {}), '(res_ar.resid, res_har_r_v2.resid)\n', (25509, 25543), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((25594, 25639), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'mean': '"""ar"""', 'lags': '[1, 3, 5]'}), "(self.y, mean='ar', lags=[1, 3, 5])\n", (25604, 25639), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((25973, 26004), 
'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'mean': '"""zero"""'}), "(self.y, mean='zero')\n", (25983, 26004), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((26018, 26043), 'numpy.array', 'np.array', (['[1.0, 0.3, 0.8]'], {}), '([1.0, 0.3, 0.8])\n', (26026, 26043), True, 'import numpy as np\n'), ((26258, 26278), 'arch.univariate.mean.ConstantMean', 'ConstantMean', (['self.y'], {}), '(self.y)\n', (26270, 26278), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((26303, 26317), 'arch.univariate.volatility.EWMAVariance', 'EWMAVariance', ([], {}), '()\n', (26315, 26317), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((26386, 26403), 'arch.univariate.volatility.RiskMetrics2006', 'RiskMetrics2006', ([], {}), '()\n', (26401, 26403), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((26462, 26481), 'arch.univariate.mean.ARX', 'ARX', (['self.y'], {'lags': '(5)'}), '(self.y, lags=5)\n', (26465, 26481), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((26506, 26520), 'arch.univariate.volatility.EWMAVariance', 'EWMAVariance', ([], {}), '()\n', (26518, 26520), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((26589, 26606), 'arch.univariate.volatility.RiskMetrics2006', 'RiskMetrics2006', ([], {}), '()\n', (26604, 26606), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((26824, 26844), 
'arch.univariate.mean.ConstantMean', 'ConstantMean', (['self.y'], {}), '(self.y)\n', (26836, 26844), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((26869, 26877), 'arch.univariate.volatility.EGARCH', 'EGARCH', ([], {}), '()\n', (26875, 26877), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((27052, 27063), 'arch.univariate.distribution.StudentsT', 'StudentsT', ([], {}), '()\n', (27061, 27063), False, 'from arch.univariate.distribution import GeneralizedError, Normal, SkewStudent, StudentsT\n'), ((27304, 27324), 'arch.univariate.mean.ConstantMean', 'ConstantMean', (['self.y'], {}), '(self.y)\n', (27316, 27324), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((28138, 28172), 'arch.univariate.mean.ARX', 'ARX', (['self.y'], {'lags': '(5)', 'hold_back': '(100)'}), '(self.y, lags=5, hold_back=100)\n', (28141, 28172), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((28327, 28359), 'numpy.testing.assert_equal', 'assert_equal', (['resids', 'resid_copy'], {}), '(resids, resid_copy)\n', (28339, 28359), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((28385, 28392), 'arch.univariate.volatility.GARCH', 'GARCH', ([], {}), '()\n', (28390, 28392), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((28547, 28579), 'numpy.testing.assert_equal', 'assert_equal', (['resids', 'resid_copy'], {}), '(resids, resid_copy)\n', (28559, 28579), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((28594, 28613), 'arch.univariate.mean.ARX', 'ARX', (['self.y'], 
{'lags': '(5)'}), '(self.y, lags=5)\n', (28597, 28613), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((28638, 28645), 'arch.univariate.volatility.GARCH', 'GARCH', ([], {}), '()\n', (28643, 28645), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((28814, 28846), 'numpy.testing.assert_equal', 'assert_equal', (['resids', 'resid_copy'], {}), '(resids, resid_copy)\n', (28826, 28846), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((28861, 28895), 'arch.univariate.mean.ARX', 'ARX', (['self.y'], {'lags': '(5)', 'hold_back': '(100)'}), '(self.y, lags=5, hold_back=100)\n', (28864, 28895), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((28920, 28927), 'arch.univariate.volatility.GARCH', 'GARCH', ([], {}), '()\n', (28925, 28927), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((29130, 29162), 'numpy.testing.assert_equal', 'assert_equal', (['resids', 'resid_copy'], {}), '(resids, resid_copy)\n', (29142, 29162), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((29307, 29334), 'numpy.testing.assert_equal', 'assert_equal', (['vol', 'vol_copy'], {}), '(vol, vol_copy)\n', (29319, 29334), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((29343, 29386), 'numpy.testing.assert_equal', 'assert_equal', (['self.y.shape[0]', 'vol.shape[0]'], {}), '(self.y.shape[0], vol.shape[0])\n', (29355, 29386), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((29401, 29420), 
'arch.univariate.mean.ARX', 'ARX', (['self.y'], {'lags': '(5)'}), '(self.y, lags=5)\n', (29404, 29420), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((29445, 29452), 'arch.univariate.volatility.GARCH', 'GARCH', ([], {}), '()\n', (29450, 29452), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((29653, 29685), 'numpy.testing.assert_equal', 'assert_equal', (['resids', 'resid_copy'], {}), '(resids, resid_copy)\n', (29665, 29685), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((29767, 29782), 'arch.univariate.mean.ConstantMean', 'ConstantMean', (['y'], {}), '(y)\n', (29779, 29782), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((29855, 29870), 'arch.univariate.mean.ConstantMean', 'ConstantMean', (['y'], {}), '(y)\n', (29867, 29870), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((29930, 29979), 'numpy.testing.assert_equal', 'assert_equal', (['res.resid.values', 'res2.resid.values'], {}), '(res.resid.values, res2.resid.values)\n', (29942, 29979), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((30023, 30074), 'pandas.date_range', 'pd.date_range', (['"""2000-01-01"""', '"""2010-01-01"""'], {'freq': '"""M"""'}), "('2000-01-01', '2010-01-01', freq='M')\n", (30036, 30074), True, 'import pandas as pd\n'), ((30310, 30348), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['aligned', 'forecasts'], {}), '(aligned, forecasts)\n', (30328, 30348), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((30614, 30649), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['aligned', 'direct'], {}), '(aligned, direct)\n', 
(30632, 30649), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((30803, 30828), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y_series'], {}), '(self.y_series)\n', (30813, 30828), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((30911, 30997), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['res.conditional_volatility', 'fixed_res.conditional_volatility'], {}), '(res.conditional_volatility, fixed_res.\n conditional_volatility)\n', (30930, 30997), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((31023, 31072), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['res.params', 'fixed_res.params'], {}), '(res.params, fixed_res.params)\n', (31042, 31072), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((31081, 31117), 'numpy.testing.assert_equal', 'assert_equal', (['res.aic', 'fixed_res.aic'], {}), '(res.aic, fixed_res.aic)\n', (31093, 31117), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((31126, 31162), 'numpy.testing.assert_equal', 'assert_equal', (['res.bic', 'fixed_res.bic'], {}), '(res.bic, fixed_res.bic)\n', (31138, 31162), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((31171, 31227), 'numpy.testing.assert_equal', 'assert_equal', (['res.loglikelihood', 'fixed_res.loglikelihood'], {}), '(res.loglikelihood, fixed_res.loglikelihood)\n', (31183, 31227), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((31236, 31286), 'numpy.testing.assert_equal', 'assert_equal', (['res.num_params', 'fixed_res.num_params'], {}), '(res.num_params, fixed_res.num_params)\n', (31248, 31286), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, 
assert_equal\n'), ((31409, 31434), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y_series'], {}), '(self.y_series)\n', (31419, 31434), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((31487, 31512), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y_series'], {}), '(self.y_series)\n', (31497, 31512), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((31564, 31650), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['res.conditional_volatility', 'fixed_res.conditional_volatility'], {}), '(res.conditional_volatility, fixed_res.\n conditional_volatility)\n', (31583, 31650), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((31676, 31725), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['res.params', 'fixed_res.params'], {}), '(res.params, fixed_res.params)\n', (31695, 31725), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((31734, 31770), 'numpy.testing.assert_equal', 'assert_equal', (['res.aic', 'fixed_res.aic'], {}), '(res.aic, fixed_res.aic)\n', (31746, 31770), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((31779, 31815), 'numpy.testing.assert_equal', 'assert_equal', (['res.bic', 'fixed_res.bic'], {}), '(res.bic, fixed_res.bic)\n', (31791, 31815), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((31824, 31880), 'numpy.testing.assert_equal', 'assert_equal', (['res.loglikelihood', 'fixed_res.loglikelihood'], {}), '(res.loglikelihood, fixed_res.loglikelihood)\n', (31836, 31880), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((31889, 31939), 'numpy.testing.assert_equal', 'assert_equal', (['res.num_params', 'fixed_res.num_params'], {}), '(res.num_params, 
fixed_res.num_params)\n', (31901, 31939), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((31990, 32015), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y_series'], {}), '(self.y_series)\n', (32000, 32015), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((32097, 32122), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y_series'], {}), '(self.y_series)\n', (32107, 32122), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((32203, 32252), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['res.params', 'fixed_res.params'], {}), '(res.params, fixed_res.params)\n', (32222, 32252), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((32261, 32297), 'numpy.testing.assert_equal', 'assert_equal', (['res.aic', 'fixed_res.aic'], {}), '(res.aic, fixed_res.aic)\n', (32273, 32297), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((32306, 32342), 'numpy.testing.assert_equal', 'assert_equal', (['res.bic', 'fixed_res.bic'], {}), '(res.bic, fixed_res.bic)\n', (32318, 32342), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((32351, 32407), 'numpy.testing.assert_equal', 'assert_equal', (['res.loglikelihood', 'fixed_res.loglikelihood'], {}), '(res.loglikelihood, fixed_res.loglikelihood)\n', (32363, 32407), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((32416, 32466), 'numpy.testing.assert_equal', 'assert_equal', (['res.num_params', 'fixed_res.num_params'], {}), '(res.num_params, fixed_res.num_params)\n', (32428, 32466), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((32516, 32541), 
'arch.univariate.mean.arch_model', 'arch_model', (['self.y_series'], {}), '(self.y_series)\n', (32526, 32541), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((32879, 33205), 'numpy.array', 'np.array', (['[0.83277114, 0.45194014, -0.33475561, -0.49463896, 0.54715787, 1.11895382, \n 1.31280266, 0.81464021, 0.8532107, 1.0967188, 0.9346354, 0.92289249, \n 1.01339085, 1.071065, 1.42413486, 1.15392453, 1.10929691, 0.96162061, \n 0.96489515, 0.93250153, 1.34509807, 1.80951607, 1.66313783, 1.38610821,\n 1.26381761]'], {}), '([0.83277114, 0.45194014, -0.33475561, -0.49463896, 0.54715787, \n 1.11895382, 1.31280266, 0.81464021, 0.8532107, 1.0967188, 0.9346354, \n 0.92289249, 1.01339085, 1.071065, 1.42413486, 1.15392453, 1.10929691, \n 0.96162061, 0.96489515, 0.93250153, 1.34509807, 1.80951607, 1.66313783,\n 1.38610821, 1.26381761])\n', (32887, 33205), True, 'import numpy as np\n'), ((33637, 33681), 'arch.univariate.mean.arch_model', 'arch_model', (['y'], {'mean': '"""ARX"""', 'lags': '(10)', 'p': '(5)', 'q': '(0)'}), "(y, mean='ARX', lags=10, p=5, q=0)\n", (33647, 33681), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((34050, 34075), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y_series'], {}), '(self.y_series)\n', (34060, 34075), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((34438, 34479), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y_series'], {'vol': '"""Constant"""'}), "(self.y_series, vol='Constant')\n", (34448, 34479), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((34613, 34665), 'numpy.testing.assert_equal', 'assert_equal', (['res.resid.values', 'res_adj.resid.values'], {}), '(res.resid.values, res_adj.resid.values)\n', (34625, 34665), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, 
assert_equal\n'), ((34674, 34728), 'numpy.testing.assert_equal', 'assert_equal', (['res.params.values', 'res_adj.params.values'], {}), '(res.params.values, res_adj.params.values)\n', (34686, 34728), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((34899, 34953), 'numpy.testing.assert_equal', 'assert_equal', (['res.params.values', 'res_adj.params.values'], {}), '(res.params.values, res_adj.params.values)\n', (34911, 34953), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((34962, 35014), 'numpy.testing.assert_equal', 'assert_equal', (['res.resid.values', 'res_adj.resid.values'], {}), '(res.resid.values, res_adj.resid.values)\n', (34974, 35014), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((35144, 35195), 'numpy.testing.assert_equal', 'assert_equal', (['res.params.values', 'res2.params.values'], {}), '(res.params.values, res2.params.values)\n', (35156, 35195), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((35204, 35253), 'numpy.testing.assert_equal', 'assert_equal', (['res.resid.values', 'res2.resid.values'], {}), '(res.resid.values, res2.resid.values)\n', (35216, 35253), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((35481, 35532), 'numpy.testing.assert_equal', 'assert_equal', (['res.params.values', 'res2.params.values'], {}), '(res.params.values, res2.params.values)\n', (35493, 35532), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((35541, 35590), 'numpy.testing.assert_equal', 'assert_equal', (['res.resid.values', 'res2.resid.values'], {}), '(res.resid.values, res2.resid.values)\n', (35553, 35590), False, 'from numpy.testing import 
assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((35711, 35750), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y_series.iloc[100:900]'], {}), '(self.y_series.iloc[100:900])\n', (35721, 35750), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((35799, 35824), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y_series'], {}), '(self.y_series)\n', (35809, 35824), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((36013, 36064), 'numpy.testing.assert_equal', 'assert_equal', (['res.params.values', 'res2.params.values'], {}), '(res.params.values, res2.params.values)\n', (36025, 36064), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((36073, 36125), 'numpy.testing.assert_equal', 'assert_equal', (['res2.params.values', 'res3.params.values'], {}), '(res2.params.values, res3.params.values)\n', (36085, 36125), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((36140, 36180), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y_series'], {'hold_back': '(100)'}), '(self.y_series, hold_back=100)\n', (36150, 36180), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((36239, 36290), 'numpy.testing.assert_equal', 'assert_equal', (['res.params.values', 'res4.params.values'], {}), '(res.params.values, res4.params.values)\n', (36251, 36290), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((36414, 36480), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y_series.iloc[100:900]'], {'mean': '"""AR"""', 'lags': '[1, 2, 4]'}), "(self.y_series.iloc[100:900], mean='AR', lags=[1, 2, 4])\n", (36424, 36480), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, 
arch_model\n'), ((36529, 36581), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y_series'], {'mean': '"""AR"""', 'lags': '[1, 2, 4]'}), "(self.y_series, mean='AR', lags=[1, 2, 4])\n", (36539, 36581), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((36770, 36839), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.params.values', 'res2.params.values'], {'decimal': '(4)'}), '(res.params.values, res2.params.values, decimal=4)\n', (36789, 36839), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((36848, 36918), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res2.params.values', 'res3.params.values'], {'decimal': '(4)'}), '(res2.params.values, res3.params.values, decimal=4)\n', (36867, 36918), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((36933, 37000), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y_series'], {'mean': '"""AR"""', 'lags': '[1, 2, 4]', 'hold_back': '(100)'}), "(self.y_series, mean='AR', lags=[1, 2, 4], hold_back=100)\n", (36943, 37000), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((37072, 37141), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['res.params.values', 'res4.params.values'], {'decimal': '(4)'}), '(res.params.values, res4.params.values, decimal=4)\n', (37091, 37141), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((37241, 37258), 'numpy.random.RandomState', 'RandomState', (['(1234)'], {}), '(1234)\n', (37252, 37258), False, 'from numpy.random import RandomState\n'), ((37340, 37357), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (37347, 37357), True, 'import numpy as np\n'), ((38269, 38285), 'arch.univariate.mean.arch_model', 'arch_model', (['None'], {}), 
'(None)\n', (38279, 38285), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((38396, 38417), 'arch.univariate.mean.arch_model', 'arch_model', (['data.data'], {}), '(data.data)\n', (38406, 38417), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((38856, 38890), 'arch.univariate.mean.HARX', 'HARX', (['self.y[:24]'], {'lags': '[1, 5, 22]'}), '(self.y[:24], lags=[1, 5, 22])\n', (38860, 38890), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((38979, 39006), 'arch.univariate.mean.HARX', 'HARX', (['None'], {'lags': '[1, 5, 22]'}), '(None, lags=[1, 5, 22])\n', (38983, 39006), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((39517, 39562), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['res.params', 'res_z.params'], {}), '(res.params, res_z.params)\n', (39536, 39562), False, 'from pandas.testing import assert_frame_equal, assert_series_equal\n'), ((39913, 39934), 'numpy.asarray', 'np.asarray', (['res.resid'], {}), '(res.resid)\n', (39923, 39934), True, 'import numpy as np\n'), ((39978, 39988), 'numpy.exp', 'np.exp', (['bc'], {}), '(bc)\n', (39984, 39988), True, 'import numpy as np\n'), ((40278, 40303), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (40291, 40303), False, 'import pytest\n'), ((40420, 40445), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (40433, 40445), False, 'import pytest\n'), ((42380, 42410), 'pytest.warns', 'pytest.warns', (['DataScaleWarning'], {}), '(DataScaleWarning)\n', (42392, 42410), False, 'import pytest\n'), ((43030, 43042), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (43037, 43042), True, 'import numpy as np\n'), ((43053, 43085), 'pytest.warns', 'pytest.warns', (['ConvergenceWarning'], {}), '(ConvergenceWarning)\n', (43065, 43085), False, 'import pytest\n'), ((44475, 44532), 
'numpy.abs', 'np.abs', (['(res.model._backcast - res_limited.model._backcast)'], {}), '(res.model._backcast - res_limited.model._backcast)\n', (44481, 44532), True, 'import numpy as np\n'), ((44647, 44699), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""NaN or inf values"""'}), "(ValueError, match='NaN or inf values')\n", (44660, 44699), False, 'import pytest\n'), ((44709, 44722), 'arch.univariate.mean.arch_model', 'arch_model', (['y'], {}), '(y)\n', (44719, 44722), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((44793, 44845), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""NaN or inf values"""'}), "(ValueError, match='NaN or inf values')\n", (44806, 44845), False, 'import pytest\n'), ((44855, 44868), 'arch.univariate.mean.arch_model', 'arch_model', (['y'], {}), '(y)\n', (44865, 44868), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((44920, 44972), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""NaN or inf values"""'}), "(ValueError, match='NaN or inf values')\n", (44933, 44972), False, 'import pytest\n'), ((44982, 44995), 'arch.univariate.mean.arch_model', 'arch_model', (['y'], {}), '(y)\n', (44992, 44995), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((45133, 45150), 'arch.univariate.volatility.RiskMetrics2006', 'RiskMetrics2006', ([], {}), '()\n', (45148, 45150), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((45152, 45166), 'arch.univariate.volatility.EWMAVariance', 'EWMAVariance', ([], {}), '()\n', (45164, 45166), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((45583, 45640), 'pytest.raises', 
'pytest.raises', (['TypeError'], {'match': '"""volatility must inherit"""'}), "(TypeError, match='volatility must inherit')\n", (45596, 45640), False, 'import pytest\n'), ((45650, 45689), 'arch.univariate.mean.ConstantMean', 'ConstantMean', (['SP500'], {'volatility': '"""GARCH"""'}), "(SP500, volatility='GARCH')\n", (45662, 45689), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((45699, 45758), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""distribution must inherit"""'}), "(TypeError, match='distribution must inherit')\n", (45712, 45758), False, 'import pytest\n'), ((45768, 45810), 'arch.univariate.mean.ConstantMean', 'ConstantMean', (['SP500'], {'distribution': '"""Skew-t"""'}), "(SP500, distribution='Skew-t')\n", (45780, 45810), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((46653, 46680), 'pandas.Series', 'pd.Series', (['fcast.mean.index'], {}), '(fcast.mean.index)\n', (46662, 46680), True, 'import pandas as pd\n'), ((46682, 46704), 'pandas.Series', 'pd.Series', (['SP500.index'], {}), '(SP500.index)\n', (46691, 46704), True, 'import pandas as pd\n'), ((46748, 46777), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (46761, 46777), False, 'import pytest\n'), ((46787, 46811), 'arch.univariate.mean.arch_model', 'arch_model', (['SP500'], {'p': '"""3"""'}), "(SP500, p='3')\n", (46797, 46811), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((2016, 2023), 'arch.univariate.volatility.GARCH', 'GARCH', ([], {}), '()\n', (2021, 2023), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((2038, 2053), 'arch.univariate.distribution.Normal', 'Normal', ([], {'seed': 'rs'}), '(seed=rs)\n', (2044, 2053), False, 'from arch.univariate.distribution import 
GeneralizedError, Normal, SkewStudent, StudentsT\n'), ((2583, 2608), 'numpy.array', 'np.array', (['[0.1, 0.1, 0.8]'], {}), '([0.1, 0.1, 0.8])\n', (2591, 2608), True, 'import numpy as np\n'), ((2629, 2654), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2642, 2654), False, 'import pytest\n'), ((3551, 3576), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3564, 3576), False, 'import pytest\n'), ((3813, 3829), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (3821, 3829), True, 'import numpy as np\n'), ((3855, 3869), 'numpy.empty', 'np.empty', (['(0,)'], {}), '((0,))\n', (3863, 3869), True, 'import numpy as np\n'), ((4729, 4795), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""horizon must be an integer >= 1"""'}), "(ValueError, match='horizon must be an integer >= 1')\n", (4742, 4795), False, 'import pytest\n'), ((5299, 5315), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (5307, 5315), True, 'import numpy as np\n'), ((5341, 5355), 'numpy.empty', 'np.empty', (['(0,)'], {}), '((0,))\n', (5349, 5355), True, 'import numpy as np\n'), ((6969, 6996), 'numpy.testing.assert_equal', 'assert_equal', (['b[0]', '(-np.inf)'], {}), '(b[0], -np.inf)\n', (6981, 6996), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((7009, 7035), 'numpy.testing.assert_equal', 'assert_equal', (['b[1]', 'np.inf'], {}), '(b[1], np.inf)\n', (7021, 7035), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((7237, 7253), 'numpy.empty', 'np.empty', (['(0, 5)'], {}), '((0, 5))\n', (7245, 7253), True, 'import numpy as np\n'), ((7279, 7290), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (7287, 7290), True, 'import numpy as np\n'), ((7342, 7367), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7355, 7367), False, 'import pytest\n'), ((7633, 7658), 
'numpy.mean', 'np.mean', (['y[i + 17:i + 22]'], {}), '(y[i + 17:i + 22])\n', (7640, 7658), True, 'import numpy as np\n'), ((7685, 7705), 'numpy.mean', 'np.mean', (['y[i:i + 22]'], {}), '(y[i:i + 22])\n', (7692, 7705), True, 'import numpy as np\n'), ((8236, 8261), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8249, 8261), False, 'import pytest\n'), ((8275, 8313), 'arch.univariate.mean.HARX', 'HARX', (['self.y', 'self.x'], {'lags': '[1, -5, 22]'}), '(self.y, self.x, lags=[1, -5, 22])\n', (8279, 8313), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((8327, 8352), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8340, 8352), False, 'import pytest\n'), ((8366, 8406), 'arch.univariate.mean.HARX', 'HARX', (['self.y', 'self.x'], {'lags': '[0, 1, 5, 22]'}), '(self.y, self.x, lags=[0, 1, 5, 22])\n', (8370, 8406), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((8420, 8445), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8433, 8445), False, 'import pytest\n'), ((8459, 8497), 'arch.univariate.mean.HARX', 'HARX', (['self.y', 'self.x'], {'lags': '[[-1], [3]]'}), '(self.y, self.x, lags=[[-1], [3]])\n', (8463, 8497), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((8511, 8536), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8524, 8536), False, 'import pytest\n'), ((8550, 8587), 'arch.univariate.mean.HARX', 'HARX', (['self.y', 'self.x'], {'lags': '[[0], [0]]'}), '(self.y, self.x, lags=[[0], [0]])\n', (8554, 8587), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((8601, 8626), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8614, 8626), False, 'import pytest\n'), ((8640, 8689), 'arch.univariate.mean.HARX', 'HARX', (['self.y', 'self.x'], {'lags': '[[1, 1, 
3], [2, 3, 3]]'}), '(self.y, self.x, lags=[[1, 1, 3], [2, 3, 3]])\n', (8644, 8689), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((8703, 8728), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8716, 8728), False, 'import pytest\n'), ((8742, 8781), 'arch.univariate.mean.HARX', 'HARX', (['self.y', 'self.x'], {'lags': '[[[1], [3]]]'}), '(self.y, self.x, lags=[[[1], [3]]])\n', (8746, 8781), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((9304, 9331), 'numpy.testing.assert_equal', 'assert_equal', (['b[0]', '(-np.inf)'], {}), '(b[0], -np.inf)\n', (9316, 9331), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((9344, 9370), 'numpy.testing.assert_equal', 'assert_equal', (['b[1]', 'np.inf'], {}), '(b[1], np.inf)\n', (9356, 9370), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((9641, 9652), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (9649, 9652), True, 'import numpy as np\n'), ((9885, 9910), 'numpy.mean', 'np.mean', (['y[i + 17:i + 22]'], {}), '(y[i + 17:i + 22])\n', (9892, 9910), True, 'import numpy as np\n'), ((9937, 9957), 'numpy.mean', 'np.mean', (['y[i:i + 22]'], {}), '(y[i:i + 22])\n', (9944, 9957), True, 'import numpy as np\n'), ((10127, 10152), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10140, 10152), False, 'import pytest\n'), ((12816, 12843), 'numpy.testing.assert_equal', 'assert_equal', (['b[0]', '(-np.inf)'], {}), '(b[0], -np.inf)\n', (12828, 12843), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((12856, 12882), 'numpy.testing.assert_equal', 'assert_equal', (['b[1]', 'np.inf'], {}), '(b[1], np.inf)\n', (12868, 12882), False, 'from numpy.testing import assert_allclose, assert_almost_equal, 
assert_array_almost_equal, assert_equal\n'), ((13050, 13066), 'numpy.empty', 'np.empty', (['(0, 4)'], {}), '((0, 4))\n', (13058, 13066), True, 'import numpy as np\n'), ((13092, 13103), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (13100, 13103), True, 'import numpy as np\n'), ((13647, 13679), 'numpy.array', 'np.array', (['[[1, 2, 3], [1, 2, 3]]'], {}), '([[1, 2, 3], [1, 2, 3]])\n', (13655, 13679), True, 'import numpy as np\n'), ((14128, 14155), 'numpy.testing.assert_equal', 'assert_equal', (['b[0]', '(-np.inf)'], {}), '(b[0], -np.inf)\n', (14140, 14155), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((14168, 14194), 'numpy.testing.assert_equal', 'assert_equal', (['b[1]', 'np.inf'], {}), '(b[1], np.inf)\n', (14180, 14194), False, 'from numpy.testing import assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal\n'), ((14355, 14371), 'numpy.empty', 'np.empty', (['(0, 4)'], {}), '((0, 4))\n', (14363, 14371), True, 'import numpy as np\n'), ((14397, 14408), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (14405, 14408), True, 'import numpy as np\n'), ((15116, 15140), 'numpy.zeros', 'np.zeros', (['(y.shape[0] + 5)'], {}), '(y.shape[0] + 5)\n', (15124, 15140), True, 'import numpy as np\n'), ((15634, 15666), 'numpy.array', 'np.array', (['[[1, 2, 3], [1, 2, 3]]'], {}), '([[1, 2, 3], [1, 2, 3]])\n', (15642, 15666), True, 'import numpy as np\n'), ((17566, 17591), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (17579, 17591), False, 'import pytest\n'), ((21682, 21707), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (21695, 21707), False, 'import pytest\n'), ((21721, 21755), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'mean': '"""unknown"""'}), "(self.y, mean='unknown')\n", (21731, 21755), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((21769, 21794), 
'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (21782, 21794), False, 'import pytest\n'), ((21808, 21841), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'vol': '"""unknown"""'}), "(self.y, vol='unknown')\n", (21818, 21841), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((21855, 21880), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (21868, 21880), False, 'import pytest\n'), ((21894, 21928), 'arch.univariate.mean.arch_model', 'arch_model', (['self.y'], {'dist': '"""unknown"""'}), "(self.y, dist='unknown')\n", (21904, 21928), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((22890, 22915), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (22903, 22915), False, 'import pytest\n'), ((23038, 23063), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23051, 23063), False, 'import pytest\n'), ((23077, 23093), 'arch.univariate.mean.ARX', 'ARX', (['self.y'], {'x': 'x'}), '(self.y, x=x)\n', (23080, 23093), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((23107, 23132), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23120, 23132), False, 'import pytest\n'), ((23188, 23213), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23201, 23213), False, 'import pytest\n'), ((23227, 23247), 'arch.univariate.mean.ARX', 'ARX', (['self.y'], {'lags': '(-1)'}), '(self.y, lags=-1)\n', (23230, 23247), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((23261, 23286), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23274, 23286), False, 'import pytest\n'), ((23392, 23417), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23405, 23417), False, 'import pytest\n'), ((23435, 23443), 
'arch.univariate.distribution.Normal', 'Normal', ([], {}), '()\n', (23441, 23443), False, 'from arch.univariate.distribution import GeneralizedError, Normal, SkewStudent, StudentsT\n'), ((23488, 23513), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23501, 23513), False, 'import pytest\n'), ((23531, 23538), 'arch.univariate.volatility.GARCH', 'GARCH', ([], {}), '()\n', (23536, 23538), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((23620, 23645), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23633, 23645), False, 'import pytest\n'), ((23706, 23731), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23719, 23731), False, 'import pytest\n'), ((23787, 23812), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23800, 23812), False, 'import pytest\n'), ((23913, 23938), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23926, 23938), False, 'import pytest\n'), ((23968, 23986), 'arch.univariate.volatility.ConstantVariance', 'ConstantVariance', ([], {}), '()\n', (23984, 23986), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((24069, 24105), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (24092, 24105), False, 'import warnings\n'), ((24124, 24168), 'arch.univariate.mean.ARX', 'ARX', (['self.y'], {'lags': '[1, 2, 3, 12]', 'hold_back': '(5)'}), '(self.y, lags=[1, 2, 3, 12], hold_back=5)\n', (24127, 24168), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((24219, 24255), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (24242, 24255), False, 
'import warnings\n'), ((24274, 24334), 'arch.univariate.mean.HARX', 'HARX', (['self.y'], {'lags': '[[1, 1, 1], [2, 5, 22]]', 'use_rotated': '(True)'}), '(self.y, lags=[[1, 1, 1], [2, 5, 22]], use_rotated=True)\n', (24278, 24334), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((25103, 25128), 'numpy.asarray', 'np.asarray', (['res_ar.params'], {}), '(res_ar.params)\n', (25113, 25128), True, 'import numpy as np\n'), ((25130, 25158), 'numpy.asarray', 'np.asarray', (['res_ar_v2.params'], {}), '(res_ar_v2.params)\n', (25140, 25158), True, 'import numpy as np\n'), ((25188, 25213), 'numpy.asarray', 'np.asarray', (['res_ar.params'], {}), '(res_ar.params)\n', (25198, 25213), True, 'import numpy as np\n'), ((25215, 25246), 'numpy.asarray', 'np.asarray', (['res_har_r_v2.params'], {}), '(res_har_r_v2.params)\n', (25225, 25246), True, 'import numpy as np\n'), ((25289, 25317), 'numpy.asarray', 'np.asarray', (['res_ar.param_cov'], {}), '(res_ar.param_cov)\n', (25299, 25317), True, 'import numpy as np\n'), ((25319, 25353), 'numpy.asarray', 'np.asarray', (['res_har_r_v2.param_cov'], {}), '(res_har_r_v2.param_cov)\n', (25329, 25353), True, 'import numpy as np\n'), ((26057, 26093), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (26080, 26093), False, 'import warnings\n'), ((30664, 30689), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (30677, 30689), False, 'import pytest\n'), ((30703, 30746), 'arch.univariate.base._align_forecast', '_align_forecast', (['forecasts'], {'align': '"""unknown"""'}), "(forecasts, align='unknown')\n", (30718, 30746), False, 'from arch.univariate.base import ARCHModelForecast, ARCHModelResult, _align_forecast\n'), ((32607, 32617), 'io.StringIO', 'StringIO', ([], {}), '()\n', (32615, 32617), False, 'from io import StringIO\n'), ((33755, 33776), 'pytest.warns', 'pytest.warns', (['warning'], {}), '(warning)\n', (33767, 33776), 
False, 'import pytest\n'), ((33825, 33846), 'pytest.warns', 'pytest.warns', (['warning'], {}), '(warning)\n', (33837, 33846), False, 'import pytest\n'), ((33914, 33944), 'pytest.warns', 'pytest.warns', (['DataScaleWarning'], {}), '(DataScaleWarning)\n', (33926, 33944), False, 'import pytest\n'), ((34089, 34114), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (34102, 34114), False, 'import pytest\n'), ((34192, 34217), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (34205, 34217), False, 'import pytest\n'), ((38344, 38375), 'numpy.array', 'np.array', (['[0.0, 0.1, 0.1, 0.85]'], {}), '([0.0, 0.1, 0.1, 0.85])\n', (38352, 38375), True, 'import numpy as np\n'), ((38569, 38605), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (38592, 38605), False, 'import warnings\n'), ((38904, 38929), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (38917, 38929), False, 'import pytest\n'), ((39020, 39047), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (39033, 39047), False, 'import pytest\n'), ((40260, 40267), 'arch.univariate.volatility.GARCH', 'GARCH', ([], {}), '()\n', (40265, 40267), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((40392, 40409), 'arch.univariate.volatility.RiskMetrics2006', 'RiskMetrics2006', ([], {}), '()\n', (40407, 40409), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((40950, 40957), 'arch.univariate.volatility.GARCH', 'GARCH', ([], {}), '()\n', (40955, 40957), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), 
((41439, 41462), 'pandas.concat', 'pd.concat', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (41448, 41462), True, 'import pandas as pd\n'), ((41561, 41579), 'statsmodels.regression.linear_model.OLS', 'smlm.OLS', (['lhs', 'rhs'], {}), '(lhs, rhs)\n', (41569, 41579), True, 'import statsmodels.regression.linear_model as smlm\n'), ((42764, 42800), 'arch.univariate.mean.arch_model', 'arch_model', (['data.data'], {'rescale': '(False)'}), '(data.data, rescale=False)\n', (42774, 42800), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((46337, 46361), 'arch.univariate.mean.ConstantMean', 'ConstantMean', (['sp500_copy'], {}), '(sp500_copy)\n', (46349, 46361), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((46878, 46891), 'arch.univariate.mean.arch_model', 'arch_model', (['y'], {}), '(y)\n', (46888, 46891), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((46919, 46936), 'arch.univariate.mean.arch_model', 'arch_model', (['SP500'], {}), '(SP500)\n', (46929, 46936), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((47479, 47514), 'arch.univariate.mean.arch_model', 'arch_model', (['y'], {'mean': 'mean', 'lags': 'lags'}), '(y, mean=mean, lags=lags)\n', (47489, 47514), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((47542, 47581), 'arch.univariate.mean.arch_model', 'arch_model', (['SP500'], {'mean': 'mean', 'lags': 'lags'}), '(SP500, mean=mean, lags=lags)\n', (47552, 47581), False, 'from arch.univariate.mean import ARX, HARX, LS, ConstantMean, ZeroMean, arch_model\n'), ((48044, 48060), 'numpy.isfinite', 'np.isfinite', (['cv1'], {}), '(cv1)\n', (48055, 48060), True, 'import numpy as np\n'), ((48067, 48083), 'numpy.isfinite', 'np.isfinite', (['cv2'], {}), '(cv2)\n', (48078, 48083), True, 'import numpy as np\n'), ((2680, 2705), 
'numpy.array', 'np.array', (['[0.1, 0.1, 0.8]'], {}), '([0.1, 0.1, 0.8])\n', (2688, 2705), True, 'import numpy as np\n'), ((4276, 4302), 'numpy.arange', 'np.arange', (['self.y.shape[0]'], {}), '(self.y.shape[0])\n', (4285, 4302), True, 'import numpy as np\n'), ((5715, 5741), 'numpy.arange', 'np.arange', (['self.y.shape[0]'], {}), '(self.y.shape[0])\n', (5724, 5741), True, 'import numpy as np\n'), ((7760, 7779), 'numpy.linalg.pinv', 'np.linalg.pinv', (['rhs'], {}), '(rhs)\n', (7774, 7779), True, 'import numpy as np\n'), ((10031, 10050), 'numpy.linalg.pinv', 'np.linalg.pinv', (['rhs'], {}), '(rhs)\n', (10045, 10050), True, 'import numpy as np\n'), ((10350, 10362), 'numpy.arange', 'np.arange', (['t'], {}), '(t)\n', (10359, 10362), True, 'import numpy as np\n'), ((13494, 13513), 'numpy.linalg.pinv', 'np.linalg.pinv', (['rhs'], {}), '(rhs)\n', (13508, 13513), True, 'import numpy as np\n'), ((14707, 14726), 'numpy.linalg.pinv', 'np.linalg.pinv', (['rhs'], {}), '(rhs)\n', (14721, 14726), True, 'import numpy as np\n'), ((14898, 14919), 'numpy.arange', 'np.arange', (['y.shape[0]'], {}), '(y.shape[0])\n', (14907, 14919), True, 'import numpy as np\n'), ((15240, 15309), 'numpy.array', 'np.array', (['[1.0, fcast[i + h - 1], fcast[i + h - 2], fcast[i + h - 3]]'], {}), '([1.0, fcast[i + h - 1], fcast[i + h - 2], fcast[i + h - 3]])\n', (15248, 15309), True, 'import numpy as np\n'), ((16447, 16454), 'arch.univariate.volatility.GARCH', 'GARCH', ([], {}), '()\n', (16452, 16454), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((16469, 16480), 'arch.univariate.distribution.StudentsT', 'StudentsT', ([], {}), '()\n', (16478, 16480), False, 'from arch.univariate.distribution import GeneralizedError, Normal, SkewStudent, StudentsT\n'), ((17337, 17344), 'arch.univariate.volatility.GARCH', 'GARCH', ([], {}), '()\n', (17342, 17344), False, 'from 
arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((17359, 17370), 'arch.univariate.distribution.StudentsT', 'StudentsT', ([], {}), '()\n', (17368, 17370), False, 'from arch.univariate.distribution import GeneralizedError, Normal, SkewStudent, StudentsT\n'), ((23671, 23681), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (23678, 23681), True, 'import numpy as np\n'), ((23757, 23767), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (23764, 23767), True, 'import numpy as np\n'), ((23838, 23848), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (23845, 23848), True, 'import numpy as np\n'), ((37522, 37545), 'arch.univariate.volatility.FixedVariance', 'FixedVariance', (['variance'], {}), '(variance)\n', (37535, 37545), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((37745, 37768), 'arch.univariate.volatility.FixedVariance', 'FixedVariance', (['variance'], {}), '(variance)\n', (37758, 37768), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((37983, 38023), 'arch.univariate.volatility.FixedVariance', 'FixedVariance', (['variance'], {'unit_scale': '(True)'}), '(variance, unit_scale=True)\n', (37996, 38023), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((38223, 38254), 'numpy.random.RandomState', 'RandomState', (['[12891298, 843084]'], {}), '([12891298, 843084])\n', (38234, 38254), False, 'from numpy.random import RandomState\n'), ((39233, 39251), 'arch.univariate.volatility.ConstantVariance', 'ConstantVariance', ([], {}), '()\n', (39249, 39251), False, 'from 
arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((39278, 39286), 'arch.univariate.distribution.Normal', 'Normal', ([], {}), '()\n', (39284, 39286), False, 'from arch.univariate.distribution import GeneralizedError, Normal, SkewStudent, StudentsT\n'), ((39377, 39395), 'arch.univariate.volatility.ConstantVariance', 'ConstantVariance', ([], {}), '()\n', (39393, 39395), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((39410, 39418), 'arch.univariate.distribution.Normal', 'Normal', ([], {}), '()\n', (39416, 39418), False, 'from arch.univariate.distribution import GeneralizedError, Normal, SkewStudent, StudentsT\n'), ((40471, 40483), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (40478, 40483), True, 'import numpy as np\n'), ((41083, 41114), 'numpy.power', 'np.power', (['(nobs / 100.0)', '(1 / 4.0)'], {}), '(nobs / 100.0, 1 / 4.0)\n', (41091, 41114), True, 'import numpy as np\n'), ((46211, 46244), 'itertools.product', 'product', (['*([ascii_lowercase] * 3)'], {}), '(*([ascii_lowercase] * 3))\n', (46218, 46244), False, 'from itertools import product\n'), ((47740, 47756), 'numpy.isfinite', 'np.isfinite', (['cv1'], {}), '(cv1)\n', (47751, 47756), True, 'import numpy as np\n'), ((47766, 47782), 'numpy.isfinite', 'np.isfinite', (['cv2'], {}), '(cv2)\n', (47777, 47782), True, 'import numpy as np\n'), ((47840, 47855), 'numpy.isfinite', 'np.isfinite', (['r1'], {}), '(r1)\n', (47851, 47855), True, 'import numpy as np\n'), ((47865, 47880), 'numpy.isfinite', 'np.isfinite', (['r2'], {}), '(r2)\n', (47876, 47880), True, 'import numpy as np\n'), ((3624, 3636), 'numpy.array', 'np.array', (['(10)'], {}), '(10)\n', (3632, 3636), True, 'import numpy as np\n'), ((5583, 5603), 'numpy.mean', 'np.mean', (['(self.y ** 2)'], {}), '(self.y ** 
2)\n', (5590, 5603), True, 'import numpy as np\n'), ((7401, 7421), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (7409, 7421), True, 'import numpy as np\n'), ((22946, 22972), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (22954, 22972), True, 'import numpy as np\n'), ((23164, 23173), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (23170, 23173), True, 'import numpy as np\n'), ((41829, 41842), 'scipy.stats.chi2', 'stats.chi2', (['(5)'], {}), '(5)\n', (41839, 41842), False, 'from scipy import stats\n'), ((42056, 42070), 'scipy.stats.chi2', 'stats.chi2', (['df'], {}), '(df)\n', (42066, 42070), False, 'from scipy import stats\n'), ((46507, 46514), 'arch.univariate.volatility.GARCH', 'GARCH', ([], {}), '()\n', (46512, 46514), False, 'from arch.univariate.volatility import APARCH, ARCH, EGARCH, FIGARCH, GARCH, HARCH, ConstantVariance, EWMAVariance, FixedVariance, MIDASHyperbolic, RiskMetrics2006\n'), ((1820, 1832), 'arch.data.sp500.load', 'sp500.load', ([], {}), '()\n', (1830, 1832), False, 'from arch.data import sp500\n')] |
import random
import math
import numpy as np
import cv2
import matplotlib.pyplot as plt
__author__ = '__Girish_Hegde__'
class Sampler:
    """Draw uniformly distributed random points from a disc.

    Args:
        radius (float): disc radius.
        center (tuple): (x, y) coordinates of the disc center.
        method (str): name of the unit-disc sampling strategy; one of
            'rejection_sample', 'ITS' or 'triangle_sample'.
    """

    def __init__(self, radius=1, center=(0, 0), method='rejection_sample'):
        self.radius = radius
        self.r2 = radius**2
        self.cx, self.cy = center
        # bind the chosen unit-disc sampler once, up front
        self._sample_unit_circle = getattr(self, method)

    def sample(self, n=1):
        """Return an (n, 2) numpy array of points inside the configured disc."""
        points = []
        for _ in range(n):
            ux, uy = self._sample_unit_circle()
            # scale the unit-disc sample by the radius, then shift to the center
            points.append((self.cx + ux*self.radius, self.cy + uy*self.radius))
        return np.array(points)

    def rejection_sample(self):
        """Rejection sampling of the unit disc.

        Repeatedly draw uniform points in the enclosing [-1, 1] x [-1, 1]
        square until one lands inside the circle x^2 + y^2 < 1.

        prob(success) per draw = area(circle)/area(square) = pi/4 ~ 78.5%,
        so on average 4/pi ~ 1.27 draws are needed per accepted sample.
        """
        while True:
            # uniform(-1, 1) in both coordinates
            x = (random.random() - 0.5)*2
            y = (random.random() - 0.5)*2
            if x*x + y*y < 1:
                return (x, y)

    def ITS(self):
        """Inverse Transform Sampling of the unit disc.

        In polar coordinates theta is uniform on [0, 2.pi).  The radius must
        NOT be uniform: the amount of area (perimeter) at radius r grows
        linearly with r, so pdf(r) = 2r, hence cdf(r) = r^2 and the inverse
        transform gives r = sqrt(uniform(0, 1)).
        """
        angle = random.random()*2*math.pi
        # inverse transform of cdf(r) = r^2
        dist = math.sqrt(random.random())
        # polar -> cartesian
        return (dist*math.cos(angle), dist*math.sin(angle))

    def triangle_sample(self):
        """Sum-of-uniforms ("zero area triangle") sampling of the unit disc.

        Think of the disc as infinitely many zero-area triangles (radial
        lines).  r = u1 + u2 folded back into [0, 1] (r -> 2 - r when r >= 1)
        yields the same linear pdf(r) = 2r as the ITS method; the line is
        chosen by a uniform angle theta in [0, 2.pi).
        """
        angle = random.random()*2*math.pi
        dist = random.random() + random.random()
        # reflect samples that fall past the "diagonal" back inside
        if dist >= 1:
            dist = 2 - dist
        # polar -> cartesian
        return (dist*math.cos(angle), dist*math.sin(angle))
def main():
    """Visualize disc sampling: draw axes + circle outline, then plot samples live."""
    radius = 200
    # circle center in cartesian coordinates (origin at the frame middle)
    cx, cy = 0, 0
    num_samples = 10000
    point_color = (0, 0, 255)
    # frame side: 3x the furthest extent; force it odd so the axes are centered
    hw = 3*(max(abs(cx), abs(cy)) + radius)
    if hw % 2 == 0:
        hw += 1
    frame = np.zeros((hw, hw, 3), dtype=np.uint8)
    # coordinate axes through the middle, plus the target circle outline
    frame[hw//2, :] = 255
    frame[:, hw//2] = 255
    cv2.circle(frame, (hw//2 + cx, hw - (hw//2 + cy)), radius, (255, 0, 0))
    sampler = Sampler(radius, (cx, cy), 'ITS')
    samples = sampler.sample(num_samples).astype(np.int32)
    # plot point by point so the sampling can be watched live; press 'q' to quit early
    for px, py in samples:
        # cartesian -> image coords (image y axis points down)
        frame[hw - (hw//2 + py), hw//2 + px] = point_color
        cv2.imshow("circle sampling", frame)
        if cv2.waitKey(1) == ord('q'):
            break
if __name__ == '__main__':
main() | [
"cv2.imshow",
"math.cos",
"numpy.array",
"cv2.circle",
"numpy.zeros",
"random.random",
"math.sin",
"cv2.waitKey"
] | [((3885, 3922), 'numpy.zeros', 'np.zeros', (['(hw, hw, 3)'], {'dtype': 'np.uint8'}), '((hw, hw, 3), dtype=np.uint8)\n', (3893, 3922), True, 'import numpy as np\n'), ((3995, 4071), 'cv2.circle', 'cv2.circle', (['coord_frame', '(hw // 2 + cx, hw - (hw // 2 + cy))', 'r', '(255, 0, 0)'], {}), '(coord_frame, (hw // 2 + cx, hw - (hw // 2 + cy)), r, (255, 0, 0))\n', (4005, 4071), False, 'import cv2\n'), ((662, 678), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (670, 678), True, 'import numpy as np\n'), ((4386, 4428), 'cv2.imshow', 'cv2.imshow', (['"""circle sampling"""', 'coord_frame'], {}), "('circle sampling', coord_frame)\n", (4396, 4428), False, 'import cv2\n'), ((4442, 4456), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4453, 4456), False, 'import cv2\n'), ((2517, 2532), 'random.random', 'random.random', ([], {}), '()\n', (2530, 2532), False, 'import random\n'), ((2584, 2599), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (2592, 2599), False, 'import math\n'), ((2620, 2635), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (2628, 2635), False, 'import math\n'), ((3458, 3473), 'random.random', 'random.random', ([], {}), '()\n', (3471, 3473), False, 'import random\n'), ((3476, 3491), 'random.random', 'random.random', ([], {}), '()\n', (3489, 3491), False, 'import random\n'), ((3600, 3615), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (3608, 3615), False, 'import math\n'), ((3636, 3651), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (3644, 3651), False, 'import math\n'), ((1161, 1176), 'random.random', 'random.random', ([], {}), '()\n', (1174, 1176), False, 'import random\n'), ((1200, 1215), 'random.random', 'random.random', ([], {}), '()\n', (1213, 1215), False, 'import random\n'), ((2407, 2422), 'random.random', 'random.random', ([], {}), '()\n', (2420, 2422), False, 'import random\n'), ((3414, 3429), 'random.random', 'random.random', ([], {}), '()\n', (3427, 3429), False, 'import random\n'), ((1353, 1368), 
'random.random', 'random.random', ([], {}), '()\n', (1366, 1368), False, 'import random\n'), ((1396, 1411), 'random.random', 'random.random', ([], {}), '()\n', (1409, 1411), False, 'import random\n')] |
"""
Trainer for BiGAN/ALI
"""
import numpy as np
import torch
from torch.autograd import Variable
from tqdm import tqdm
from ....common import FloatTensor
from ....utils.plot import get_visdom_line_plotter
class Trainer(object):
    """Adversarial training loop for BiGAN/ALI models, with optional GAN tricks.

    ``trick_dict`` may contain:
      - 'noisy_input':  {'sigma': float, 'decay': float} -- add Gaussian noise
        with linearly decaying sigma to the real samples,
      - 'label_smooth': {'valid_range': float|list, 'fake_range': float|list}
        -- fixed or uniformly randomized discriminator targets,
      - 'flip_label':   {'num_steps_per_flip': int} -- periodically swap the
        valid/fake targets.
    Smoothed losses and discriminator outputs are plotted through a visdom
    line plotter obtained from ``get_visdom_line_plotter``.
    """
    def __init__(self, trick_dict=None):
        # Fall back to a fresh dict (avoids a shared mutable default argument).
        if trick_dict is None:
            self.trick_dict = {}
        else:
            self.trick_dict = trick_dict
        self.global_step = 0  # total number of optimizer steps across epochs
        self.plotter = get_visdom_line_plotter('main')
    def _create_real_data(self, raw_real_data):
        """Return the real batch cast to FloatTensor, with optional decaying
        Gaussian input noise (the 'noisy_input' trick)."""
        noisy_input = self.trick_dict.get('noisy_input', None)
        if noisy_input:
            raw_real_data = raw_real_data + torch.from_numpy(
                np.random.randn(*raw_real_data.shape) * noisy_input['sigma']).type(torch.FloatTensor)
            # Linearly anneal the noise level, never going below zero.
            noisy_input['sigma'] = max(0, noisy_input['sigma'] - noisy_input['decay'])
        real_data = raw_real_data.type(FloatTensor)
        return real_data
    def _create_valid(self, batch_size):
        """Build the (batch_size, 1) target tensor for 'real' labels.

        With label smoothing a list-valued 'valid_range' yields per-sample
        uniform targets; a scalar yields a constant target (default 1.0)."""
        soft_label = self.trick_dict.get('label_smooth', None)
        if soft_label:
            valid_range = soft_label['valid_range']
        else:
            valid_range = 1.
        if isinstance(valid_range, list):
            valid = Variable(FloatTensor(batch_size, 1).uniform_(*valid_range), requires_grad=False)
        else:
            valid = Variable(FloatTensor(batch_size, 1).fill_(valid_range), requires_grad=False)
        return valid
    def _create_fake(self, batch_size):
        """Build the (batch_size, 1) target tensor for 'fake' labels
        (mirror image of _create_valid, default constant 0.0)."""
        soft_label = self.trick_dict.get('label_smooth', None)
        if soft_label:
            fake_range = soft_label['fake_range']
        else:
            fake_range = 0.
        if isinstance(fake_range, list):
            fake = Variable(FloatTensor(batch_size, 1).uniform_(*fake_range), requires_grad=False)
        else:
            fake = Variable(FloatTensor(batch_size, 1).fill_(fake_range), requires_grad=False)
        return fake
    def train(self, num_epoch, data_loader, gan_model, checkpoint_path, epoch_per_save, callbacks):
        """Run the adversarial training loop.

        Args:
            num_epoch: number of passes over ``data_loader``.
            data_loader: iterable yielding (data, auxiliary info) tuples.
            gan_model: object exposing ``_train``, ``save_checkpoint`` and
                ``code_size``.
            checkpoint_path: checkpoint target; falsy disables saving.
            epoch_per_save: save a checkpoint every this many epochs.
            callbacks: callables invoked as ``callback(self, gan_model)``
                after every epoch.
        """
        for epoch in range(num_epoch):
            # we sample a batch after each epoch
            dis_loss_lst = []
            gen_loss_lst = []
            D_x_lst = []
            D_G_z_lst = []
            # plot smoothing: exponential moving averages; plot_ws accumulates
            # the EMA weight so dividing by it corrects the startup bias.
            smooth_factor = 0.95
            plot_dis_s = 0
            plot_gen_s = 0
            plot_D_x = 0
            plot_D_G_z = 0
            plot_ws = 0
            print('Epoch {}'.format(epoch + 1))
            for input_and_aux in tqdm(data_loader):
                # We assume the input_and_label is a tuple containing data and auxiliary information
                # Adversarial ground truths
                batch_size = input_and_aux[0].shape[0]
                valid = self._create_valid(batch_size)
                fake = self._create_fake(batch_size)
                flip_label = self.trick_dict.get('flip_label', None)
                if flip_label and (self.global_step + 1) % flip_label['num_steps_per_flip'] == 0:
                    valid, fake = fake, valid
                # sample noise
                z = Variable(FloatTensor(np.random.normal(0, 1, (batch_size, gan_model.code_size))))
                real_data = self._create_real_data(input_and_aux[0])
                d_loss, g_loss, D_x, D_G_z = gan_model._train(real_data, z, valid, fake)
                dis_loss = d_loss.item()
                gen_loss = g_loss.item()
                plot_dis_s = plot_dis_s * smooth_factor + dis_loss * (1 - smooth_factor)
                plot_gen_s = plot_gen_s * smooth_factor + gen_loss * (1 - smooth_factor)
                plot_D_x = plot_D_x * smooth_factor + D_x.item() * (1 - smooth_factor)
                plot_D_G_z = plot_D_G_z * smooth_factor + D_G_z.item() * (1 - smooth_factor)
                plot_ws = plot_ws * smooth_factor + (1 - smooth_factor)
                dis_loss_lst.append(plot_dis_s / plot_ws)
                gen_loss_lst.append(plot_gen_s / plot_ws)
                D_x_lst.append(plot_D_x / plot_ws)
                D_G_z_lst.append(plot_D_G_z / plot_ws)
                self.global_step += 1
            noisy_input = self.trick_dict.get('noisy_input', None)
            if noisy_input:
                print('Noisy input sigma: {:.4f}'.format(noisy_input['sigma']))
            if checkpoint_path and (epoch + 1) % epoch_per_save == 0:
                gan_model.save_checkpoint(checkpoint_path)
            # plot loss figure (x axis = global steps covered by this epoch)
            step = [a for a in range(self.global_step - len(dis_loss_lst), self.global_step)]
            data = np.array([dis_loss_lst, gen_loss_lst]).transpose()
            legend = ['dis_loss', 'gen_loss']
            self.plotter.plot('gan_loss', legend, step, data)
            data = np.array([D_x_lst, D_G_z_lst]).transpose()
            legend = ['D_x', 'D_G_z']
            self.plotter.plot('gan_output', legend, step, data)
            # callbacks
            for callback in callbacks:
                callback(self, gan_model)
        if checkpoint_path:
            gan_model.save_checkpoint(checkpoint_path)
| [
"numpy.random.normal",
"numpy.array",
"tqdm.tqdm",
"numpy.random.randn"
] | [((2503, 2520), 'tqdm.tqdm', 'tqdm', (['data_loader'], {}), '(data_loader)\n', (2507, 2520), False, 'from tqdm import tqdm\n'), ((4567, 4605), 'numpy.array', 'np.array', (['[dis_loss_lst, gen_loss_lst]'], {}), '([dis_loss_lst, gen_loss_lst])\n', (4575, 4605), True, 'import numpy as np\n'), ((4745, 4775), 'numpy.array', 'np.array', (['[D_x_lst, D_G_z_lst]'], {}), '([D_x_lst, D_G_z_lst])\n', (4753, 4775), True, 'import numpy as np\n'), ((3119, 3176), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(batch_size, gan_model.code_size)'], {}), '(0, 1, (batch_size, gan_model.code_size))\n', (3135, 3176), True, 'import numpy as np\n'), ((691, 728), 'numpy.random.randn', 'np.random.randn', (['*raw_real_data.shape'], {}), '(*raw_real_data.shape)\n', (706, 728), True, 'import numpy as np\n')] |
'''
SFCMapper.py
Updated: 2/6/18
This script contains methods to generate space filling curves and uses them to
map high dimensional data into lower dimensions.
'''
import numpy as np
class SFCMapper(object):
    """
    SFCMapper generates 3D and 2D Hilbert space filling curves and maps the
    traversal of the 3D curve onto the 2D curve. Because each curve visits
    every cell of its space exactly once, a 3D discrete space is reduced to
    a 2D discrete space bijectively.
    """

    def __init__(self, size_3d):
        '''
        Generates the 2D and 3D space filling curves used for mapping 3D
        to 2D.

        Param:
            size_3d - int ; side length of the cubic 3D space. Must be a
                power of 2 whose cube is a perfect square (1, 4, 16, 64, ...)
                so that both Hilbert curves exist.

        Raises:
            ValueError ; if the 3D space cannot be mapped onto a square 2D
                space with Hilbert curves.
        '''
        # Set size variables
        self.size_3d = size_3d
        # The Hilbert construction below needs a power-of-two side length;
        # reject sizes (e.g. 9) that previously produced a wrong mapping
        # silently because int(np.log2(...)) truncated the order.
        if size_3d < 1 or size_3d & (size_3d - 1) != 0:
            raise ValueError("3D space side length must be a power of 2.")
        # A size_3d^3 cube flattens onto a square of side size_3d^(3/2).
        self.size_2d = np.sqrt(self.size_3d**3)
        if self.size_2d % 1.0 != 0:
            # Raise instead of the old print()+exit(): library code must not
            # terminate the interpreter.
            raise ValueError("3D Space not mappable to 2D with Hilbert Curve.")
        self.size_2d = int(self.size_2d)

        # Generate Curves
        print("Generating Space-Filling Curves...")
        self.curve_3d = self.__hilbert_3d(int(np.log2(self.size_3d)))
        self.curve_2d = self.__hilbert_2d(int(np.log2(self.size_2d)))

    def map_3d_to_2d(self, array_3d):
        '''
        Encodes a 3D array into 2D by walking the 3D and 2D Hilbert curves
        in lockstep: the i-th cell of the 2D curve receives the value of the
        i-th cell of the 3D curve.

        Param:
            array_3d - np.array ; cubic array of side self.size_3d
        Return:
            array_2d - np.array ; square array of side self.size_2d
        '''
        s = int(np.sqrt(len(self.curve_2d)))
        array_2d = np.zeros([s, s])
        # Vectorized gather/scatter replaces the former per-cell Python loop;
        # both curves are bijections, so there are no duplicate destinations.
        c2d = self.curve_2d
        c3d = self.curve_3d
        array_2d[c2d[:, 0], c2d[:, 1]] = array_3d[c3d[:, 0], c3d[:, 1], c3d[:, 2]]
        return array_2d

    def __hilbert_3d(self, order):
        '''
        Generates a 3D Hilbert curve of the desired order.

        Param:
            order - int ; order of curve
        Returns:
            np.array ; list of (x, y, z) coordinates of curve
        '''
        def gen_3d(order, x, y, z, xi, xj, xk, yi, yj, yk, zi, zj, zk, array):
            if order == 0:
                # Emit the center of the current (sub-)cube.
                xx = x + (xi + yi + zi)/3
                yy = y + (xj + yj + zj)/3
                zz = z + (xk + yk + zk)/3
                array.append((xx, yy, zz))
            else:
                # Recurse into the 8 octants with rotated/reflected frames.
                gen_3d(order-1, x, y, z, yi/2, yj/2, yk/2, zi/2, zj/2, zk/2, xi/2, xj/2, xk/2, array)
                gen_3d(order-1, x + xi/2, y + xj/2, z + xk/2, zi/2, zj/2, zk/2, xi/2, xj/2, xk/2,
                       yi/2, yj/2, yk/2, array)
                gen_3d(order-1, x + xi/2 + yi/2, y + xj/2 + yj/2, z + xk/2 + yk/2, zi/2, zj/2, zk/2,
                       xi/2, xj/2, xk/2, yi/2, yj/2, yk/2, array)
                gen_3d(order-1, x + xi/2 + yi, y + xj/2+ yj, z + xk/2 + yk, -xi/2, -xj/2, -xk/2, -yi/2,
                       -yj/2, -yk/2, zi/2, zj/2, zk/2, array)
                gen_3d(order-1, x + xi/2 + yi + zi/2, y + xj/2 + yj + zj/2, z + xk/2 + yk +zk/2, -xi/2,
                       -xj/2, -xk/2, -yi/2, -yj/2, -yk/2, zi/2, zj/2, zk/2, array)
                gen_3d(order-1, x + xi/2 + yi + zi, y + xj/2 + yj + zj, z + xk/2 + yk + zk, -zi/2, -zj/2,
                       -zk/2, xi/2, xj/2, xk/2, -yi/2, -yj/2, -yk/2, array)
                gen_3d(order-1, x + xi/2 + yi/2 + zi, y + xj/2 + yj/2 + zj , z + xk/2 + yk/2 + zk, -zi/2,
                       -zj/2, -zk/2, xi/2, xj/2, xk/2, -yi/2, -yj/2, -yk/2, array)
                gen_3d(order-1, x + xi/2 + zi, y + xj/2 + zj, z + xk/2 + zk, yi/2, yj/2, yk/2, -zi/2, -zj/2,
                       -zk/2, -xi/2, -xj/2, -xk/2, array)

        n = pow(2, order)
        hilbert_curve = []
        gen_3d(order, 0, 0, 0, n, 0, 0, 0, n, 0, 0, 0, n, hilbert_curve)
        return np.array(hilbert_curve).astype('int')

    def __hilbert_2d(self, order):
        '''
        Generates a 2D Hilbert curve of the desired order.

        Param:
            order - int ; order of curve
        Returns:
            np.array ; list of (x, y) coordinates of curve
        '''
        def gen_2d(order, x, y, xi, xj, yi, yj, array):
            if order == 0:
                # Emit the center of the current (sub-)square.
                xx = x + (xi + yi)/2
                yy = y + (xj + yj)/2
                array.append((xx, yy))
            else:
                # Recurse into the 4 quadrants with rotated/reflected frames.
                gen_2d(order-1, x, y, yi/2, yj/2, xi/2, xj/2, array)
                gen_2d(order-1, x + xi/2, y + xj/2, xi/2, xj/2, yi/2, yj/2, array)
                gen_2d(order-1, x + xi/2 + yi/2, y + xj/2 + yj/2, xi/2, xj/2, yi/2, yj/2, array)
                gen_2d(order-1, x + xi/2 + yi, y + xj/2 + yj, -yi/2,-yj/2,-xi/2,-xj/2, array)

        n = pow(2, order)
        hilbert_curve = []
        gen_2d(order, 0, 0, n, 0, 0, n, hilbert_curve)
        return np.array(hilbert_curve).astype('int')
| [
"numpy.array",
"numpy.log2",
"numpy.zeros",
"numpy.sqrt"
] | [((723, 749), 'numpy.sqrt', 'np.sqrt', (['(self.size_3d ** 3)'], {}), '(self.size_3d ** 3)\n', (730, 749), True, 'import numpy as np\n'), ((1423, 1439), 'numpy.zeros', 'np.zeros', (['[s, s]'], {}), '([s, s])\n', (1431, 1439), True, 'import numpy as np\n'), ((1039, 1060), 'numpy.log2', 'np.log2', (['self.size_3d'], {}), '(self.size_3d)\n', (1046, 1060), True, 'import numpy as np\n'), ((1109, 1130), 'numpy.log2', 'np.log2', (['self.size_2d'], {}), '(self.size_2d)\n', (1116, 1130), True, 'import numpy as np\n'), ((3667, 3690), 'numpy.array', 'np.array', (['hilbert_curve'], {}), '(hilbert_curve)\n', (3675, 3690), True, 'import numpy as np\n'), ((4642, 4665), 'numpy.array', 'np.array', (['hilbert_curve'], {}), '(hilbert_curve)\n', (4650, 4665), True, 'import numpy as np\n')] |
import inspect
from abc import abstractmethod, ABCMeta
from typing import Callable, Union, Optional, List
from joblib import Memory
# import cupy as cp
from warnings import warn
import numpy as np
from scipy.integrate import quad
# from Operator import Quadrature
from decorators import timer, vectorize
# Directory where joblib persists cached results between runs.
location = './cachedir'
# Module-level disk cache (1 GiB limit); used below e.g. to memoise the
# expensive noise-level integral in EstimatorSpectrum.estimate_delta.
memory = Memory(location, verbose=0, bytes_limit=1024 * 1024 * 1024)
class EstimatorAbstract(metaclass=ABCMeta):
    """Interface every estimator must implement (see EstimatorSpectrum in
    this module for a partial implementation)."""
    @abstractmethod
    def estimate(self, *args, **kwargs):
        """Run the full estimation procedure."""
        ...

    @abstractmethod
    def refresh(self, *args, **kwargs):
        """Refresh/reset internal state (exact semantics defined by subclasses)."""
        ...

    @abstractmethod
    def estimate_q(self, *args, **kwargs):
        """Estimate the function q from the observations."""
        ...

    @abstractmethod
    def estimate_delta(self, *args, **kwargs):
        """Estimate the noise level delta from the observations."""
        ...
# class EstimatorDiscretize(EstimatorAbstract, Quadrature):
# def __init__(self, kernel: Callable, lower: Union[float, int], upper: Union[float, int], grid_size: int,
# observations: np.ndarray, sample_size: int, quadrature: str = 'rectangle'):
# Quadrature.__init__(self, lower, upper, grid_size)
# try:
# kernel(np.array([1, 2]), np.array([1, 2]))
# self.kernel: Callable = kernel
# except ValueError:
# warn('Force vectorization of kernel')
# self.kernel: Callable = np.vectorize(kernel)
# assert quadrature in ['rectangle', 'dummy'], 'This type of quadrature is not supported, currently only {} ' \
# 'are supported'.format(
# [method for method in dir(Quadrature) if not method.startswith('_')])
# assert callable(kernel), 'Kernel function must be callable'
# assert isinstance(observations,
# np.ndarray), 'Observations must be provided as numpy array, but {} was provided'.format(
# observations)
# assert isinstance(sample_size, int), 'Sample size must be an integer'
# self.lower: float = float(lower)
# self.upper: float = float(upper)
# self.grid_size: int = grid_size
# self.quadrature: Callable = getattr(super(), quadrature)
# self.__observations: np.ndarray = observations.astype(float)
# self.sample_size: int = sample_size
# self.__delta: float = 0.
# self.__q_estimator: cp.ndarray = cp.empty(self.grid_size, dtype=cp.float64)
# self.__grid: np.ndarray = getattr(super(), quadrature + '_grid')()
# self.__weights_np: np.ndarray = self.quadrature(self.__grid)
# self.__weights: cp.ndarray = cp.asarray(self.quadrature(self.__grid))
#
# @property
# def delta(self) -> float:
# return self.__delta
#
# @delta.setter
# def delta(self, delta: float):
# self.__delta = delta
#
# @property
# def q_estimator(self) -> cp.ndarray:
# return self.__q_estimator
#
# @q_estimator.setter
# def q_estimator(self, q_estimator: cp.ndarray):
# self.__q_estimator = q_estimator
#
# @property
# def observations(self) -> np.ndarray:
# return self.__observations
#
# @observations.setter
# def observations(self, observations: np.ndarray):
# self.__observations = observations
#
# @timer
# def estimate_q(self):
# """
# Estimate function q on given grid based on the observations.
# """
# print('Estimating q function...')
# estimator_list: List[np.ndarray] = \
# [np.divide(np.sum(self.kernel(x, self.__observations)), self.sample_size) for x in self.__grid]
# estimator: np.ndarray = np.stack(estimator_list, axis=0).astype(np.float64)
# self.__q_estimator = cp.asarray(estimator)
#
# @timer
# def estimate_delta(self):
# """
# Estimate noise level based on the observations and approximation of function w.
# """
# print('Estimating noise level...')
# w_function_list: List[np.ndarray] = \
# [np.sum(np.multiply(self.__weights_np, np.square(self.kernel(self.__grid, y)))) for y in
# self.__observations]
# w_function: np.ndarray = np.stack(w_function_list, axis=0)
# delta: float = np.sqrt(np.divide(np.sum(w_function), np.square(self.sample_size)))
# self.__delta = delta
# print('Estimated noise level: {}'.format(delta))
#
# def L2norm(self, x: cp.ndarray, y: cp.ndarray) -> cp.ndarray:
# """
# Calculate the approximation of L2 norm of difference of two approximation of function.
# :param x: Approximation of function on given grid.
# :type x: np.ndarray
# :param y: Approximation of function on given grid.
# :type y: np.ndarray
# :return: Float representing the L2 norm of difference between given functions.
# """
# return cp.sqrt(cp.sum(cp.multiply(cp.square(cp.subtract(x, y)), self.__weights)))
#
# def estimate(self):
# raise NotImplementedError
#
# def refresh(self):
# raise NotImplementedError
class EstimatorSpectrum(EstimatorAbstract):
    """
    Common functionality for spectrum-based estimators: validates and stores
    the kernel and observations, estimates the function q and the noise
    level delta. ``estimate``/``refresh`` are left to concrete subclasses.
    """
    def __init__(self, kernel: Callable, observations: np.ndarray, sample_size: int, transformed_measure: bool,
                 lower: Union[float, int] = 0, upper: Union[float, int] = 1):
        assert isinstance(transformed_measure, bool), 'Please provide an information about measure transformation as ' \
                                                      'True or False'
        self.transformed_measure = transformed_measure
        assert isinstance(kernel, Callable), 'Kernel function must be callable'
        # Probe the kernel on small arrays: non-vectorized kernels typically
        # raise ValueError here and are wrapped with np.vectorize instead.
        try:
            kernel(np.array([1, 2]), np.array([1, 2]))
            self.kernel: Callable = kernel
        except ValueError:
            warn('Force vectorization of kernel')
            self.kernel: Callable = np.vectorize(kernel)
        assert isinstance(lower, (int, float)), 'Lower bound for integration interval must be a number, but ' \
                                                'was {} provided'.format(lower)
        self.lower: Union[float, int] = lower
        assert isinstance(upper, (int, float)), 'Upper bound for integration interval must be a number, but' \
                                                ' was {} provided'.format(upper)
        self.upper: Union[float, int] = upper
        assert isinstance(observations, np.ndarray), 'Please provide the observations in a form of numpy array'
        # Name-mangled attribute; exposed through the `observations` property.
        self.__observations: np.ndarray = observations
        assert isinstance(sample_size, int), 'Sample size must be an integer, but was {} provided'.format(sample_size)
        self.sample_size: int = sample_size
        # Filled in by estimate_q / estimate_delta respectively.
        self.q_estimator: Optional[Callable] = None
        self.__w_function: Optional[Callable] = None
        self.delta: float = 0.

    @property
    def observations(self) -> np.ndarray:
        return self.__observations

    @observations.setter
    def observations(self, observations: np.ndarray):
        self.__observations = observations

    @timer
    def estimate_q(self) -> None:
        """
        Estimate function q based on the observations using the known kernel.
        The result is stored as the vectorized callable ``self.q_estimator``.
        """
        print('Estimating q function...')
        # Bind to locals so the closures below do not capture `self`.
        observations: np.ndarray = self.observations
        kernel: Callable = self.kernel
        sample_size: int = self.sample_size
        if self.transformed_measure:
            # Transformed measure: q(x) = 2/n * #{obs < x}, i.e. twice the
            # empirical CDF; the kernel is not needed in this branch.
            def __q_estimator(x: Union[float, int]) -> np.float64:
                x: np.ndarray = np.repeat(x, observations.shape[0])
                return np.divide(np.multiply(2, np.sum(np.less(observations, x))), sample_size)
        else:
            # Untransformed measure: q(x) = mean of kernel(x, obs_i).
            def __q_estimator(x: Union[float, int]) -> np.float64:
                x: np.ndarray = np.repeat(x, observations.shape[0])
                return np.divide(np.sum(kernel(x, observations)), sample_size)
        self.q_estimator = np.vectorize(__q_estimator)

    @timer
    def estimate_delta(self):
        """
        Estimate noise level based on the observations and known kernel (via w function).
        The result is stored in ``self.delta``.
        """
        print('Estimating noise level...')
        if self.transformed_measure:
            # Closed-form expression: delta = sqrt(2 * sum(1 - obs^2)) / n.
            self.delta = np.sqrt(np.divide(2*np.sum(1 - np.square(self.observations)), self.sample_size ** 2))
        else:
            kernel: Callable = self.kernel
            lower = self.lower
            upper = self.upper

            def kernel_integrand(x: float, y: float) -> np.float64:
                return np.square(kernel(x, y))

            # w(y) = integral of kernel(x, y)^2 over [lower, upper].
            @vectorize(signature='()->()')
            def w_function(y: float) -> float:
                return quad(kernel_integrand, lower, upper, args=y, limit=10000)[0]

            # Disk-cached via joblib; the kernel's source text is part of the
            # cache key so different kernels do not share cached results.
            @memory.cache
            def delta_estimator_helper_nontransformed(observations: np.ndarray, sample_size: int,
                                                      kernel_formula: str) -> float:
                return np.sqrt(np.divide(np.sum(w_function(observations)), sample_size ** 2))

            self.delta = delta_estimator_helper_nontransformed(self.observations, self.sample_size,
                                                               inspect.getsource(kernel).split('return')[1].strip())
        print('Estimated noise level: {}'.format(self.delta))

    def estimate(self, *args, **kwargs):
        raise NotImplementedError

    def refresh(self, *args, **kwargs):
        raise NotImplementedError
| [
"decorators.vectorize",
"numpy.less",
"numpy.repeat",
"scipy.integrate.quad",
"numpy.square",
"joblib.Memory",
"numpy.array",
"warnings.warn",
"numpy.vectorize",
"inspect.getsource"
] | [((342, 401), 'joblib.Memory', 'Memory', (['location'], {'verbose': '(0)', 'bytes_limit': '(1024 * 1024 * 1024)'}), '(location, verbose=0, bytes_limit=1024 * 1024 * 1024)\n', (348, 401), False, 'from joblib import Memory\n'), ((7869, 7896), 'numpy.vectorize', 'np.vectorize', (['__q_estimator'], {}), '(__q_estimator)\n', (7881, 7896), True, 'import numpy as np\n'), ((8493, 8522), 'decorators.vectorize', 'vectorize', ([], {'signature': '"""()->()"""'}), "(signature='()->()')\n", (8502, 8522), False, 'from decorators import timer, vectorize\n'), ((5648, 5664), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (5656, 5664), True, 'import numpy as np\n'), ((5666, 5682), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (5674, 5682), True, 'import numpy as np\n'), ((5766, 5803), 'warnings.warn', 'warn', (['"""Force vectorization of kernel"""'], {}), "('Force vectorization of kernel')\n", (5770, 5803), False, 'from warnings import warn\n'), ((5840, 5860), 'numpy.vectorize', 'np.vectorize', (['kernel'], {}), '(kernel)\n', (5852, 5860), True, 'import numpy as np\n'), ((7481, 7516), 'numpy.repeat', 'np.repeat', (['x', 'observations.shape[0]'], {}), '(x, observations.shape[0])\n', (7490, 7516), True, 'import numpy as np\n'), ((7726, 7761), 'numpy.repeat', 'np.repeat', (['x', 'observations.shape[0]'], {}), '(x, observations.shape[0])\n', (7735, 7761), True, 'import numpy as np\n'), ((8593, 8650), 'scipy.integrate.quad', 'quad', (['kernel_integrand', 'lower', 'upper'], {'args': 'y', 'limit': '(10000)'}), '(kernel_integrand, lower, upper, args=y, limit=10000)\n', (8597, 8650), False, 'from scipy.integrate import quad\n'), ((7572, 7596), 'numpy.less', 'np.less', (['observations', 'x'], {}), '(observations, x)\n', (7579, 7596), True, 'import numpy as np\n'), ((8189, 8217), 'numpy.square', 'np.square', (['self.observations'], {}), '(self.observations)\n', (8198, 8217), True, 'import numpy as np\n'), ((9122, 9147), 'inspect.getsource', 'inspect.getsource', 
(['kernel'], {}), '(kernel)\n', (9139, 9147), False, 'import inspect\n')] |
from dataclasses import dataclass
import flowpost.wake.helpers.wake_stats as ws
from wake_config import WakeCaseParams
import flowpost.IO.pyTecIO.tecreader as tecreader
import os
import numpy as np
from ...calc.stats import VelocityStatistics, ReynoldsStresses
###############################################################################
# Some data classes, never used
class DataField():
    """Base class for gridded field data.

    Attributes:
        dims: number of spatial dimensions of the field (default 2).
        struct_data: whether the underlying grid is structured (default False).
    """
    def __init__(self):
        # Bug fix: these used to be bare locals that were discarded when
        # __init__ returned; store them on the instance so subclasses
        # (e.g. VelocityField) can actually read them.
        self.dims = 2
        self.struct_data = False
class Coordinates:
    """Lightweight holder for the x/y/z coordinate arrays of a grid."""

    def __init__(self, x=None, y=None, z=None):
        # Store the three components exactly as handed in (None allowed).
        self.x, self.y, self.z = x, y, z
@dataclass(init=False)
class FieldSeries():
    """Time series of the three velocity component fields u, v, w, with
    optional coordinate arrays attached via ``set_coords``.
    """
    u: np.ndarray
    v: np.ndarray
    w: np.ndarray
    '''
    def __init__(self, time=0, x=None,y=None, z=None,u=None,v=None, w=None, struct_data=False, planar=True):
        #vel = VelocityField(self)
        #DataField.__init__(self)
        #self.coords = {}
        self.vel = {}
        self.set_velocities(u,v,w)
        self.planar = planar
        #self.sizex=cols
        #self.sizey=rows
        #print(str(cols) + ' cols by ' + str(rows) + ' rows')
        self.mean_u, self.mean_v, self.mean_w = compute_means(u,v,w)
        self.gradients = {}
        self.set_velocities(u,v,w)
    '''
    def set_velocities(self,u,v,w):
        # Attach the three velocity component arrays.
        self.u = u
        self.v = v
        self.w = w
    def set_coords(self,x,y,z):
        # Attach the coordinate arrays of the plane/volume.
        self.x = x
        self.y = y
        self.z = z
    def computeGradients(self):
        # NOTE(review): this reads self.vx, self.vy, self.dx and self.dy,
        # none of which are set anywhere in this class (they appear only in
        # the commented-out __init__ above) -- calling this as-is raises
        # AttributeError; TODO confirm intended attribute names (u/v?).
        dudy,dudx=np.gradient(self.vx,-self.dy/1000,self.dx/1000)
        dvdy,dvdx=np.gradient(self.vy,-self.dy/1000,self.dx/1000)
        self.gradients['dudy']=dudy
        self.gradients['dudx']=dudx
        self.gradients['dvdy']=dvdy
        self.gradients['dvdx']=dvdx
    # TODO and so on
    #skip = 0
    #self.u = np.array(u).reshape(self.sizey,self.sizex)
    #self.v = np.array(w).reshape(self.sizey,self.sizex)
    #self.u = self.u[:,skip:]
    #self.v = self.v[:,skip:]
'''
@dataclass
class ReynoldsStress():
uu: np.ndarray = None
vv: np.ndarray = None
ww: np.ndarray = None
uv: np.ndarray = None
uw: np.ndarray = None
vw: np.ndarray = None
kt: np.ndarray = None
def set_unnamed(self, initial_data):
print(initial_data)
for key in initial_data:
setattr(self, key, initial_data[key])
# https://stackoverflow.com/questions/2466191/set-attributes-from-dictionary-in-python
def set_values(self, *initial_data, **kwargs):
print('setting values')
print(initial_data)
for dictionary in initial_data:
print(initial_data)
for key in dictionary:
print('setting ' + str(key))
setattr(self, key, dictionary[key])
for key in kwargs:
setattr(self, key, kwargs[key])
'''
@dataclass
class WakeField():
    """Wake-plane dataset: velocity time series plus coordinates, derived
    statistics, and the case parameters / Tecplot dataset needed to
    post-process and save results.
    """
    vel: FieldSeries = None
    vel_prime: FieldSeries = None
    # NOTE(review): the two names below carry no annotation, so @dataclass
    # treats them as plain class attributes shared by ALL instances (the
    # VelocityStatistics instance in particular) -- TODO confirm intent.
    dataset = None # Tecplot dataset
    stats = VelocityStatistics()
    cs: str = 'AC'
    coords: Coordinates = None
    param: WakeCaseParams = None
    #def set_coords(self, x, y, z):
    #    self.coords = Coordinates(x=x, y=y, z=z)
    def set_coords(self,x,y,z):
        """Store the coordinate arrays directly on the instance."""
        self.x = x
        self.y = y
        self.z = z
    def rotate_CS(self, CSname):
        """Rotate dataset, coordinates and velocities by the angle of attack
        about the moment reference point (PMR) and record the new CS name.

        NOTE(review): reads self.y, which exists only after a prior
        set_coords call -- confirm calling order with callers.
        """
        print('rotating by ' + str(self.param.aoa))
        ws.rotate_dataset(self.dataset, self.param.x_PMR, self.param.z_PMR, self.param.aoa)
        x_WT, z_WT = ws.transform_wake_coords(self.vel.x,self.vel.z, self.param.x_PMR, self.param.z_PMR, self.param.aoa)
        u_WT, w_WT = ws.rotate_velocities(self.vel.u, self.vel.v, self.vel.w, self.param.x_PMR, self.param.z_PMR, self.param.aoa)
        self.vel.u = u_WT
        self.vel.w = w_WT
        self.cs = CSname
        self.set_coords(x_WT, self.y, z_WT)
    def compute_rstresses(self, do_save = False):
        """Delegate Reynolds-stress computation to self.stats."""
        self.stats.compute_rstresses(do_save = do_save, vel = self.vel)
        '''
        #uu,vv,ww,uv,uw,vw = ws.calc_rstresses(u,v,w)
        #self.rstresses = ReynoldsStress
        #self.rstresses.set_values()
        self.stats.rstresses = ws.calc_rstresses(self.vel.u, self.vel.v, self.vel.w, return_dict=True)
        self.stats.rstresses['kt'] = 0.5* (self.rstresses['uu'] + self.rstresses['vv'] + self.rstresses['ww'])
        #print('d: ' + str(d))
        #self.rstresses.set_unnamed(d)
        #print(type(self.rstresses))
        #print(type(self.rstresses['uu']))
        #self.rstresses.uu,vv,ww,uv,uw,vw = ws.calc_rstresses(u_WT,v,w_WT)
        if do_save:
            self.save_rstresses(self.rstresses, res_path = self.param.res_path, file_prefix = self.param.case_name+'_'+ self.param.plane_name)
        '''
    def field_PSD(self, data, dt = 1, n_bins = 2, n_overlap = 0.5, window = 'hann'):
        """Welch PSD at every point.

        ``data`` is indexed as (point, time sample); returns (f_mat, PSD_mat),
        both of shape (n_points, n_frequencies).
        """
        # NOTE(review): `import scipy` alone is not guaranteed to expose
        # scipy.signal; `import scipy.signal` would be safer -- TODO confirm.
        import scipy
        n_points = data.shape[0]
        n_samples = data.shape[1]
        nperseg = np.round(n_samples / (1 + n_overlap*(n_bins - 1))) # this takes into account the overlap
        print('computing Welch PSD for all points...')
        print('temporal samples: ' + str(n_samples))
        print('points per segment: ' + str(nperseg))
        for point in range(n_points):
            f, PSD = scipy.signal.welch(data[point, :], fs = 1 / dt,
                                    window = window,
                                    nperseg = nperseg,
                                    scaling = 'density')
            if point == 0:
                # Allocate the outputs once the frequency grid length is known.
                f_mat = np.zeros([n_points, len(f)])
                PSD_mat = np.zeros([n_points, len(f)])
            f_mat[point, :] = f
            PSD_mat[point, :] = PSD
        return f_mat, PSD_mat
    def compute_PSD(self, data, dt = None, n_bins = 2, n_overlap = 0.5, do_save = False):
        """Compute (and optionally save) the Welch PSD of u, v and w, one
        variable at a time to limit the peak memory footprint."""
        if dt is None:
            dt = self.param.dt
        '''
        if isinstance(data, list):
            print('is a list')
            print(self.vel.u.shape)
            in_data = []
            for entry in data:
                in_data.append(getattr(self.vel, entry))
        '''
        # Compute the PSDs for each variable one by one and save them to disk
        # immediately, as the results have a significant memory footprint
        for var, name in zip([self.vel.u, self.vel.v, self.vel.w], ['u', 'v', 'w']):
            f, PSD = self.field_PSD(var, dt = dt, n_bins=n_bins, n_overlap = n_overlap)
            if do_save:
                file_prefix = self.param.case_name+'_' + self.param.plane_name
                filename = os.path.join(self.param.res_path, file_prefix + '_' + str(name) +'_PSD')
                # All points share one frequency grid, hence f[0,:].
                np.savez(filename, x=self.x, y=self.y, z=self.z, f=f[0,:], PSD=PSD)
    def compute_skew_kurt(self, do_save = False):
        """Point-wise skewness and kurtosis of u/v/w along the time axis.

        NOTE(review): the ``do_save`` parameter is never read -- both PLT
        files are always written. TODO confirm whether writing should be
        guarded by do_save.
        """
        from scipy.stats import kurtosis, skew
        self.skew = {}
        self.kurt = {}
        for var, name in zip([self.vel.u, self.vel.v, self.vel.w], ['u', 'v', 'w']):
            self.skew[name] = skew(var, axis=-1)
            self.kurt[name] = kurtosis(var, axis=-1)
        res_path = self.param.res_path
        file_prefix = self.param.case_name+'_' + self.param.plane_name
        save_var= {'skew_u': self.skew['u'], 'skew_v': self.skew['v'], 'skew_w': self.skew['w']}
        filename = os.path.join(res_path, file_prefix + '_skewness.plt')
        tecreader.save_plt(save_var, self.dataset, filename, addvars = True, removevars = True)
        save_var= {'kurt_u': self.kurt['u'], 'kurt_v': self.kurt['v'], 'kurt_w': self.kurt['w']}
        filename = os.path.join(res_path, file_prefix + '_kurtosis.plt')
        tecreader.save_plt(save_var, self.dataset, filename, addvars = True, removevars = True)
    def write_stats(self, stat_name, file_prefix = 'test_data'):
        '''
        Write stats. Select the specific variable using name.
        Parameters
        ----------
        name : str
            the 1D signal
        TODO: this can only work if means have been computed
        TODO: add a loop to write multiple variables
        TODO: understand how the logger works!
        TODO: set proper name for the data variables inside the resulting PLT file
        '''
        # NOTE(review): self.cfg, write_file and logger are not defined in
        # this class or this module view -- calling this raises, and the bare
        # except then hides the real error. TODO confirm intended helpers.
        out_path = self.cfg["case"]["res_path"]
        try:
            write_file(self.dataset, getattr(self.stats, stat_name), out_path, file_prefix)
        except:
            logger.error('No variable ' + str(stat_name) + ' in stats')
            print('no variable!')
    def save_means(self):
        """Write the mean fields held in self.stats.mean to a PLT file."""
        res_path = self.param.res_path
        file_prefix = self.param.case_name+'_' + self.param.plane_name
        filename = os.path.join(res_path, file_prefix + '_means.plt')
        print(self.stats.mean)
        tecreader.save_plt(self.stats.mean, self.dataset, filename, addvars = True, removevars = True)
    def save_rstresses(self, rstress, res_path = None, file_prefix = None):
        """Write the six Reynolds-stress components plus kt to a PLT file.

        ``rstress`` is a dict with keys uu, vv, ww, uv, uw, vw, kt.
        """
        if res_path is None:
            res_path = self.param.res_path
        if file_prefix is None:
            file_prefix = self.param.case_name+'_' + self.param.plane_name
        # Save the results
        try:
            os.makedirs(res_path, mode = 0o777, exist_ok = True)
            print("Directory '%s' created successfully" %res_path)
        except:
            # NOTE(review): bare except also swallows unrelated errors.
            print("Directory '%s' can not be created"%res_path)
        save_var= {'uu': rstress['uu'], 'vv': rstress['vv'], 'ww': rstress['ww'], \
            'uv': rstress['uv'], 'uw': rstress['uw'], 'vw': rstress['vw'], 'kt': rstress['kt']}
        filename = os.path.join(res_path, file_prefix + '_rstresses.plt')
        tecreader.save_plt(save_var, self.dataset, filename, addvars = True, removevars = True)
    def compute_fluctuations(self):
        """Store the fluctuating parts u', v', w' on self.vel."""
        self.vel.uprime, self.vel.vprime, self.vel.wprime = ws.compute_fluctuations(self.vel.u, self.vel.v, self.vel.w)
    def compute_anisotropy(self, do_save = False):
        """Delegate anisotropy computation to self.stats."""
        self.stats.compute_anisotropy(do_save = do_save, vel = self.vel)
    def compute_means(self):
        """Compute mean u/v/w fields via self.stats."""
        self.stats.means(self.vel.u, 'u')
        self.stats.means(self.vel.v, 'v')
        self.stats.means(self.vel.w, 'w')
        '''
        mean_u = np.mean(u, axis=-1)
        mean_v = np.mean(v, axis=-1)
        mean_w = np.mean(w, axis=-1)
        '''
    # NOTE(review): the two defs below are missing 'self'; calling them on an
    # instance raises TypeError. TODO confirm whether they belong here at all.
    def save_plt():
        pass
    def data_to_dict(**kwargs):
        out = {}
        for key, value in kwargs.items():
            out[key] = value
        return out
    def transform(self):
        # NOTE(review): `param` and `vel` are undefined names here (compare
        # rotate_CS, which uses self.param / self.vel); calling this raises
        # NameError, and the rotated results are discarded. TODO fix or remove.
        ws.rotate_dataset(self.dataset, param.x_PMR, param.z_PMR, param.aoa)
        x_WT, z_WT = ws.transform_wake_coords(vel.x,vel.z, param.x_PMR, param.z_PMR, param.aoa)
        u_WT, w_WT = ws.rotate_velocities(vel.u, vel.v, vel.w, param.x_PMR, param.z_PMR, param.aoa)
    def save_anisotropy(self, atensor, ev, C, res_path = None, file_prefix = None):
        """Write the anisotropy tensor, its eigenvalues and its components to
        three PLT files.

        ``atensor`` is a dict (keys uu..vw); ``ev`` and ``C`` are indexed as
        (component, point).
        """
        if res_path is None:
            res_path = self.param.res_path
        if file_prefix is None:
            file_prefix = self.param.case_name+'_' + self.param.plane_name
        # Save the results
        try:
            os.makedirs(res_path, mode = 0o777, exist_ok = True)
            print("Directory '%s' created successfully" %res_path)
        except:
            # NOTE(review): bare except also swallows unrelated errors.
            print("Directory '%s' can not be created"%res_path)
        save_var= {'a_uu': atensor['uu'], 'a_vv': atensor['vv'], 'a_ww': atensor['ww'], \
            'a_uv': atensor['uv'], 'a_uw': atensor['uw'], 'a_vw': atensor['vw']}
        filename = os.path.join(res_path, file_prefix + '_anisotropy_tensor.plt')
        print(filename)
        tecreader.save_plt(save_var, self.dataset, filename, addvars = True, removevars = True)
        save_var= {'ev1': ev[0,:], 'ev2': ev[1,:], 'ev3': ev[2,:]}
        filename = os.path.join(res_path, file_prefix + '_anisotropy_eigenvalues.plt')
        tecreader.save_plt(save_var, self.dataset, filename, addvars = True, removevars = True)
        save_var= {'C1': C[0,:], 'C2': C[1,:], 'C3': C[2,:]}
        filename = os.path.join(res_path, file_prefix + '_anisotropy_components.plt')
        tecreader.save_plt(save_var, self.dataset, filename, addvars = True, removevars = True)
    def compute_independent_samples(self, acf_maxlag = 10, do_save = False):
        """Estimate the effective number of independent samples per point
        from the autocorrelation of u' and w': n_eff = n_samples / (2*index),
        where index comes from ws.compute_field_acf_index."""
        # Compute the autocorrelation function at each point
        uprime, _, wprime = ws.compute_fluctuations(self.vel.u, self.vel.v, self.vel.w)
        self.acf_u = ws.compute_field_acf(uprime, acf_maxlag)
        self.acf_w = ws.compute_field_acf(wprime, acf_maxlag)
        # Obtain the number of independent samples based on the ACF
        ind_u = ws.compute_field_acf_index(self.acf_u)
        ind_w = ws.compute_field_acf_index(self.acf_w)
        self.n_eff_u = self.vel.n_samples/(2*ind_u)
        self.n_eff_w = self.vel.n_samples/(2*ind_w)
        if do_save:
            res_path = self.param.res_path
            file_prefix = self.param.case_name+'_' + self.param.plane_name
            save_var= {'n_eff_u': self.n_eff_u, 'n_eff_w': self.n_eff_w}
            filename = os.path.join(res_path, file_prefix + '_ind_samples.plt')
            tecreader.save_plt(save_var, self.dataset, filename, addvars = True, removevars = True)
class VelocityField(DataField):
    """Velocity field storing the three components in the ``vel`` dictionary."""

    def __init__(self, x=None, z=None, v=None, u=None, w=None):
        # Initialise the base class first, then the component containers.
        super().__init__()
        self.coords = {}
        self.vel = {}
        self.set_velocities(u, v, w)

    def set_velocities(self, u, v, w):
        """Store the u/v/w velocity components under their canonical keys."""
        self.vel.update(u=u, v=v, w=w)
| [
"flowpost.wake.helpers.wake_stats.compute_field_acf_index",
"flowpost.wake.helpers.wake_stats.rotate_velocities",
"numpy.savez",
"scipy.signal.welch",
"os.makedirs",
"scipy.stats.kurtosis",
"dataclasses.dataclass",
"os.path.join",
"scipy.stats.skew",
"flowpost.wake.helpers.wake_stats.transform_wak... | [((586, 607), 'dataclasses.dataclass', 'dataclass', ([], {'init': '(False)'}), '(init=False)\n', (595, 607), False, 'from dataclasses import dataclass\n'), ((1596, 1649), 'numpy.gradient', 'np.gradient', (['self.vx', '(-self.dy / 1000)', '(self.dx / 1000)'], {}), '(self.vx, -self.dy / 1000, self.dx / 1000)\n', (1607, 1649), True, 'import numpy as np\n'), ((1662, 1715), 'numpy.gradient', 'np.gradient', (['self.vy', '(-self.dy / 1000)', '(self.dx / 1000)'], {}), '(self.vy, -self.dy / 1000, self.dx / 1000)\n', (1673, 1715), True, 'import numpy as np\n'), ((3468, 3556), 'flowpost.wake.helpers.wake_stats.rotate_dataset', 'ws.rotate_dataset', (['self.dataset', 'self.param.x_PMR', 'self.param.z_PMR', 'self.param.aoa'], {}), '(self.dataset, self.param.x_PMR, self.param.z_PMR, self.\n param.aoa)\n', (3485, 3556), True, 'import flowpost.wake.helpers.wake_stats as ws\n'), ((3573, 3678), 'flowpost.wake.helpers.wake_stats.transform_wake_coords', 'ws.transform_wake_coords', (['self.vel.x', 'self.vel.z', 'self.param.x_PMR', 'self.param.z_PMR', 'self.param.aoa'], {}), '(self.vel.x, self.vel.z, self.param.x_PMR, self.\n param.z_PMR, self.param.aoa)\n', (3597, 3678), True, 'import flowpost.wake.helpers.wake_stats as ws\n'), ((3694, 3806), 'flowpost.wake.helpers.wake_stats.rotate_velocities', 'ws.rotate_velocities', (['self.vel.u', 'self.vel.v', 'self.vel.w', 'self.param.x_PMR', 'self.param.z_PMR', 'self.param.aoa'], {}), '(self.vel.u, self.vel.v, self.vel.w, self.param.x_PMR,\n self.param.z_PMR, self.param.aoa)\n', (3714, 3806), True, 'import flowpost.wake.helpers.wake_stats as ws\n'), ((4999, 5051), 'numpy.round', 'np.round', (['(n_samples / (1 + n_overlap * (n_bins - 1)))'], {}), '(n_samples / (1 + n_overlap * (n_bins - 1)))\n', (5007, 5051), True, 'import numpy as np\n'), ((7342, 7395), 'os.path.join', 'os.path.join', (['res_path', "(file_prefix + '_skewness.plt')"], {}), "(res_path, file_prefix + '_skewness.plt')\n", (7354, 
7395), False, 'import os\n'), ((7404, 7491), 'flowpost.IO.pyTecIO.tecreader.save_plt', 'tecreader.save_plt', (['save_var', 'self.dataset', 'filename'], {'addvars': '(True)', 'removevars': '(True)'}), '(save_var, self.dataset, filename, addvars=True,\n removevars=True)\n', (7422, 7491), True, 'import flowpost.IO.pyTecIO.tecreader as tecreader\n'), ((7611, 7664), 'os.path.join', 'os.path.join', (['res_path', "(file_prefix + '_kurtosis.plt')"], {}), "(res_path, file_prefix + '_kurtosis.plt')\n", (7623, 7664), False, 'import os\n'), ((7673, 7760), 'flowpost.IO.pyTecIO.tecreader.save_plt', 'tecreader.save_plt', (['save_var', 'self.dataset', 'filename'], {'addvars': '(True)', 'removevars': '(True)'}), '(save_var, self.dataset, filename, addvars=True,\n removevars=True)\n', (7691, 7760), True, 'import flowpost.IO.pyTecIO.tecreader as tecreader\n'), ((8678, 8728), 'os.path.join', 'os.path.join', (['res_path', "(file_prefix + '_means.plt')"], {}), "(res_path, file_prefix + '_means.plt')\n", (8690, 8728), False, 'import os\n'), ((8768, 8862), 'flowpost.IO.pyTecIO.tecreader.save_plt', 'tecreader.save_plt', (['self.stats.mean', 'self.dataset', 'filename'], {'addvars': '(True)', 'removevars': '(True)'}), '(self.stats.mean, self.dataset, filename, addvars=True,\n removevars=True)\n', (8786, 8862), True, 'import flowpost.IO.pyTecIO.tecreader as tecreader\n'), ((9577, 9631), 'os.path.join', 'os.path.join', (['res_path', "(file_prefix + '_rstresses.plt')"], {}), "(res_path, file_prefix + '_rstresses.plt')\n", (9589, 9631), False, 'import os\n'), ((9640, 9727), 'flowpost.IO.pyTecIO.tecreader.save_plt', 'tecreader.save_plt', (['save_var', 'self.dataset', 'filename'], {'addvars': '(True)', 'removevars': '(True)'}), '(save_var, self.dataset, filename, addvars=True,\n removevars=True)\n', (9658, 9727), True, 'import flowpost.IO.pyTecIO.tecreader as tecreader\n'), ((9827, 9886), 'flowpost.wake.helpers.wake_stats.compute_fluctuations', 'ws.compute_fluctuations', (['self.vel.u', 
'self.vel.v', 'self.vel.w'], {}), '(self.vel.u, self.vel.v, self.vel.w)\n', (9850, 9886), True, 'import flowpost.wake.helpers.wake_stats as ws\n'), ((10512, 10580), 'flowpost.wake.helpers.wake_stats.rotate_dataset', 'ws.rotate_dataset', (['self.dataset', 'param.x_PMR', 'param.z_PMR', 'param.aoa'], {}), '(self.dataset, param.x_PMR, param.z_PMR, param.aoa)\n', (10529, 10580), True, 'import flowpost.wake.helpers.wake_stats as ws\n'), ((10602, 10677), 'flowpost.wake.helpers.wake_stats.transform_wake_coords', 'ws.transform_wake_coords', (['vel.x', 'vel.z', 'param.x_PMR', 'param.z_PMR', 'param.aoa'], {}), '(vel.x, vel.z, param.x_PMR, param.z_PMR, param.aoa)\n', (10626, 10677), True, 'import flowpost.wake.helpers.wake_stats as ws\n'), ((10698, 10776), 'flowpost.wake.helpers.wake_stats.rotate_velocities', 'ws.rotate_velocities', (['vel.u', 'vel.v', 'vel.w', 'param.x_PMR', 'param.z_PMR', 'param.aoa'], {}), '(vel.u, vel.v, vel.w, param.x_PMR, param.z_PMR, param.aoa)\n', (10718, 10776), True, 'import flowpost.wake.helpers.wake_stats as ws\n'), ((11491, 11553), 'os.path.join', 'os.path.join', (['res_path', "(file_prefix + '_anisotropy_tensor.plt')"], {}), "(res_path, file_prefix + '_anisotropy_tensor.plt')\n", (11503, 11553), False, 'import os\n'), ((11586, 11673), 'flowpost.IO.pyTecIO.tecreader.save_plt', 'tecreader.save_plt', (['save_var', 'self.dataset', 'filename'], {'addvars': '(True)', 'removevars': '(True)'}), '(save_var, self.dataset, filename, addvars=True,\n removevars=True)\n', (11604, 11673), True, 'import flowpost.IO.pyTecIO.tecreader as tecreader\n'), ((11762, 11829), 'os.path.join', 'os.path.join', (['res_path', "(file_prefix + '_anisotropy_eigenvalues.plt')"], {}), "(res_path, file_prefix + '_anisotropy_eigenvalues.plt')\n", (11774, 11829), False, 'import os\n'), ((11838, 11925), 'flowpost.IO.pyTecIO.tecreader.save_plt', 'tecreader.save_plt', (['save_var', 'self.dataset', 'filename'], {'addvars': '(True)', 'removevars': '(True)'}), '(save_var, self.dataset, 
filename, addvars=True,\n removevars=True)\n', (11856, 11925), True, 'import flowpost.IO.pyTecIO.tecreader as tecreader\n'), ((12008, 12074), 'os.path.join', 'os.path.join', (['res_path', "(file_prefix + '_anisotropy_components.plt')"], {}), "(res_path, file_prefix + '_anisotropy_components.plt')\n", (12020, 12074), False, 'import os\n'), ((12083, 12170), 'flowpost.IO.pyTecIO.tecreader.save_plt', 'tecreader.save_plt', (['save_var', 'self.dataset', 'filename'], {'addvars': '(True)', 'removevars': '(True)'}), '(save_var, self.dataset, filename, addvars=True,\n removevars=True)\n', (12101, 12170), True, 'import flowpost.IO.pyTecIO.tecreader as tecreader\n'), ((12338, 12397), 'flowpost.wake.helpers.wake_stats.compute_fluctuations', 'ws.compute_fluctuations', (['self.vel.u', 'self.vel.v', 'self.vel.w'], {}), '(self.vel.u, self.vel.v, self.vel.w)\n', (12361, 12397), True, 'import flowpost.wake.helpers.wake_stats as ws\n'), ((12419, 12459), 'flowpost.wake.helpers.wake_stats.compute_field_acf', 'ws.compute_field_acf', (['uprime', 'acf_maxlag'], {}), '(uprime, acf_maxlag)\n', (12439, 12459), True, 'import flowpost.wake.helpers.wake_stats as ws\n'), ((12481, 12521), 'flowpost.wake.helpers.wake_stats.compute_field_acf', 'ws.compute_field_acf', (['wprime', 'acf_maxlag'], {}), '(wprime, acf_maxlag)\n', (12501, 12521), True, 'import flowpost.wake.helpers.wake_stats as ws\n'), ((12607, 12645), 'flowpost.wake.helpers.wake_stats.compute_field_acf_index', 'ws.compute_field_acf_index', (['self.acf_u'], {}), '(self.acf_u)\n', (12633, 12645), True, 'import flowpost.wake.helpers.wake_stats as ws\n'), ((12662, 12700), 'flowpost.wake.helpers.wake_stats.compute_field_acf_index', 'ws.compute_field_acf_index', (['self.acf_w'], {}), '(self.acf_w)\n', (12688, 12700), True, 'import flowpost.wake.helpers.wake_stats as ws\n'), ((5310, 5411), 'scipy.signal.welch', 'scipy.signal.welch', (['data[point, :]'], {'fs': '(1 / dt)', 'window': 'window', 'nperseg': 'nperseg', 'scaling': '"""density"""'}), 
"(data[point, :], fs=1 / dt, window=window, nperseg=\n nperseg, scaling='density')\n", (5328, 5411), False, 'import scipy\n'), ((7040, 7058), 'scipy.stats.skew', 'skew', (['var'], {'axis': '(-1)'}), '(var, axis=-1)\n', (7044, 7058), False, 'from scipy.stats import kurtosis, skew\n'), ((7089, 7111), 'scipy.stats.kurtosis', 'kurtosis', (['var'], {'axis': '(-1)'}), '(var, axis=-1)\n', (7097, 7111), False, 'from scipy.stats import kurtosis, skew\n'), ((9172, 9218), 'os.makedirs', 'os.makedirs', (['res_path'], {'mode': '(511)', 'exist_ok': '(True)'}), '(res_path, mode=511, exist_ok=True)\n', (9183, 9218), False, 'import os\n'), ((11095, 11141), 'os.makedirs', 'os.makedirs', (['res_path'], {'mode': '(511)', 'exist_ok': '(True)'}), '(res_path, mode=511, exist_ok=True)\n', (11106, 11141), False, 'import os\n'), ((13042, 13098), 'os.path.join', 'os.path.join', (['res_path', "(file_prefix + '_ind_samples.plt')"], {}), "(res_path, file_prefix + '_ind_samples.plt')\n", (13054, 13098), False, 'import os\n'), ((13111, 13198), 'flowpost.IO.pyTecIO.tecreader.save_plt', 'tecreader.save_plt', (['save_var', 'self.dataset', 'filename'], {'addvars': '(True)', 'removevars': '(True)'}), '(save_var, self.dataset, filename, addvars=True,\n removevars=True)\n', (13129, 13198), True, 'import flowpost.IO.pyTecIO.tecreader as tecreader\n'), ((6709, 6777), 'numpy.savez', 'np.savez', (['filename'], {'x': 'self.x', 'y': 'self.y', 'z': 'self.z', 'f': 'f[0, :]', 'PSD': 'PSD'}), '(filename, x=self.x, y=self.y, z=self.z, f=f[0, :], PSD=PSD)\n', (6717, 6777), True, 'import numpy as np\n')] |
import numpy
import random
import sys
from nsga2 import Nsga2
class Mtsp(object):
    """Bi-objective (distance + cost) travelling salesman problem for NSGA-II."""

    @staticmethod
    def crossover_sequence_ox(parent_sequence_a, parent_sequence_b):
        """Order crossover (OX).

        Copies a random slice from each parent into the matching child, then
        fills the remaining positions (wrapping around) with the other
        parent's cities in their original order, skipping duplicates.
        Returns a pair of child sequences.
        """
        sequence_length = len(parent_sequence_a)
        child_sequence_a = [None] * sequence_length
        child_sequence_b = [None] * sequence_length
        cut_one = random.randrange(sequence_length)
        cut_two = random.randrange(sequence_length)
        left = min(cut_one, cut_two)
        right = max(cut_one, cut_two)
        # Copy the [left, right] slice straight from each parent.
        for position in range(left, right + 1):
            child_sequence_a[position] = parent_sequence_a[position]
            child_sequence_b[position] = parent_sequence_b[position]
        # Fill the rest of each child from the *other* parent, wrapping
        # past the end of the sequence and skipping cities already placed.
        fill = (right + 1) % sequence_length
        scan_a = fill
        scan_b = fill
        while fill != left:
            while parent_sequence_a[scan_a] in child_sequence_b:
                scan_a = (scan_a + 1) % sequence_length
            while parent_sequence_b[scan_b] in child_sequence_a:
                scan_b = (scan_b + 1) % sequence_length
            child_sequence_b[fill] = parent_sequence_a[scan_a]
            child_sequence_a[fill] = parent_sequence_b[scan_b]
            fill = (fill + 1) % sequence_length
        return child_sequence_a, child_sequence_b

    @staticmethod
    def mutate_sequence(sequence):
        """Swap two randomly chosen positions of `sequence` in place."""
        length = len(sequence)
        first = random.randrange(length)
        second = random.randrange(length)
        sequence[first], sequence[second] = sequence[second], sequence[first]

    @staticmethod
    def read_matrix_file(filename):
        """Parse a CSV matrix file into a symmetric numpy array.

        The first (header) line only determines the city count; every
        following line is a labelled row whose first field is discarded.
        Each row is mirrored into the matching column so the result stays
        symmetric even if the file is triangular.
        """
        with open(filename) as matrix_file:
            num_cities = len(matrix_file.readline().split(',')) - 1
            values = numpy.zeros((num_cities, num_cities))
            for i in range(num_cities):
                fields = matrix_file.readline().split(',')[1:]
                row = [float(field) for field in fields if field.strip()]
                values[i, :len(row)] = row
                values[:len(row), i] = row  # mirror row i into column i
        return values

    @classmethod
    def build(cls, distance_filename, cost_filename):
        """Construct an instance from distance and cost CSV matrix files."""
        distances = Mtsp.read_matrix_file(distance_filename)
        costs = Mtsp.read_matrix_file(cost_filename)
        return cls(distances, costs)

    def __init__(self, distances, costs):
        """Store the distance and cost matrices; city count is inferred."""
        self.distances = distances
        self.costs = costs
        self.num_cities = self.distances.shape[0]

    def evaluate_objectives(self, sequence):
        """Return (total_distance, total_cost) of the closed tour `sequence`."""
        total_distance = 0
        total_cost = 0
        # Sum every consecutive leg of the tour.
        for origin, destination in zip(sequence, sequence[1:]):
            total_distance += self.distances[origin, destination]
            total_cost += self.costs[origin, destination]
        # Close the loop back to the starting city.
        total_distance += self.distances[sequence[-1], sequence[0]]
        total_cost += self.costs[sequence[-1], sequence[0]]
        return (total_distance, total_cost)

    def generate_sequence(self):
        """Return a uniformly random permutation of all city indices."""
        cities = list(range(self.num_cities))
        random.shuffle(cities)
        return cities

    def initialize(self, options):
        """Wire this problem's operators into an NSGA-II optimizer."""
        self.nsga2 = Nsga2(
            options=options,
            genotype_creator=self.generate_sequence,
            objective_evaluator=self.evaluate_objectives,
            crossover_operator=Mtsp.crossover_sequence_ox,
            mutation_operator=Mtsp.mutate_sequence)
| [
"nsga2.Nsga2",
"numpy.zeros",
"random.shuffle",
"random.randrange"
] | [((343, 376), 'random.randrange', 'random.randrange', (['sequence_length'], {}), '(sequence_length)\n', (359, 376), False, 'import random\n'), ((394, 427), 'random.randrange', 'random.randrange', (['sequence_length'], {}), '(sequence_length)\n', (410, 427), False, 'import random\n'), ((1276, 1300), 'random.randrange', 'random.randrange', (['length'], {}), '(length)\n', (1292, 1300), False, 'import random\n'), ((1313, 1337), 'random.randrange', 'random.randrange', (['length'], {}), '(length)\n', (1329, 1337), False, 'import random\n'), ((2817, 2841), 'random.shuffle', 'random.shuffle', (['sequence'], {}), '(sequence)\n', (2831, 2841), False, 'import random\n'), ((2923, 3128), 'nsga2.Nsga2', 'Nsga2', ([], {'options': 'options', 'genotype_creator': 'self.generate_sequence', 'objective_evaluator': 'self.evaluate_objectives', 'crossover_operator': 'Mtsp.crossover_sequence_ox', 'mutation_operator': 'Mtsp.mutate_sequence'}), '(options=options, genotype_creator=self.generate_sequence,\n objective_evaluator=self.evaluate_objectives, crossover_operator=Mtsp.\n crossover_sequence_ox, mutation_operator=Mtsp.mutate_sequence)\n', (2928, 3128), False, 'from nsga2 import Nsga2\n'), ((1586, 1623), 'numpy.zeros', 'numpy.zeros', (['(num_cities, num_cities)'], {}), '((num_cities, num_cities))\n', (1597, 1623), False, 'import numpy\n')] |
import sys
import tensorflow as tf
import pdb
import numpy as np
import myParams
import GTools as GT
import scipy.io
import h5py
import time
# TF1-style global flags object; flag definitions are registered elsewhere.
FLAGS = tf.app.flags.FLAGS
def setup_inputs(sess, filenames, image_size=None, capacity_factor=3, TestStuff=False):
batch_size=myParams.myDict['batch_size']
channelsIn=myParams.myDict['channelsIn']
channelsOut=myParams.myDict['channelsOut']
DataH=myParams.myDict['DataH']
DataW=myParams.myDict['DataW']
LabelsH=myParams.myDict['LabelsH']
LabelsW=myParams.myDict['LabelsW']
if myParams.myDict['InputMode'] == 'I2I_ApplySens':
print('I2I loading labels ' + time.strftime("%Y-%m-%d %H:%M:%S"))
DatasetMatFN=myParams.myDict['LabelsMatFN']
f = h5py.File(DatasetMatFN, 'r')
nToLoad=myParams.myDict['nToLoad']
LoadAndRunOnData=myParams.myDict['LoadAndRunOnData']>0
if LoadAndRunOnData:
nToLoad=3
labels=f['Data'][1:nToLoad]
print('Loaded images ' + time.strftime("%Y-%m-%d %H:%M:%S"))
SensFN='/media/a/H2/home/a/gUM/ESensCC128.mat'
SensCC=scipy.io.loadmat(SensFN)
Sens=SensCC['ESensCC128']
SensMsk=SensCC['MskS']
SensMsk=np.reshape(SensMsk,(SensMsk.shape[0],SensMsk.shape[1],1))
def ConcatCOnDim(X,dim): return tf.cast(tf.concat([tf.real(X),tf.imag(X)],axis=dim),tf.float32)
def myrot90(X): return tf.transpose(X, perm=[1,0,2])
with tf.device('/gpu:0'):
TFL = tf.constant(np.int32(labels))
Idx=tf.random_uniform([1],minval=0,maxval=TFL.shape[0],dtype=tf.int32)
labelR=tf.slice(TFL,[Idx[0],0,0,0],[1,-1,-1,1])
labelI=tf.slice(TFL,[Idx[0],0,0,1],[1,-1,-1,1])
labelR=tf.cast(labelR,tf.complex64)
labelI=tf.cast(labelI,tf.complex64)
label=tf.cast((labelR + 1j*labelI)/30000.0, tf.complex64)
myParams.myDict['channelsOut']=1
myParams.myDict['LabelsH']=labels.shape[1]
myParams.myDict['LabelsW']=labels.shape[2]
myParams.myDict['DataH']=labels.shape[1]
myParams.myDict['DataW']=labels.shape[2]
label = tf.reshape(label, [LabelsH, LabelsW, 1])
label = tf.image.random_flip_left_right(label)
label = tf.image.random_flip_up_down(label)
u1=tf.random_uniform([1])
label=tf.cond(u1[0]<0.5, lambda: tf.identity(label), lambda: myrot90(label))
TFMsk = tf.constant(np.complex64(SensMsk))
TFSens = tf.constant(np.complex64(Sens))
label=tf.multiply(label,TFMsk)
feature=label
# label=ConcatCOnDim(label,2)
label = tf.cast(tf.abs(label),tf.float32)
feature=tf.multiply(feature,TFSens)
feature=ConcatCOnDim(feature,2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'I2I_B0':
print('I2I loading labels ' + time.strftime("%Y-%m-%d %H:%M:%S"))
DatasetMatFN=myParams.myDict['LabelsMatFN']
f = h5py.File(DatasetMatFN, 'r')
nToLoad=myParams.myDict['nToLoad']
LoadAndRunOnData=myParams.myDict['LoadAndRunOnData']>0
if LoadAndRunOnData:
nToLoad=3
labels=f['Data'][1:nToLoad]
LMin=np.float32(f['Min'])
LRange=np.float32(f['Range'])
print('Min, Range: %f,%f' % (LMin,LRange))
print('Loaded images ' + time.strftime("%Y-%m-%d %H:%M:%S"))
print('I2I loading features ' + time.strftime("%Y-%m-%d %H:%M:%S"))
DatasetMatFN=myParams.myDict['FeaturesMatFN']
f = h5py.File(DatasetMatFN, 'r')
features=f['Data'][1:nToLoad]
FMin=np.float32(f['Min'])
FRange=np.float32(f['Range'])
print('Min, Range: %f,%f' % (FMin,FRange))
print('Loaded featuress ' + time.strftime("%Y-%m-%d %H:%M:%S"))
TFL = tf.constant(np.int16(labels))
TFF = tf.constant(np.int16(features))
Idx=tf.random_uniform([1],minval=0,maxval=TFL.shape[0],dtype=tf.int32)
label=tf.slice(TFL,[Idx[0],0,0],[1,-1,-1,])
feature=tf.slice(TFF,[Idx[0],0,0,0],[1,-1,-1,-1])
label = tf.cast(label, tf.float32)
feature = tf.cast(feature, tf.float32)
label=(label*LRange/30000.0)+LMin
feature=(feature*FRange/30000.0)+FMin
if labels.ndim==4:
label = tf.reshape(label, [LabelsH, LabelsW, TFL.shape[3]])
else:
label = tf.reshape(label, [LabelsH, LabelsW, 1])
if features.ndim==4:
feature = tf.reshape(feature, [LabelsH, LabelsW, TFF.shape[3]])
else:
feature = tf.reshape(feature, [LabelsH, LabelsW, 1])
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'I2I':
print('I2I loading labels ' + time.strftime("%Y-%m-%d %H:%M:%S"))
DatasetMatFN=myParams.myDict['LabelsMatFN']
# DatasetMatFN='/media/a/H2/home/a/gUM/GRE_U1.4_Labels.mat'
f = h5py.File(DatasetMatFN, 'r')
nToLoad=myParams.myDict['nToLoad']
LoadAndRunOnData=myParams.myDict['LoadAndRunOnData']>0
if LoadAndRunOnData:
nToLoad=3
labels=f['labels'][1:nToLoad]
print('Loaded images ' + time.strftime("%Y-%m-%d %H:%M:%S"))
print('I2I loading features ' + time.strftime("%Y-%m-%d %H:%M:%S"))
DatasetMatFN=myParams.myDict['FeaturesMatFN']
# DatasetMatFN='/media/a/H2/home/a/gUM/GRE_U1.4_Features.mat'
f = h5py.File(DatasetMatFN, 'r')
features=f['features'][1:nToLoad]
print('Loaded featuress ' + time.strftime("%Y-%m-%d %H:%M:%S"))
TFL = tf.constant(np.int16(labels))
TFF = tf.constant(np.int16(features))
Idx=tf.random_uniform([1],minval=0,maxval=TFL.shape[0],dtype=tf.int32)
# label=tf.slice(TFL,[Idx[0],0,0],[1,-1,-1])
label=tf.slice(TFL,[Idx[0],0,0,0],[1,-1,-1,-1])
feature=tf.slice(TFF,[Idx[0],0,0,0],[1,-1,-1,-1])
label = tf.cast(label, tf.float32)
feature = tf.cast(feature, tf.float32)
if labels.ndim==4:
label = tf.reshape(label, [LabelsH, LabelsW, TFL.shape[3]])
else:
label = tf.reshape(label, [LabelsH, LabelsW, 1])
if features.ndim==4:
feature = tf.reshape(feature, [LabelsH, LabelsW, TFF.shape[3]])
else:
feature = tf.reshape(feature, [LabelsH, LabelsW, 1])
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'RegridTry3FMB':
BaseTSDataP=myParams.myDict['BaseTSDataP']
BaseNUFTDataP=myParams.myDict['BaseNUFTDataP']
B0Data=scipy.io.loadmat(BaseTSDataP + 'B0TS.mat')
TSBFA=B0Data['TSBFA']
TSCA=B0Data['TSCA']
TSBFB=B0Data['TSBFB']
TSCB=B0Data['TSCB']
SensCC=scipy.io.loadmat(BaseTSDataP + 'SensCC1.mat')
SensA=SensCC['SensCCA']
SensMskA=SensCC['SensMskA']
SensB=SensCC['SensCCB']
SensMskB=SensCC['SensMskB']
SensMskA=np.reshape(SensMskA,(SensMskA.shape[0],SensMskA.shape[1],1))
SensMskB=np.reshape(SensMskB,(SensMskB.shape[0],SensMskB.shape[1],1))
TFMskA = tf.constant(np.complex64(SensMskA))
TFMskB = tf.constant(np.complex64(SensMskB))
print('loading images ' + time.strftime("%Y-%m-%d %H:%M:%S"))
# f = h5py.File('/media/a/H1/HCPData_256x256_int16.mat', 'r')
DatasetMatFN=myParams.myDict['DatasetMatFN']
f = h5py.File(DatasetMatFN, 'r')
nToLoad=myParams.myDict['nToLoad']
# nToLoad=10000
LoadAndRunOnData=myParams.myDict['LoadAndRunOnData']>0
if LoadAndRunOnData:
nToLoad=3
I=f['HCPData'][1:nToLoad]
print('Loaded images ' + time.strftime("%Y-%m-%d %H:%M:%S"))
H=LabelsH
W=LabelsW
TFI = tf.constant(np.int16(I))
IdxA=tf.random_uniform([1],minval=0,maxval=I.shape[0],dtype=tf.int32)
IdxB=tf.random_uniform([1],minval=0,maxval=I.shape[0],dtype=tf.int32)
featureA=tf.slice(TFI,[IdxA[0],0,0],[1,-1,-1])
featureB=tf.slice(TFI,[IdxB[0],0,0],[1,-1,-1])
featureA=tf.transpose(featureA, perm=[1,2,0])
featureB=tf.transpose(featureB, perm=[1,2,0])
featureA = tf.image.random_flip_left_right(featureA)
featureA = tf.image.random_flip_up_down(featureA)
u1=tf.random_uniform([1])
featureA=tf.cond(u1[0]<0.5, lambda: tf.identity(featureA), lambda: tf.image.rot90(featureA))
featureB = tf.image.random_flip_left_right(featureB)
featureB = tf.image.random_flip_up_down(featureB)
u1=tf.random_uniform([1])
featureB=tf.cond(u1[0]<0.5, lambda: tf.identity(featureB), lambda: tf.image.rot90(featureB))
featureA = tf.random_crop(featureA, [H, W, 1])
featureB = tf.random_crop(featureB, [H, W, 1])
featureA = tf.cast(featureA, tf.int32)
featureB = tf.cast(featureB, tf.int32)
mxA=tf.maximum(tf.reduce_max(featureA),1)
mxB=tf.maximum(tf.reduce_max(featureB),1)
featureA = tf.cast(featureA/mxA, tf.complex64)
featureB = tf.cast(featureB/mxB, tf.complex64)
featureA=tf.multiply(featureA,TFMskA)
featureB=tf.multiply(featureB,TFMskB)
LFac=myParams.myDict['RandomPhaseLinearFac']
QFac=myParams.myDict['RandomPhaseQuadraticFac']
SFac=myParams.myDict['RandomPhaseScaleFac']
QA=GT.TFGenerateRandomSinPhase(H, W,LFac,QFac,SFac) # (nx=100,ny=120,LFac=5,QFac=0.1,SFac=2):
QB=GT.TFGenerateRandomSinPhase(H, W,LFac,QFac,SFac)
CurIWithPhaseA=featureA*tf.reshape(QA,[H,W,1])
CurIWithPhaseB=featureB*tf.reshape(QB,[H,W,1])
NUFTData=scipy.io.loadmat(BaseNUFTDataP + 'TrajForNUFT.mat')
Kd=NUFTData['Kd']
P=NUFTData['P']
SN=NUFTData['SN']
Trajm2=NUFTData['Trajm2']
nTraj=Trajm2.shape[1]
nCh=SensA.shape[2]
nTSC=TSCA.shape[2]
# ggg Arrived till here. CAIPI supposed to be into TSB anyway
SNcA,paddings,sp_R,sp_I,TSBFXA=GT.TF_TSNUFFT_Prepare(SN,SensA,TSCA,TSBFA,Kd,P)
SNcB,paddings,sp_R,sp_I,TSBFXB=GT.TF_TSNUFFT_Prepare(SN,SensB,TSCB,TSBFB,Kd,P)
def ConcatCI(X): return tf.concat([tf.real(X),tf.imag(X)],axis=0)
def ConcatCIOn2(X): return tf.concat([tf.real(X),tf.imag(X)],axis=2)
if myParams.myDict['BankSize']>0:
BankSize=myParams.myDict['BankSize']
BankK=myParams.myDict['BankK']
label_indexes = tf.constant(np.int32(np.arange(0,BankSize)),dtype=tf.int32)
BankK_indexes = tf.constant(np.int32(np.arange(0,BankSize*BankK)),dtype=tf.int32)
Bankdataset = tf.data.Dataset.from_tensor_slices(label_indexes)
Bankdataset = Bankdataset.repeat(count=None)
Bankiter = Bankdataset.make_one_shot_iterator()
label_index = Bankiter.get_next()
label_index=tf.cast(label_index,tf.int32)
label_index=label_index*2
BankKdataset = tf.data.Dataset.from_tensor_slices(BankK_indexes)
BankKdataset = BankKdataset.repeat(count=None)
BankKiter = BankKdataset.make_one_shot_iterator()
label_indexK = BankKiter.get_next()
label_indexK=tf.cast(label_indexK,tf.int32)
label_indexK=label_indexK*2
IdxAX=tf.random_uniform([1],minval=0,maxval=BankSize,dtype=tf.int32)
IdxBX=tf.random_uniform([1],minval=0,maxval=BankSize,dtype=tf.int32)
with tf.device('/gpu:0'):
OnlyTakeFromBank=tf.greater(label_indexK,label_index)
with tf.variable_scope("aaa", reuse=True):
Bank=tf.get_variable("Bank",dtype=tf.float32)
LBank=tf.get_variable("LBank",dtype=tf.float32)
def f2(): return tf.scatter_nd_update(Bank,[[label_index],[label_index+1]], [ConcatCI(tf.reshape(tf.transpose(GT.TF_TSNUFFT_Run(CurIWithPhaseA,SNcA,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFXA), perm=[1,0]),[nTraj*nCh,1,1])),ConcatCI(tf.reshape(tf.transpose(GT.TF_TSNUFFT_Run(CurIWithPhaseB,SNcB,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFXB), perm=[1,0]),[nTraj*nCh,1,1]))])
def f2L(): return tf.scatter_nd_update(LBank,[[label_index],[label_index+1]], [ConcatCIOn2(CurIWithPhaseA),ConcatCIOn2(CurIWithPhaseB)])
Bank = tf.cond(OnlyTakeFromBank, lambda: tf.identity(Bank), f2)
LBank = tf.cond(OnlyTakeFromBank, lambda: tf.identity(LBank), f2L)
IdxAF = tf.cond(OnlyTakeFromBank, lambda: tf.identity(IdxAX[0]*2), lambda: tf.identity(label_index))
IdxBF = tf.cond(OnlyTakeFromBank, lambda: tf.identity(IdxBX[0]*2+1), lambda: tf.identity(label_index+1))
# Take from bank in any case
featureAX = tf.slice(Bank,[IdxAF,0,0,0],[1,-1,-1,-1])
featureAX = tf.reshape(featureAX, [DataH, 1, 1])
featureBX = tf.slice(Bank,[IdxBF,0,0,0],[1,-1,-1,-1])
featureBX = tf.reshape(featureBX, [DataH, 1, 1])
featureX=featureAX+featureBX # That's MB
labelAX = tf.slice(LBank,[IdxAF,0,0,0],[1,-1,-1,-1])
labelAX = tf.reshape(labelAX, [H, W, 2])
labelBX = tf.slice(LBank,[IdxBF,0,0,0],[1,-1,-1,-1])
labelBX = tf.reshape(labelBX, [H, W, 2])
labelX = tf.concat([labelAX,labelBX],axis=1);
features, labels = tf.train.batch([featureX, labelX],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
else:
featureA=GT.TF_TSNUFFT_Run(CurIWithPhaseA,SNcA,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFXA)
featureB=GT.TF_TSNUFFT_Run(CurIWithPhaseB,SNcB,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFXB)
feature=featureA+featureB # That's MB
feature=tf.transpose(feature, perm=[1,0])
F=tf.reshape(feature,[nTraj*nCh,1,1])
feature=ConcatCI(F)
CurIWithPhase=tf.concat([CurIWithPhaseA,CurIWithPhaseB],axis=1);
label=tf.concat([tf.real(CurIWithPhase),tf.imag(CurIWithPhase)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'RegridTry3F':
BaseTSDataP=myParams.myDict['BaseTSDataP']
BaseNUFTDataP=myParams.myDict['BaseNUFTDataP']
B0Data=scipy.io.loadmat(BaseTSDataP + 'B0TS.mat')
# Sens=B0Data['Sens']
TSBF=B0Data['TSBF']
TSC=B0Data['TSC']
SensCC=scipy.io.loadmat(BaseTSDataP + 'SensCC1.mat')
Sens=SensCC['SensCC']
SensMsk=SensCC['SensMsk']
SensMsk=np.reshape(SensMsk,(SensMsk.shape[0],SensMsk.shape[1],1))
TFMsk = tf.constant(np.complex64(SensMsk))
print('loading images ' + time.strftime("%Y-%m-%d %H:%M:%S"))
# I=scipy.io.loadmat('/media/a/H1/First3kIm256x256Magint16.mat')
# I=I['First3kIm256x256Magint16']
DatasetMatFN=myParams.myDict['DatasetMatFN']
# f = h5py.File('/media/a/H1/HCPData_256x256_int16.mat', 'r')
f = h5py.File(DatasetMatFN, 'r')
# nToLoad=10000
nToLoad=myParams.myDict['nToLoad']
LoadAndRunOnData=myParams.myDict['LoadAndRunOnData']>0
if LoadAndRunOnData:
nToLoad=3
I=f['HCPData'][1:nToLoad]
print('Loaded images ' + time.strftime("%Y-%m-%d %H:%M:%S"))
# I=scipy.io.loadmat('/media/a/H1/First1kIm256x256Magint16.mat')
# I=I['First1kIm256x256Magint16']
H=LabelsH
W=LabelsW
TFI = tf.constant(np.int16(I))
Idx=tf.random_uniform([1],minval=0,maxval=I.shape[0],dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature=tf.transpose(feature, perm=[1,2,0])
feature = tf.image.random_flip_left_right(feature)
feature = tf.image.random_flip_up_down(feature)
# u1 = tf.distributions.Uniform(low=0.0, high=1.0)
u1=tf.random_uniform([1])
feature=tf.cond(u1[0]<0.5, lambda: tf.identity(feature), lambda: tf.image.rot90(feature))
# tf.image.rot90( image, k=1, name=None)
# MYGlobalStep = tf.Variable(0, trainable=False, name='Myglobal_step')
# MYGlobalStep = MYGlobalStep+1
# feature=tf.cond(MYGlobalStep>0, lambda: tf.identity(feature), lambda: tf.identity(feature))
# feature = tf.Print(feature,[MYGlobalStep,],message='MYGlobalStep:')
# image = tf.image.random_saturation(image, .95, 1.05)
# image = tf.image.random_brightness(image, .05)
#image = tf.image.random_contrast(image, .95, 1.05)
feature = tf.random_crop(feature, [H, W, 1])
feature = tf.cast(feature, tf.int32)
mx=tf.reduce_max(feature)
mx=tf.maximum(mx,1)
feature = tf.cast(feature/mx, tf.complex64)
feature=tf.multiply(feature,TFMsk)
Q=GT.TFGenerateRandomSinPhase(H, W)
CurIWithPhase=feature*tf.reshape(Q,[H,W,1])
label=tf.concat([tf.real(CurIWithPhase),tf.imag(CurIWithPhase)],axis=2)
NUFTData=scipy.io.loadmat(BaseNUFTDataP + 'TrajForNUFT.mat')
Kd=NUFTData['Kd']
P=NUFTData['P']
SN=NUFTData['SN']
Trajm2=NUFTData['Trajm2']
nTraj=Trajm2.shape[1]
nCh=Sens.shape[2]
nTSC=TSC.shape[2]
SNc,paddings,sp_R,sp_I,TSBFX=GT.TF_TSNUFFT_Prepare(SN,Sens,TSC,TSBF,Kd,P)
# feature=GT.TF_TSNUFFT_Run(CurIWithPhase,SNc,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFX)
# feature=tf.transpose(feature, perm=[1,0])
# F=tf.reshape(feature,[nTraj*nCh,1,1])
# feature=tf.concat([tf.real(F),tf.imag(F)],axis=0)
def ConcatCI(X): return tf.concat([tf.real(X),tf.imag(X)],axis=0)
# feature=ConcatCI(F)
# feature=ConcatCI(tf.reshape(tf.transpose(GT.TF_TSNUFFT_Run(CurIWithPhase,SNc,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFX), perm=[1,0]),[nTraj*nCh,1,1]))
# ggg Signal Bank stuff:
if myParams.myDict['BankSize']>0:
BankSize=myParams.myDict['BankSize']
BankK=myParams.myDict['BankK']
label_indexes = tf.constant(np.int32(np.arange(0,BankSize)),dtype=tf.int32)
BankK_indexes = tf.constant(np.int32(np.arange(0,BankSize*BankK)),dtype=tf.int32)
Bankdataset = tf.data.Dataset.from_tensor_slices(label_indexes)
Bankdataset = Bankdataset.repeat(count=None)
Bankiter = Bankdataset.make_one_shot_iterator()
label_index = Bankiter.get_next()
label_index=tf.cast(label_index,tf.int32)
BankKdataset = tf.data.Dataset.from_tensor_slices(BankK_indexes)
BankKdataset = BankKdataset.repeat(count=None)
BankKiter = BankKdataset.make_one_shot_iterator()
label_indexK = BankKiter.get_next()
label_indexK=tf.cast(label_indexK,tf.int32)
with tf.device('/gpu:0'):
OnlyTakeFromBank=tf.greater(label_indexK,label_index)
with tf.variable_scope("aaa", reuse=True):
Bank=tf.get_variable("Bank",dtype=tf.float32)
LBank=tf.get_variable("LBank",dtype=tf.float32)
def f2(): return tf.scatter_nd_update(Bank,[[label_index]], [ConcatCI(tf.reshape(tf.transpose(GT.TF_TSNUFFT_Run(CurIWithPhase,SNc,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFX), perm=[1,0]),[nTraj*nCh,1,1]))])
def f2L(): return tf.scatter_nd_update(LBank,[[label_index]], [label])
Bank = tf.cond(OnlyTakeFromBank, lambda: tf.identity(Bank), f2)
LBank = tf.cond(OnlyTakeFromBank, lambda: tf.identity(LBank), f2L)
# Take from bank in any case
featureX = tf.slice(Bank,[label_index,0,0,0],[1,-1,-1,-1])
featureX = tf.reshape(featureX, [DataH, 1, 1])
# featureX = tf.Print(featureX,[label_index,label_indexK],message='Taking from bank:')
labelX = tf.slice(LBank,[label_index,0,0,0],[1,-1,-1,-1])
labelX = tf.reshape(labelX, [H, W, 2])
features, labels = tf.train.batch([featureX, labelX],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
# feature = tf.cond(TakeFromBank, lambda: tf.identity(Bfeature), lambda: tf.identity(Afeature))
# label = tf.cond(TakeFromBank, lambda: tf.identity(Blabel), lambda: tf.identity(Alabel))
else:
feature=ConcatCI(tf.reshape(tf.transpose(GT.TF_TSNUFFT_Run(CurIWithPhase,SNc,paddings,nTraj,nTSC,nCh,sp_R,sp_I,TSBFX), perm=[1,0]),[nTraj*nCh,1,1]))
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
# ggg end Signal Bank stuff:
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'RegridTry3M':
Msk=scipy.io.loadmat('/media/a/DATA/meas_MID244_gBP_VD11_U19_G35S155_4min_FID22439/Sli08/Msk.mat')
Msk=Msk['Msk']
TFMsk = tf.constant(Msk)
FN='/media/a/H1/meas_MID244_gBP_VD11_U19_G35S155_4min_FID22439/AllData_Sli8_6k.mat'
if TestStuff:
print('setup_inputs Test')
ChunkSize=100
ChunkSizeL=400
FN='/media/a/H1/meas_MID244_gBP_VD11_U19_G35S155_4min_FID22439/AllData_Sli8_100.mat'
else:
print('setup_inputs Train')
ChunkSize=1000
ChunkSizeL=4000
f = h5py.File(FN, 'r')
print('loading Data ' + time.strftime("%Y-%m-%d %H:%M:%S"))
I=f['AllDatax'][:]
print('Loaded labels ' + time.strftime("%Y-%m-%d %H:%M:%S"))
f.close()
I=I.astype(np.float32)
f = h5py.File('/media/a/H1/AllImWithPhaseComplexSingle_h5.mat', 'r')
print('Loading labels ' + time.strftime("%Y-%m-%d %H:%M:%S"))
L=f['AllLh5'][0:(ChunkSizeL)]
print('Loaded labels ' + time.strftime("%Y-%m-%d %H:%M:%S"))
f.close()
L=L.astype(np.float32)
TFI = tf.constant(I[0:ChunkSize])
TFIb = tf.constant(I[(ChunkSize):(2*ChunkSize)])
TFIc = tf.constant(I[(2*ChunkSize):(3*ChunkSize)])
TFId = tf.constant(I[(3*ChunkSize):(4*ChunkSize)])
TFL = tf.constant(L)
# place = tf.placeholder(tf.float32, shape=(DataH, DataW, channelsIn))
# placeL = tf.placeholder(tf.float32, shape=(LabelsH, LabelsW, channelsOut))
Idx=tf.random_uniform([1],minval=0,maxval=ChunkSizeL,dtype=tf.int32)
def f1(): return tf.cond(Idx[0]<ChunkSize, lambda: tf.slice(TFI,[Idx[0],0],[1,-1]), lambda: tf.slice(TFIb,[Idx[0]-ChunkSize,0],[1,-1]))
def f2(): return tf.cond(Idx[0]<(3*ChunkSize), lambda: tf.slice(TFIc,[Idx[0]-2*ChunkSize,0],[1,-1]), lambda: tf.slice(TFId,[Idx[0]-3*ChunkSize,0],[1,-1]))
feature=tf.cond(Idx[0]<(2*ChunkSize), f1, f2)
# feature=tf.cond(Idx[0]<ChunkSize, lambda: tf.slice(TFI,[Idx[0],0],[1,-1]), lambda: tf.slice(TFIb,[Idx[0]-ChunkSize,0],[1,-1]))
# feature=tf.slice(TFI,[Idx[0],0],[1,-1])
# feature = tmp.assign(place)
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature = tf.cast(feature, tf.float32)
labels = tf.slice(TFL,[Idx[0],0,0,0],[1,-1,-1,-1])
# feature = tmpL.assign(placeL)
labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
label = tf.cast(labels, tf.float32)
label=tf.multiply(label,TFMsk)
# Using asynchronous queues
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'SPEN_Local':
SR=scipy.io.loadmat('/media/a/H1/SR.mat')
SR=SR['SR']
SR=np.reshape(SR,[DataH,DataH,1])
SR=np.transpose(SR, (2,0,1))
SR_TF=tf.constant(SR)
# I=scipy.io.loadmat('/media/a/H1/First1kIm256x256Magint16.mat')
# I=I['First1kIm256x256Magint16']
I=scipy.io.loadmat('/media/a/H1/First3kIm256x256Magint16.mat')
I=I['First3kIm256x256Magint16']
TFI = tf.constant(np.float32(I))
Idx=tf.random_uniform([1],minval=0,maxval=3000,dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature=tf.transpose(feature, perm=[1,2,0])
feature = tf.random_crop(feature, [DataH, DataW, 1])
mx=tf.reduce_max(feature)
mx=tf.maximum(mx,1)
feature = tf.cast(feature/mx, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
CurIWithPhase=feature*tf.reshape(Q,[DataH,DataW,1])
label=tf.concat([tf.real(CurIWithPhase),tf.imag(CurIWithPhase)],axis=2)
P=tf.transpose(CurIWithPhase, perm=[2,1,0])
F=tf.matmul(P,SR_TF)
F=tf.transpose(F, perm=[2,1,0])
SPENLocalFactor=myParams.myDict['SPENLocalFactor']
F=GT.ExpandWithCopiesOn2(F,DataH,SPENLocalFactor)
feature=tf.concat([tf.real(F),tf.imag(F)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'SPEN_FC':
SR=scipy.io.loadmat('/media/a/H1/SR.mat')
SR=SR['SR']
SR=np.reshape(SR,[DataH,DataH,1])
SR=np.transpose(SR, (2,0,1))
SR_TF=tf.constant(SR)
I=scipy.io.loadmat('/media/a/H1/First1kIm256x256Magint16.mat')
I=I['First1kIm256x256Magint16']
TFI = tf.constant(np.float32(I))
Idx=tf.random_uniform([1],minval=0,maxval=1000,dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature=tf.transpose(feature, perm=[1,2,0])
feature = tf.random_crop(feature, [DataH, DataW, 1])
mx=tf.reduce_max(feature)
mx=tf.maximum(mx,1)
feature = tf.cast(feature/mx, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
CurIWithPhase=feature*tf.reshape(Q,[DataH,DataW,1])
label=tf.concat([tf.real(CurIWithPhase),tf.imag(CurIWithPhase)],axis=2)
P=tf.transpose(CurIWithPhase, perm=[2,1,0])
F=tf.matmul(P,SR_TF)
F=tf.transpose(F, perm=[2,1,0])
feature=tf.concat([tf.real(F),tf.imag(F)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'SMASH1DFTxyC':
I=scipy.io.loadmat('/media/a/H1/First3kIm128x128MagSinglex.mat')
I=I['First3kIm128x128MagSingle']
Maps=scipy.io.loadmat('/media/a/H1/maps128x128x8.mat')
Mask=Maps['Msk']
Maps=Maps['maps']
nChannels=8
Mask=np.reshape(Mask,[128, 128, 1])
Maps = tf.constant(Maps)
Mask = tf.constant(np.float32(Mask))
# Maps = tf.constant(np.float32(Maps))
TFI = tf.constant(np.float32(I))
Idx=tf.random_uniform([1],minval=0,maxval=3000,dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature = tf.reshape(feature, [128, 128, 1])
feature = tf.multiply(feature,Mask)
feature = tf.cast(feature, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
CurIWithPhase=feature*tf.reshape(Q,[DataH,DataW,1])
WithPhaseAndMaps=tf.multiply(CurIWithPhase,Maps)
label=tf.concat([tf.real(CurIWithPhase),tf.imag(CurIWithPhase)],axis=2)
F=GT.gfft_TFOn3D(WithPhaseAndMaps,DataH,0)
F=GT.gfft_TFOn3D(F,DataW,1)
# now subsample 2
F = tf.reshape(F, [64,2, 128, nChannels])
F=tf.slice(F,[0,0,0,0],[-1,1,-1,-1])
F = tf.reshape(F, [64, 128, nChannels])
feature=tf.concat([tf.real(F),tf.imag(F)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == '1DFTxyCMaps':
I=scipy.io.loadmat('/media/a/H1/First3kIm128x128MagSinglex.mat')
I=I['First3kIm128x128MagSingle']
Maps=scipy.io.loadmat('/media/a/H1/maps128x128x8.mat')
Mask=Maps['Msk']
Maps=Maps['maps']
nChannels=8
Mask=np.reshape(Mask,[128, 128, 1])
Maps = tf.constant(Maps)
Mask = tf.constant(np.float32(Mask))
# Maps = tf.constant(np.float32(Maps))
TFI = tf.constant(np.float32(I))
Idx=tf.random_uniform([1],minval=0,maxval=3000,dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature = tf.reshape(feature, [128, 128, 1])
feature = tf.multiply(feature,Mask)
feature = tf.cast(feature, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
CurIWithPhase=feature*tf.reshape(Q,[DataH,DataW,1])
WithPhaseAndMaps=tf.multiply(CurIWithPhase,Maps)
label=tf.concat([tf.real(CurIWithPhase),tf.imag(CurIWithPhase)],axis=2)
F=GT.gfft_TFOn3D(WithPhaseAndMaps,DataH,0)
F=GT.gfft_TFOn3D(F,DataW,1)
feature=tf.concat([tf.real(F),tf.imag(F)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'M2DFT':
I=scipy.io.loadmat('/media/a/H1/First3kIm128x128MagSinglex.mat')
I=I['First3kIm128x128MagSingle']
TFI = tf.constant(np.float32(I))
Idx=tf.random_uniform([1],minval=0,maxval=3000,dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature = tf.reshape(feature, [128, 128, 1])
feature = tf.random_crop(feature, [DataH, DataW, 1])
feature = tf.cast(feature, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
IQ=feature*tf.reshape(Q,[DataH,DataW,1])
label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
IQ2=tf.reshape(IQ,IQ.shape[0:2])
IQ2=GT.gfft_TF(IQ2,DataH,0)
IQ2=GT.gfft_TF(IQ2,DataW,1)
feature=tf.reshape(IQ2,[DataH*DataW,1,1])
feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'M1DFTxy':
I=scipy.io.loadmat('/media/a/H1/First3kIm128x128MagSinglex.mat')
I=I['First3kIm128x128MagSingle']
TFI = tf.constant(np.float32(I))
Idx=tf.random_uniform([1],minval=0,maxval=3000,dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature = tf.reshape(feature, [128, 128, 1])
feature = tf.random_crop(feature, [DataH, DataW, 1])
feature = tf.cast(feature, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
IQ=feature*tf.reshape(Q,[DataH,DataW,1])
label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
IQ2=tf.reshape(IQ,IQ.shape[0:2])
IQ2=GT.gfft_TF(IQ2,DataH,0)
IQ2=GT.gfft_TF(IQ2,DataW,1)
feature=tf.reshape(IQ2,[DataH,DataW,1])
feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'M1DFTx':
I=scipy.io.loadmat('/media/a/H1/First3kIm128x128MagSinglex.mat')
I=I['First3kIm128x128MagSingle']
TFI = tf.constant(np.float32(I))
Idx=tf.random_uniform([1],minval=0,maxval=3000,dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature = tf.reshape(feature, [DataH, DataW, 1])
feature = tf.cast(feature, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
IQ=feature*tf.reshape(Q,[DataH,DataW,1])
label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
IQ2=tf.reshape(IQ,IQ.shape[0:2])
IQ2=GT.gfft_TF(IQ2,DataW,1)
feature=tf.reshape(IQ2,[DataH,DataW,1])
feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'M1DFTy':
I=scipy.io.loadmat('/media/a/H1/First3kIm128x128MagSinglex.mat')
I=I['First3kIm128x128MagSingle']
TFI = tf.constant(np.float32(I))
Idx=tf.random_uniform([1],minval=0,maxval=3000,dtype=tf.int32)
feature=tf.slice(TFI,[Idx[0],0,0],[1,-1,-1])
feature = tf.reshape(feature, [DataH, DataW, 1])
feature = tf.cast(feature, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
IQ=feature*tf.reshape(Q,[DataH,DataW,1])
label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
IQ2=tf.reshape(IQ,IQ.shape[0:2])
IQ2=GT.gfft_TF(IQ2,DataH,0)
feature=tf.reshape(IQ2,[DataH,DataW,1])
feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)
features, labels = tf.train.batch([feature, label],batch_size=batch_size,num_threads=4,capacity = capacity_factor*batch_size,name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
#if image_size is None:
# image_size = FLAGS.sample_size
#pdb.set_trace()
reader = tf.TFRecordReader()
filename_queue = tf.train.string_input_producer(filenames)
key, value = reader.read(filename_queue)
AlsoLabel=True
kKick= myParams.myDict['InputMode'] == 'kKick'
if kKick or myParams.myDict['InputMode'] == '1DFTx' or myParams.myDict['InputMode'] == '1DFTy' or myParams.myDict['InputMode'] == '2DFT':
AlsoLabel=False
if myParams.myDict['InputMode'] == 'AAA':
#filename_queue = tf.Print(filename_queue,[filename_queue,],message='ZZZZZZZZZ:')
keyX=key
value = tf.Print(value,[keyX,],message='QQQ:')
featuresA = tf.parse_single_example(
value,
features={
'CurIs': tf.FixedLenFeature([], tf.string),
'Labels': tf.FixedLenFeature([], tf.string)
})
feature = tf.decode_raw(featuresA['Labels'], tf.float32)
CurIs = tf.decode_raw(featuresA['CurIs'], tf.float32)
CurIs = tf.cast(CurIs, tf.int64)
mx=CurIs
# mx='qwe'+
feature = tf.Print(feature,[keyX,mx],message='QQQ:')
feature = tf.Print(feature,[keyX,mx],message='QQQ:')
feature = tf.Print(feature,[keyX,mx],message='QQQ:')
feature = tf.Print(feature,[keyX,mx],message='QQQ:')
feature = tf.Print(feature,[keyX,mx],message='QQQ:')
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature = tf.cast(feature, tf.float32)
label=feature
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
#image = tf.image.decode_jpeg(value, channels=channels, name="dataset_image")
#print('1')
if AlsoLabel:
featuresA = tf.parse_single_example(
value,
features={
'DataH': tf.FixedLenFeature([], tf.int64),
'DataW': tf.FixedLenFeature([], tf.int64),
'channelsIn': tf.FixedLenFeature([], tf.int64),
'LabelsH': tf.FixedLenFeature([], tf.int64),
'LabelsW': tf.FixedLenFeature([], tf.int64),
'channelsOut': tf.FixedLenFeature([], tf.int64),
'data_raw': tf.FixedLenFeature([], tf.string),
'labels_raw': tf.FixedLenFeature([], tf.string)
})
labels = tf.decode_raw(featuresA['labels_raw'], tf.float32)
else:
featuresA = tf.parse_single_example(
value,
features={
'DataH': tf.FixedLenFeature([], tf.int64),
'DataW': tf.FixedLenFeature([], tf.int64),
'channelsIn': tf.FixedLenFeature([], tf.int64),
'data_raw': tf.FixedLenFeature([], tf.string)
})
feature = tf.decode_raw(featuresA['data_raw'], tf.float32)
print('setup_inputs')
print('Data H,W,#ch: %d,%d,%d -> Labels H,W,#ch %d,%d,%d' % (DataH,DataW,channelsIn,LabelsH,LabelsW,channelsOut))
print('------------------')
if myParams.myDict['InputMode'] == '1DFTy':
feature = tf.reshape(feature, [256, 256, 1])
feature = tf.random_crop(feature, [DataH, DataW, channelsIn])
mm=tf.reduce_mean(feature)
mx=tf.reduce_max(feature)
mx=tf.maximum(mx,1)
#feature = tf.Print(feature,[mm,mx],message='QQQ:')
#assert_op = tf.Assert(tf.greater(mx, 0), [mx])
#with tf.control_dependencies([assert_op]):
feature = tf.cast(feature/mx, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
IQ=feature*tf.reshape(Q,[DataH,DataW,channelsIn])
label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
feature=label
HalfDataW=DataW/2
Id=np.hstack([np.arange(HalfDataW,DataW), np.arange(0,HalfDataW)])
Id=Id.astype(int)
IQ2=tf.reshape(IQ,IQ.shape[0:2])
feature=tf.fft(IQ2)
feature = tf.gather(feature,Id,axis=1)
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == '1DFTx':
feature = tf.reshape(feature, [256, 256, 1])
feature = tf.random_crop(feature, [DataH, DataW, channelsIn])
mm=tf.reduce_mean(feature)
mx=tf.reduce_max(feature)
mx=tf.maximum(mx,1)
#feature = tf.Print(feature,[mm,mx],message='QQQ:')
#assert_op = tf.Assert(tf.greater(mx, 0), [mx])
#with tf.control_dependencies([assert_op]):
feature = tf.cast(feature/mx, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
IQ=feature*tf.reshape(Q,[DataH,DataW,channelsIn])
label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
feature=label
HalfDataH=DataH/2
Id=np.hstack([np.arange(HalfDataH,DataH), np.arange(0,HalfDataH)])
Id=Id.astype(int)
IQ2=tf.reshape(IQ,IQ.shape[0:2])
IQ2 = tf.transpose(IQ2, perm=[1, 0])
feature=tf.fft(IQ2)
feature = tf.gather(feature,Id,axis=1)
feature = tf.transpose(feature, perm=[1,0])
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == '2DFT':
feature = tf.reshape(feature, [256, 256, 1])
feature = tf.random_crop(feature, [DataH, DataW, channelsIn])
mm=tf.reduce_mean(feature)
mx=tf.reduce_max(feature)
mx=tf.maximum(mx,1)
#feature = tf.Print(feature,[mm,mx],message='QQQ:')
#assert_op = tf.Assert(tf.greater(mx, 0), [mx])
#with tf.control_dependencies([assert_op]):
feature = tf.cast(feature/mx, tf.complex64)
Q=GT.TFGenerateRandomSinPhase(DataH, DataW)
IQ=feature*tf.reshape(Q,[DataH,DataW,channelsIn])
label=tf.concat([tf.real(IQ),tf.imag(IQ)],axis=2)
feature=label
HalfDataH=DataH/2
HalfDataW=DataW/2
IdH=np.hstack([np.arange(HalfDataH,DataH), np.arange(0,HalfDataH)])
IdH=IdH.astype(int)
IdW=np.hstack([np.arange(HalfDataW,DataW), np.arange(0,HalfDataW)])
IdW=IdW.astype(int)
IQ2=tf.reshape(IQ,IQ.shape[0:2])
IQ2=tf.fft(IQ2)
IQ2=tf.gather(IQ2,IdW,axis=1)
IQ2 = tf.transpose(IQ2, perm=[1, 0])
feature=tf.fft(IQ2)
feature = tf.gather(feature,IdH,axis=1)
feature = tf.transpose(feature, perm=[1,0])
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature=tf.concat([tf.real(feature),tf.imag(feature)],axis=2)
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if kKick:
filename_queue2 = tf.train.string_input_producer(filenames)
key2, value2 = reader.read(filename_queue2)
featuresA2 = tf.parse_single_example(
value2,
features={
'DataH': tf.FixedLenFeature([], tf.int64),
'DataW': tf.FixedLenFeature([], tf.int64),
'channelsIn': tf.FixedLenFeature([], tf.int64),
'data_raw': tf.FixedLenFeature([], tf.string)
})
feature2 = tf.decode_raw(featuresA2['data_raw'], tf.float32)
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature2 = tf.reshape(feature2, [DataH, DataW, channelsIn])
feature.set_shape([None, None, channelsIn])
feature2.set_shape([None, None, channelsIn])
feature = tf.cast(feature, tf.float32)/tf.reduce_max(feature)
feature2 = tf.cast(feature2, tf.float32)/tf.reduce_max(feature)
feature= tf.concat([feature,feature*0,feature2,feature2*0], 2)
label=feature
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'RegridTry3':
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature = tf.cast(feature, tf.float32)
labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
label = tf.cast(labels, tf.float32)
# Using asynchronous queues
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'RegridTry2':
FullData=scipy.io.loadmat(myParams.myDict['NMAP_FN'])
NMapCR=FullData['NMapCR']
NMapCR = tf.constant(NMapCR)
feature=tf.gather(feature,NMapCR,validate_indices=None,name=None)
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature = tf.cast(feature, tf.float32)
labels = tf.reshape(labels, [128, 128, channelsOut])
# scipy.misc.imresize(arr, size, interp='bilinear', mode=None)
labels = tf.image.resize_images(labels,[LabelsH, LabelsW]) #,method=tf.ResizeMethod.BICUBIC,align_corners=False) # or BILINEAR
label = tf.cast(labels, tf.float32)
# Using asynchronous queues
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'RegridTry1':
# FullData=scipy.io.loadmat('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/NMapIndTesta.mat')
FullData=scipy.io.loadmat(myParams.myDict['NMAP_FN'])
NMapCR=FullData['NMapCR']
NMapCR = tf.constant(NMapCR)
feature=tf.gather(feature,NMapCR,validate_indices=None,name=None)
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature = tf.cast(feature, tf.float32)
labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
label = tf.cast(labels, tf.float32)
# Using asynchronous queues
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
if myParams.myDict['InputMode'] == 'SMASHTry1':
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature = tf.cast(feature, tf.float32)
labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
label = tf.cast(labels, tf.float32)
# Using asynchronous queues
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
"""if myParams.myDict['Mode'] == 'RegridTry1C2':
FullData=scipy.io.loadmat('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/NMapIndC.mat')
NMapCR=FullData['NMapCRC']
NMapCR = tf.constant(NMapCR)
feature=tf.gather(feature,NMapCR,validate_indices=None,name=None)
feature = tf.reshape(feature, [DataH, DataW, channelsIn,2])
feature = tf.cast(feature, tf.float32)
labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
label = tf.cast(labels, tf.float32)
# Using asynchronous queues
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels"""
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
#print('44')
#example.ParseFromString(serialized_example)
#x_1 = np.array(example.features.feature['X'].float_list.value)
# Convert from [depth, height, width] to [height, width, depth].
#result.uint8image = tf.transpose(depth_major, [1, 2, 0])
feature.set_shape([None, None, channelsIn])
labels.set_shape([None, None, channelsOut])
# Crop and other random augmentations
#image = tf.image.random_flip_left_right(image)
#image = tf.image.random_saturation(image, .95, 1.05)
#image = tf.image.random_brightness(image, .05)
#image = tf.image.random_contrast(image, .95, 1.05)
#print('55')
#wiggle = 8
#off_x, off_y = 25-wiggle, 60-wiggle
#crop_size = 128
#crop_size_plus = crop_size + 2*wiggle
#print('56')
#image = tf.image.crop_to_bounding_box(image, off_y, off_x, crop_size_plus, crop_size_plus)
#print('57')
#image = tf.image.crop_to_bounding_box(image, 1, 2, crop_size, crop_size)
#image = tf.random_crop(image, [crop_size, crop_size, 3])
feature = tf.reshape(feature, [DataH, DataW, channelsIn])
feature = tf.cast(feature, tf.float32) #/255.0
labels = tf.reshape(labels, [LabelsH, LabelsW, channelsOut])
label = tf.cast(labels, tf.float32) #/255.0
#if crop_size != image_size:
# image = tf.image.resize_area(image, [image_size, image_size])
# The feature is simply a Kx downscaled version
#K = 1
#downsampled = tf.image.resize_area(image, [image_size//K, image_size//K])
#feature = tf.reshape(downsampled, [image_size//K, image_size//K, 3])
#feature = tf.reshape(downsampled, [image_size//K, image_size//K, 3])
#label = tf.reshape(image, [image_size, image_size, 3])
#feature = tf.reshape(image, [image_size, image_size, channelsIn])
#feature = tf.reshape(image, [1, image_size*image_size*2, channelsIn])
#label = tf.reshape(labels, [image_size, image_size, channelsOut])
# Using asynchronous queues
features, labels = tf.train.batch([feature, label],
batch_size=batch_size,
num_threads=4,
capacity = capacity_factor*batch_size,
name='labels_and_features')
tf.train.start_queue_runners(sess=sess)
return features, labels
| [
"tensorflow.image.resize_images",
"tensorflow.imag",
"tensorflow.get_variable",
"tensorflow.transpose",
"tensorflow.scatter_nd_update",
"numpy.int32",
"tensorflow.multiply",
"tensorflow.real",
"tensorflow.TFRecordReader",
"GTools.TFGenerateRandomSinPhase",
"tensorflow.reduce_mean",
"tensorflow... | [((35891, 35910), 'tensorflow.TFRecordReader', 'tf.TFRecordReader', ([], {}), '()\n', (35908, 35910), True, 'import tensorflow as tf\n'), ((35933, 35974), 'tensorflow.train.string_input_producer', 'tf.train.string_input_producer', (['filenames'], {}), '(filenames)\n', (35963, 35974), True, 'import tensorflow as tf\n'), ((38938, 38986), 'tensorflow.decode_raw', 'tf.decode_raw', (["featuresA['data_raw']", 'tf.float32'], {}), "(featuresA['data_raw'], tf.float32)\n", (38951, 38986), True, 'import tensorflow as tf\n'), ((50360, 50407), 'tensorflow.reshape', 'tf.reshape', (['feature', '[DataH, DataW, channelsIn]'], {}), '(feature, [DataH, DataW, channelsIn])\n', (50370, 50407), True, 'import tensorflow as tf\n'), ((50421, 50472), 'tensorflow.reshape', 'tf.reshape', (['labels', '[LabelsH, LabelsW, channelsOut]'], {}), '(labels, [LabelsH, LabelsW, channelsOut])\n', (50431, 50472), True, 'import tensorflow as tf\n'), ((51532, 51579), 'tensorflow.reshape', 'tf.reshape', (['feature', '[DataH, DataW, channelsIn]'], {}), '(feature, [DataH, DataW, channelsIn])\n', (51542, 51579), True, 'import tensorflow as tf\n'), ((51594, 51622), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.float32'], {}), '(feature, tf.float32)\n', (51601, 51622), True, 'import tensorflow as tf\n'), ((51650, 51701), 'tensorflow.reshape', 'tf.reshape', (['labels', '[LabelsH, LabelsW, channelsOut]'], {}), '(labels, [LabelsH, LabelsW, channelsOut])\n', (51660, 51701), True, 'import tensorflow as tf\n'), ((51714, 51741), 'tensorflow.cast', 'tf.cast', (['labels', 'tf.float32'], {}), '(labels, tf.float32)\n', (51721, 51741), True, 'import tensorflow as tf\n'), ((52527, 52668), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, 
name='labels_and_features')\n", (52541, 52668), True, 'import tensorflow as tf\n'), ((52822, 52861), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (52850, 52861), True, 'import tensorflow as tf\n'), ((744, 772), 'h5py.File', 'h5py.File', (['DatasetMatFN', '"""r"""'], {}), "(DatasetMatFN, 'r')\n", (753, 772), False, 'import h5py\n'), ((1230, 1290), 'numpy.reshape', 'np.reshape', (['SensMsk', '(SensMsk.shape[0], SensMsk.shape[1], 1)'], {}), '(SensMsk, (SensMsk.shape[0], SensMsk.shape[1], 1))\n', (1240, 1290), True, 'import numpy as np\n'), ((2946, 3087), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (2960, 3087), True, 'import tensorflow as tf\n'), ((3088, 3127), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (3116, 3127), True, 'import tensorflow as tf\n'), ((3348, 3376), 'h5py.File', 'h5py.File', (['DatasetMatFN', '"""r"""'], {}), "(DatasetMatFN, 'r')\n", (3357, 3376), False, 'import h5py\n'), ((3592, 3612), 'numpy.float32', 'np.float32', (["f['Min']"], {}), "(f['Min'])\n", (3602, 3612), True, 'import numpy as np\n'), ((3628, 3650), 'numpy.float32', 'np.float32', (["f['Range']"], {}), "(f['Range'])\n", (3638, 3650), True, 'import numpy as np\n'), ((3914, 3942), 'h5py.File', 'h5py.File', (['DatasetMatFN', '"""r"""'], {}), "(DatasetMatFN, 'r')\n", (3923, 3942), False, 'import h5py\n'), ((4003, 4023), 'numpy.float32', 'np.float32', (["f['Min']"], {}), "(f['Min'])\n", (4013, 4023), True, 'import numpy as np\n'), ((4039, 4061), 'numpy.float32', 'np.float32', (["f['Range']"], {}), "(f['Range'])\n", (4049, 4061), True, 'import numpy as np\n'), ((4296, 4365), 
'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': 'TFL.shape[0]', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=TFL.shape[0], dtype=tf.int32)\n', (4313, 4365), True, 'import tensorflow as tf\n'), ((4386, 4428), 'tensorflow.slice', 'tf.slice', (['TFL', '[Idx[0], 0, 0]', '[1, -1, -1]'], {}), '(TFL, [Idx[0], 0, 0], [1, -1, -1])\n', (4394, 4428), True, 'import tensorflow as tf\n'), ((4440, 4489), 'tensorflow.slice', 'tf.slice', (['TFF', '[Idx[0], 0, 0, 0]', '[1, -1, -1, -1]'], {}), '(TFF, [Idx[0], 0, 0, 0], [1, -1, -1, -1])\n', (4448, 4489), True, 'import tensorflow as tf\n'), ((4499, 4525), 'tensorflow.cast', 'tf.cast', (['label', 'tf.float32'], {}), '(label, tf.float32)\n', (4506, 4525), True, 'import tensorflow as tf\n'), ((4544, 4572), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.float32'], {}), '(feature, tf.float32)\n', (4551, 4572), True, 'import tensorflow as tf\n'), ((5050, 5191), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (5064, 5191), True, 'import tensorflow as tf\n'), ((5192, 5231), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (5220, 5231), True, 'import tensorflow as tf\n'), ((5517, 5545), 'h5py.File', 'h5py.File', (['DatasetMatFN', '"""r"""'], {}), "(DatasetMatFN, 'r')\n", (5526, 5545), False, 'import h5py\n'), ((6032, 6060), 'h5py.File', 'h5py.File', (['DatasetMatFN', '"""r"""'], {}), "(DatasetMatFN, 'r')\n", (6041, 6060), False, 'import h5py\n'), ((6295, 6364), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': 'TFL.shape[0]', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=TFL.shape[0], dtype=tf.int32)\n', (6312, 6364), 
True, 'import tensorflow as tf\n'), ((6429, 6478), 'tensorflow.slice', 'tf.slice', (['TFL', '[Idx[0], 0, 0, 0]', '[1, -1, -1, -1]'], {}), '(TFL, [Idx[0], 0, 0, 0], [1, -1, -1, -1])\n', (6437, 6478), True, 'import tensorflow as tf\n'), ((6487, 6536), 'tensorflow.slice', 'tf.slice', (['TFF', '[Idx[0], 0, 0, 0]', '[1, -1, -1, -1]'], {}), '(TFF, [Idx[0], 0, 0, 0], [1, -1, -1, -1])\n', (6495, 6536), True, 'import tensorflow as tf\n'), ((6546, 6572), 'tensorflow.cast', 'tf.cast', (['label', 'tf.float32'], {}), '(label, tf.float32)\n', (6553, 6572), True, 'import tensorflow as tf\n'), ((6591, 6619), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.float32'], {}), '(feature, tf.float32)\n', (6598, 6619), True, 'import tensorflow as tf\n'), ((7008, 7149), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (7022, 7149), True, 'import tensorflow as tf\n'), ((7150, 7189), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (7178, 7189), True, 'import tensorflow as tf\n'), ((7776, 7839), 'numpy.reshape', 'np.reshape', (['SensMskA', '(SensMskA.shape[0], SensMskA.shape[1], 1)'], {}), '(SensMskA, (SensMskA.shape[0], SensMskA.shape[1], 1))\n', (7786, 7839), True, 'import numpy as np\n'), ((7854, 7917), 'numpy.reshape', 'np.reshape', (['SensMskB', '(SensMskB.shape[0], SensMskB.shape[1], 1)'], {}), '(SensMskB, (SensMskB.shape[0], SensMskB.shape[1], 1))\n', (7864, 7917), True, 'import numpy as np\n'), ((8236, 8264), 'h5py.File', 'h5py.File', (['DatasetMatFN', '"""r"""'], {}), "(DatasetMatFN, 'r')\n", (8245, 8264), False, 'import h5py\n'), ((8665, 8732), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': 
'I.shape[0]', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=I.shape[0], dtype=tf.int32)\n', (8682, 8732), True, 'import tensorflow as tf\n'), ((8743, 8810), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': 'I.shape[0]', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=I.shape[0], dtype=tf.int32)\n', (8760, 8810), True, 'import tensorflow as tf\n'), ((8825, 8868), 'tensorflow.slice', 'tf.slice', (['TFI', '[IdxA[0], 0, 0]', '[1, -1, -1]'], {}), '(TFI, [IdxA[0], 0, 0], [1, -1, -1])\n', (8833, 8868), True, 'import tensorflow as tf\n'), ((8880, 8923), 'tensorflow.slice', 'tf.slice', (['TFI', '[IdxB[0], 0, 0]', '[1, -1, -1]'], {}), '(TFI, [IdxB[0], 0, 0], [1, -1, -1])\n', (8888, 8923), True, 'import tensorflow as tf\n'), ((8936, 8974), 'tensorflow.transpose', 'tf.transpose', (['featureA'], {'perm': '[1, 2, 0]'}), '(featureA, perm=[1, 2, 0])\n', (8948, 8974), True, 'import tensorflow as tf\n'), ((8990, 9028), 'tensorflow.transpose', 'tf.transpose', (['featureB'], {'perm': '[1, 2, 0]'}), '(featureB, perm=[1, 2, 0])\n', (9002, 9028), True, 'import tensorflow as tf\n'), ((9047, 9088), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['featureA'], {}), '(featureA)\n', (9078, 9088), True, 'import tensorflow as tf\n'), ((9108, 9146), 'tensorflow.image.random_flip_up_down', 'tf.image.random_flip_up_down', (['featureA'], {}), '(featureA)\n', (9136, 9146), True, 'import tensorflow as tf\n'), ((9158, 9180), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {}), '([1])\n', (9175, 9180), True, 'import tensorflow as tf\n'), ((9302, 9343), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['featureB'], {}), '(featureB)\n', (9333, 9343), True, 'import tensorflow as tf\n'), ((9363, 9401), 'tensorflow.image.random_flip_up_down', 'tf.image.random_flip_up_down', (['featureB'], {}), '(featureB)\n', (9391, 9401), True, 'import tensorflow as tf\n'), ((9413, 9435), 
'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {}), '([1])\n', (9430, 9435), True, 'import tensorflow as tf\n'), ((9565, 9600), 'tensorflow.random_crop', 'tf.random_crop', (['featureA', '[H, W, 1]'], {}), '(featureA, [H, W, 1])\n', (9579, 9600), True, 'import tensorflow as tf\n'), ((9620, 9655), 'tensorflow.random_crop', 'tf.random_crop', (['featureB', '[H, W, 1]'], {}), '(featureB, [H, W, 1])\n', (9634, 9655), True, 'import tensorflow as tf\n'), ((9676, 9703), 'tensorflow.cast', 'tf.cast', (['featureA', 'tf.int32'], {}), '(featureA, tf.int32)\n', (9683, 9703), True, 'import tensorflow as tf\n'), ((9723, 9750), 'tensorflow.cast', 'tf.cast', (['featureB', 'tf.int32'], {}), '(featureB, tf.int32)\n', (9730, 9750), True, 'import tensorflow as tf\n'), ((9872, 9909), 'tensorflow.cast', 'tf.cast', (['(featureA / mxA)', 'tf.complex64'], {}), '(featureA / mxA, tf.complex64)\n', (9879, 9909), True, 'import tensorflow as tf\n'), ((9927, 9964), 'tensorflow.cast', 'tf.cast', (['(featureB / mxB)', 'tf.complex64'], {}), '(featureB / mxB, tf.complex64)\n', (9934, 9964), True, 'import tensorflow as tf\n'), ((9981, 10010), 'tensorflow.multiply', 'tf.multiply', (['featureA', 'TFMskA'], {}), '(featureA, TFMskA)\n', (9992, 10010), True, 'import tensorflow as tf\n'), ((10027, 10056), 'tensorflow.multiply', 'tf.multiply', (['featureB', 'TFMskB'], {}), '(featureB, TFMskB)\n', (10038, 10056), True, 'import tensorflow as tf\n'), ((10230, 10281), 'GTools.TFGenerateRandomSinPhase', 'GT.TFGenerateRandomSinPhase', (['H', 'W', 'LFac', 'QFac', 'SFac'], {}), '(H, W, LFac, QFac, SFac)\n', (10257, 10281), True, 'import GTools as GT\n'), ((10332, 10383), 'GTools.TFGenerateRandomSinPhase', 'GT.TFGenerateRandomSinPhase', (['H', 'W', 'LFac', 'QFac', 'SFac'], {}), '(H, W, LFac, QFac, SFac)\n', (10359, 10383), True, 'import GTools as GT\n'), ((10874, 10926), 'GTools.TF_TSNUFFT_Prepare', 'GT.TF_TSNUFFT_Prepare', (['SN', 'SensA', 'TSCA', 'TSBFA', 'Kd', 'P'], {}), '(SN, SensA, TSCA, TSBFA, Kd, 
P)\n', (10895, 10926), True, 'import GTools as GT\n'), ((10961, 11013), 'GTools.TF_TSNUFFT_Prepare', 'GT.TF_TSNUFFT_Prepare', (['SN', 'SensB', 'TSCB', 'TSBFB', 'Kd', 'P'], {}), '(SN, SensB, TSCB, TSBFB, Kd, P)\n', (10982, 11013), True, 'import GTools as GT\n'), ((15223, 15262), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (15251, 15262), True, 'import tensorflow as tf\n'), ((15743, 15803), 'numpy.reshape', 'np.reshape', (['SensMsk', '(SensMsk.shape[0], SensMsk.shape[1], 1)'], {}), '(SensMsk, (SensMsk.shape[0], SensMsk.shape[1], 1))\n', (15753, 15803), True, 'import numpy as np\n'), ((16182, 16210), 'h5py.File', 'h5py.File', (['DatasetMatFN', '"""r"""'], {}), "(DatasetMatFN, 'r')\n", (16191, 16210), False, 'import h5py\n'), ((16726, 16793), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': 'I.shape[0]', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=I.shape[0], dtype=tf.int32)\n', (16743, 16793), True, 'import tensorflow as tf\n'), ((16807, 16849), 'tensorflow.slice', 'tf.slice', (['TFI', '[Idx[0], 0, 0]', '[1, -1, -1]'], {}), '(TFI, [Idx[0], 0, 0], [1, -1, -1])\n', (16815, 16849), True, 'import tensorflow as tf\n'), ((16861, 16898), 'tensorflow.transpose', 'tf.transpose', (['feature'], {'perm': '[1, 2, 0]'}), '(feature, perm=[1, 2, 0])\n', (16873, 16898), True, 'import tensorflow as tf\n'), ((16916, 16956), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['feature'], {}), '(feature)\n', (16947, 16956), True, 'import tensorflow as tf\n'), ((16975, 17012), 'tensorflow.image.random_flip_up_down', 'tf.image.random_flip_up_down', (['feature'], {}), '(feature)\n', (17003, 17012), True, 'import tensorflow as tf\n'), ((17083, 17105), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {}), '([1])\n', (17100, 17105), True, 'import tensorflow as tf\n'), ((17773, 17807), 'tensorflow.random_crop', 'tf.random_crop', (['feature', '[H, 
W, 1]'], {}), '(feature, [H, W, 1])\n', (17787, 17807), True, 'import tensorflow as tf\n'), ((17827, 17853), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.int32'], {}), '(feature, tf.int32)\n', (17834, 17853), True, 'import tensorflow as tf\n'), ((17866, 17888), 'tensorflow.reduce_max', 'tf.reduce_max', (['feature'], {}), '(feature)\n', (17879, 17888), True, 'import tensorflow as tf\n'), ((17900, 17917), 'tensorflow.maximum', 'tf.maximum', (['mx', '(1)'], {}), '(mx, 1)\n', (17910, 17917), True, 'import tensorflow as tf\n'), ((17936, 17971), 'tensorflow.cast', 'tf.cast', (['(feature / mx)', 'tf.complex64'], {}), '(feature / mx, tf.complex64)\n', (17943, 17971), True, 'import tensorflow as tf\n'), ((17987, 18014), 'tensorflow.multiply', 'tf.multiply', (['feature', 'TFMsk'], {}), '(feature, TFMsk)\n', (17998, 18014), True, 'import tensorflow as tf\n'), ((18025, 18058), 'GTools.TFGenerateRandomSinPhase', 'GT.TFGenerateRandomSinPhase', (['H', 'W'], {}), '(H, W)\n', (18052, 18058), True, 'import GTools as GT\n'), ((18492, 18541), 'GTools.TF_TSNUFFT_Prepare', 'GT.TF_TSNUFFT_Prepare', (['SN', 'Sens', 'TSC', 'TSBF', 'Kd', 'P'], {}), '(SN, Sens, TSC, TSBF, Kd, P)\n', (18513, 18541), True, 'import GTools as GT\n'), ((22002, 22041), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (22030, 22041), True, 'import tensorflow as tf\n'), ((22277, 22293), 'tensorflow.constant', 'tf.constant', (['Msk'], {}), '(Msk)\n', (22288, 22293), True, 'import tensorflow as tf\n'), ((22720, 22738), 'h5py.File', 'h5py.File', (['FN', '"""r"""'], {}), "(FN, 'r')\n", (22729, 22738), False, 'import h5py\n'), ((22965, 23029), 'h5py.File', 'h5py.File', (['"""/media/a/H1/AllImWithPhaseComplexSingle_h5.mat"""', '"""r"""'], {}), "('/media/a/H1/AllImWithPhaseComplexSingle_h5.mat', 'r')\n", (22974, 23029), False, 'import h5py\n'), ((23271, 23298), 'tensorflow.constant', 'tf.constant', (['I[0:ChunkSize]'], {}), '(I[0:ChunkSize])\n', (23282, 
23298), True, 'import tensorflow as tf\n'), ((23314, 23353), 'tensorflow.constant', 'tf.constant', (['I[ChunkSize:2 * ChunkSize]'], {}), '(I[ChunkSize:2 * ChunkSize])\n', (23325, 23353), True, 'import tensorflow as tf\n'), ((23371, 23414), 'tensorflow.constant', 'tf.constant', (['I[2 * ChunkSize:3 * ChunkSize]'], {}), '(I[2 * ChunkSize:3 * ChunkSize])\n', (23382, 23414), True, 'import tensorflow as tf\n'), ((23430, 23473), 'tensorflow.constant', 'tf.constant', (['I[3 * ChunkSize:4 * ChunkSize]'], {}), '(I[3 * ChunkSize:4 * ChunkSize])\n', (23441, 23473), True, 'import tensorflow as tf\n'), ((23489, 23503), 'tensorflow.constant', 'tf.constant', (['L'], {}), '(L)\n', (23500, 23503), True, 'import tensorflow as tf\n'), ((23682, 23749), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': 'ChunkSizeL', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=ChunkSizeL, dtype=tf.int32)\n', (23699, 23749), True, 'import tensorflow as tf\n'), ((24071, 24110), 'tensorflow.cond', 'tf.cond', (['(Idx[0] < 2 * ChunkSize)', 'f1', 'f2'], {}), '(Idx[0] < 2 * ChunkSize, f1, f2)\n', (24078, 24110), True, 'import tensorflow as tf\n'), ((24355, 24402), 'tensorflow.reshape', 'tf.reshape', (['feature', '[DataH, DataW, channelsIn]'], {}), '(feature, [DataH, DataW, channelsIn])\n', (24365, 24402), True, 'import tensorflow as tf\n'), ((24421, 24449), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.float32'], {}), '(feature, tf.float32)\n', (24428, 24449), True, 'import tensorflow as tf\n'), ((24468, 24517), 'tensorflow.slice', 'tf.slice', (['TFL', '[Idx[0], 0, 0, 0]', '[1, -1, -1, -1]'], {}), '(TFL, [Idx[0], 0, 0, 0], [1, -1, -1, -1])\n', (24476, 24517), True, 'import tensorflow as tf\n'), ((24575, 24626), 'tensorflow.reshape', 'tf.reshape', (['labels', '[LabelsH, LabelsW, channelsOut]'], {}), '(labels, [LabelsH, LabelsW, channelsOut])\n', (24585, 24626), True, 'import tensorflow as tf\n'), ((24643, 24670), 'tensorflow.cast', 'tf.cast', (['labels', 'tf.float32'], 
{}), '(labels, tf.float32)\n', (24650, 24670), True, 'import tensorflow as tf\n'), ((24685, 24710), 'tensorflow.multiply', 'tf.multiply', (['label', 'TFMsk'], {}), '(label, TFMsk)\n', (24696, 24710), True, 'import tensorflow as tf\n'), ((24774, 24915), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (24788, 24915), True, 'import tensorflow as tf\n'), ((25089, 25128), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (25117, 25128), True, 'import tensorflow as tf\n'), ((25311, 25344), 'numpy.reshape', 'np.reshape', (['SR', '[DataH, DataH, 1]'], {}), '(SR, [DataH, DataH, 1])\n', (25321, 25344), True, 'import numpy as np\n'), ((25353, 25380), 'numpy.transpose', 'np.transpose', (['SR', '(2, 0, 1)'], {}), '(SR, (2, 0, 1))\n', (25365, 25380), True, 'import numpy as np\n'), ((25393, 25408), 'tensorflow.constant', 'tf.constant', (['SR'], {}), '(SR)\n', (25404, 25408), True, 'import tensorflow as tf\n'), ((25698, 25759), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': '(3000)', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=3000, dtype=tf.int32)\n', (25715, 25759), True, 'import tensorflow as tf\n'), ((25773, 25815), 'tensorflow.slice', 'tf.slice', (['TFI', '[Idx[0], 0, 0]', '[1, -1, -1]'], {}), '(TFI, [Idx[0], 0, 0], [1, -1, -1])\n', (25781, 25815), True, 'import tensorflow as tf\n'), ((25827, 25864), 'tensorflow.transpose', 'tf.transpose', (['feature'], {'perm': '[1, 2, 0]'}), '(feature, perm=[1, 2, 0])\n', (25839, 25864), True, 'import tensorflow as tf\n'), ((25882, 25924), 'tensorflow.random_crop', 'tf.random_crop', (['feature', '[DataH, DataW, 1]'], {}), '(feature, [DataH, DataW, 1])\n', 
(25896, 25924), True, 'import tensorflow as tf\n'), ((25937, 25959), 'tensorflow.reduce_max', 'tf.reduce_max', (['feature'], {}), '(feature)\n', (25950, 25959), True, 'import tensorflow as tf\n'), ((25971, 25988), 'tensorflow.maximum', 'tf.maximum', (['mx', '(1)'], {}), '(mx, 1)\n', (25981, 25988), True, 'import tensorflow as tf\n'), ((26007, 26042), 'tensorflow.cast', 'tf.cast', (['(feature / mx)', 'tf.complex64'], {}), '(feature / mx, tf.complex64)\n', (26014, 26042), True, 'import tensorflow as tf\n'), ((26052, 26093), 'GTools.TFGenerateRandomSinPhase', 'GT.TFGenerateRandomSinPhase', (['DataH', 'DataW'], {}), '(DataH, DataW)\n', (26079, 26093), True, 'import GTools as GT\n'), ((26262, 26305), 'tensorflow.transpose', 'tf.transpose', (['CurIWithPhase'], {'perm': '[2, 1, 0]'}), '(CurIWithPhase, perm=[2, 1, 0])\n', (26274, 26305), True, 'import tensorflow as tf\n'), ((26314, 26333), 'tensorflow.matmul', 'tf.matmul', (['P', 'SR_TF'], {}), '(P, SR_TF)\n', (26323, 26333), True, 'import tensorflow as tf\n'), ((26343, 26374), 'tensorflow.transpose', 'tf.transpose', (['F'], {'perm': '[2, 1, 0]'}), '(F, perm=[2, 1, 0])\n', (26355, 26374), True, 'import tensorflow as tf\n'), ((26451, 26500), 'GTools.ExpandWithCopiesOn2', 'GT.ExpandWithCopiesOn2', (['F', 'DataH', 'SPENLocalFactor'], {}), '(F, DataH, SPENLocalFactor)\n', (26473, 26500), True, 'import GTools as GT\n'), ((26586, 26727), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (26600, 26727), True, 'import tensorflow as tf\n'), ((26728, 26767), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (26756, 26767), True, 'import tensorflow as tf\n'), ((26934, 26967), 'numpy.reshape', 
'np.reshape', (['SR', '[DataH, DataH, 1]'], {}), '(SR, [DataH, DataH, 1])\n', (26944, 26967), True, 'import numpy as np\n'), ((26976, 27003), 'numpy.transpose', 'np.transpose', (['SR', '(2, 0, 1)'], {}), '(SR, (2, 0, 1))\n', (26988, 27003), True, 'import numpy as np\n'), ((27016, 27031), 'tensorflow.constant', 'tf.constant', (['SR'], {}), '(SR)\n', (27027, 27031), True, 'import tensorflow as tf\n'), ((27206, 27267), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': '(1000)', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=1000, dtype=tf.int32)\n', (27223, 27267), True, 'import tensorflow as tf\n'), ((27281, 27323), 'tensorflow.slice', 'tf.slice', (['TFI', '[Idx[0], 0, 0]', '[1, -1, -1]'], {}), '(TFI, [Idx[0], 0, 0], [1, -1, -1])\n', (27289, 27323), True, 'import tensorflow as tf\n'), ((27335, 27372), 'tensorflow.transpose', 'tf.transpose', (['feature'], {'perm': '[1, 2, 0]'}), '(feature, perm=[1, 2, 0])\n', (27347, 27372), True, 'import tensorflow as tf\n'), ((27390, 27432), 'tensorflow.random_crop', 'tf.random_crop', (['feature', '[DataH, DataW, 1]'], {}), '(feature, [DataH, DataW, 1])\n', (27404, 27432), True, 'import tensorflow as tf\n'), ((27445, 27467), 'tensorflow.reduce_max', 'tf.reduce_max', (['feature'], {}), '(feature)\n', (27458, 27467), True, 'import tensorflow as tf\n'), ((27479, 27496), 'tensorflow.maximum', 'tf.maximum', (['mx', '(1)'], {}), '(mx, 1)\n', (27489, 27496), True, 'import tensorflow as tf\n'), ((27515, 27550), 'tensorflow.cast', 'tf.cast', (['(feature / mx)', 'tf.complex64'], {}), '(feature / mx, tf.complex64)\n', (27522, 27550), True, 'import tensorflow as tf\n'), ((27560, 27601), 'GTools.TFGenerateRandomSinPhase', 'GT.TFGenerateRandomSinPhase', (['DataH', 'DataW'], {}), '(DataH, DataW)\n', (27587, 27601), True, 'import GTools as GT\n'), ((27770, 27813), 'tensorflow.transpose', 'tf.transpose', (['CurIWithPhase'], {'perm': '[2, 1, 0]'}), '(CurIWithPhase, perm=[2, 1, 0])\n', (27782, 27813), True, 
'import tensorflow as tf\n'), ((27822, 27841), 'tensorflow.matmul', 'tf.matmul', (['P', 'SR_TF'], {}), '(P, SR_TF)\n', (27831, 27841), True, 'import tensorflow as tf\n'), ((27851, 27882), 'tensorflow.transpose', 'tf.transpose', (['F'], {'perm': '[2, 1, 0]'}), '(F, perm=[2, 1, 0])\n', (27863, 27882), True, 'import tensorflow as tf\n'), ((27976, 28117), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (27990, 28117), True, 'import tensorflow as tf\n'), ((28118, 28157), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (28146, 28157), True, 'import tensorflow as tf\n'), ((28509, 28540), 'numpy.reshape', 'np.reshape', (['Mask', '[128, 128, 1]'], {}), '(Mask, [128, 128, 1])\n', (28519, 28540), True, 'import numpy as np\n'), ((28556, 28573), 'tensorflow.constant', 'tf.constant', (['Maps'], {}), '(Maps)\n', (28567, 28573), True, 'import tensorflow as tf\n'), ((28720, 28781), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': '(3000)', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=3000, dtype=tf.int32)\n', (28737, 28781), True, 'import tensorflow as tf\n'), ((28795, 28837), 'tensorflow.slice', 'tf.slice', (['TFI', '[Idx[0], 0, 0]', '[1, -1, -1]'], {}), '(TFI, [Idx[0], 0, 0], [1, -1, -1])\n', (28803, 28837), True, 'import tensorflow as tf\n'), ((28851, 28885), 'tensorflow.reshape', 'tf.reshape', (['feature', '[128, 128, 1]'], {}), '(feature, [128, 128, 1])\n', (28861, 28885), True, 'import tensorflow as tf\n'), ((28905, 28931), 'tensorflow.multiply', 'tf.multiply', (['feature', 'Mask'], {}), '(feature, Mask)\n', (28916, 28931), True, 'import tensorflow as tf\n'), ((28958, 28988), 
'tensorflow.cast', 'tf.cast', (['feature', 'tf.complex64'], {}), '(feature, tf.complex64)\n', (28965, 28988), True, 'import tensorflow as tf\n'), ((29000, 29041), 'GTools.TFGenerateRandomSinPhase', 'GT.TFGenerateRandomSinPhase', (['DataH', 'DataW'], {}), '(DataH, DataW)\n', (29027, 29041), True, 'import GTools as GT\n'), ((29128, 29160), 'tensorflow.multiply', 'tf.multiply', (['CurIWithPhase', 'Maps'], {}), '(CurIWithPhase, Maps)\n', (29139, 29160), True, 'import tensorflow as tf\n'), ((29277, 29319), 'GTools.gfft_TFOn3D', 'GT.gfft_TFOn3D', (['WithPhaseAndMaps', 'DataH', '(0)'], {}), '(WithPhaseAndMaps, DataH, 0)\n', (29291, 29319), True, 'import GTools as GT\n'), ((29328, 29355), 'GTools.gfft_TFOn3D', 'GT.gfft_TFOn3D', (['F', 'DataW', '(1)'], {}), '(F, DataW, 1)\n', (29342, 29355), True, 'import GTools as GT\n'), ((29401, 29439), 'tensorflow.reshape', 'tf.reshape', (['F', '[64, 2, 128, nChannels]'], {}), '(F, [64, 2, 128, nChannels])\n', (29411, 29439), True, 'import tensorflow as tf\n'), ((29449, 29491), 'tensorflow.slice', 'tf.slice', (['F', '[0, 0, 0, 0]', '[-1, 1, -1, -1]'], {}), '(F, [0, 0, 0, 0], [-1, 1, -1, -1])\n', (29457, 29491), True, 'import tensorflow as tf\n'), ((29496, 29531), 'tensorflow.reshape', 'tf.reshape', (['F', '[64, 128, nChannels]'], {}), '(F, [64, 128, nChannels])\n', (29506, 29531), True, 'import tensorflow as tf\n'), ((29619, 29760), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (29633, 29760), True, 'import tensorflow as tf\n'), ((29761, 29800), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (29789, 29800), True, 'import tensorflow as tf\n'), ((30151, 30182), 'numpy.reshape', 'np.reshape', 
(['Mask', '[128, 128, 1]'], {}), '(Mask, [128, 128, 1])\n', (30161, 30182), True, 'import numpy as np\n'), ((30198, 30215), 'tensorflow.constant', 'tf.constant', (['Maps'], {}), '(Maps)\n', (30209, 30215), True, 'import tensorflow as tf\n'), ((30362, 30423), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': '(3000)', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=3000, dtype=tf.int32)\n', (30379, 30423), True, 'import tensorflow as tf\n'), ((30437, 30479), 'tensorflow.slice', 'tf.slice', (['TFI', '[Idx[0], 0, 0]', '[1, -1, -1]'], {}), '(TFI, [Idx[0], 0, 0], [1, -1, -1])\n', (30445, 30479), True, 'import tensorflow as tf\n'), ((30493, 30527), 'tensorflow.reshape', 'tf.reshape', (['feature', '[128, 128, 1]'], {}), '(feature, [128, 128, 1])\n', (30503, 30527), True, 'import tensorflow as tf\n'), ((30547, 30573), 'tensorflow.multiply', 'tf.multiply', (['feature', 'Mask'], {}), '(feature, Mask)\n', (30558, 30573), True, 'import tensorflow as tf\n'), ((30600, 30630), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.complex64'], {}), '(feature, tf.complex64)\n', (30607, 30630), True, 'import tensorflow as tf\n'), ((30642, 30683), 'GTools.TFGenerateRandomSinPhase', 'GT.TFGenerateRandomSinPhase', (['DataH', 'DataW'], {}), '(DataH, DataW)\n', (30669, 30683), True, 'import GTools as GT\n'), ((30770, 30802), 'tensorflow.multiply', 'tf.multiply', (['CurIWithPhase', 'Maps'], {}), '(CurIWithPhase, Maps)\n', (30781, 30802), True, 'import tensorflow as tf\n'), ((30919, 30961), 'GTools.gfft_TFOn3D', 'GT.gfft_TFOn3D', (['WithPhaseAndMaps', 'DataH', '(0)'], {}), '(WithPhaseAndMaps, DataH, 0)\n', (30933, 30961), True, 'import GTools as GT\n'), ((30970, 30997), 'GTools.gfft_TFOn3D', 'GT.gfft_TFOn3D', (['F', 'DataW', '(1)'], {}), '(F, DataW, 1)\n', (30984, 30997), True, 'import GTools as GT\n'), ((31091, 31232), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': 
'(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (31105, 31232), True, 'import tensorflow as tf\n'), ((31233, 31272), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (31261, 31272), True, 'import tensorflow as tf\n'), ((31522, 31583), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': '(3000)', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=3000, dtype=tf.int32)\n', (31539, 31583), True, 'import tensorflow as tf\n'), ((31597, 31639), 'tensorflow.slice', 'tf.slice', (['TFI', '[Idx[0], 0, 0]', '[1, -1, -1]'], {}), '(TFI, [Idx[0], 0, 0], [1, -1, -1])\n', (31605, 31639), True, 'import tensorflow as tf\n'), ((31653, 31687), 'tensorflow.reshape', 'tf.reshape', (['feature', '[128, 128, 1]'], {}), '(feature, [128, 128, 1])\n', (31663, 31687), True, 'import tensorflow as tf\n'), ((31706, 31748), 'tensorflow.random_crop', 'tf.random_crop', (['feature', '[DataH, DataW, 1]'], {}), '(feature, [DataH, DataW, 1])\n', (31720, 31748), True, 'import tensorflow as tf\n'), ((31768, 31798), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.complex64'], {}), '(feature, tf.complex64)\n', (31775, 31798), True, 'import tensorflow as tf\n'), ((31818, 31859), 'GTools.TFGenerateRandomSinPhase', 'GT.TFGenerateRandomSinPhase', (['DataH', 'DataW'], {}), '(DataH, DataW)\n', (31845, 31859), True, 'import GTools as GT\n'), ((31997, 32026), 'tensorflow.reshape', 'tf.reshape', (['IQ', 'IQ.shape[0:2]'], {}), '(IQ, IQ.shape[0:2])\n', (32007, 32026), True, 'import tensorflow as tf\n'), ((32039, 32064), 'GTools.gfft_TF', 'GT.gfft_TF', (['IQ2', 'DataH', '(0)'], {}), '(IQ2, DataH, 0)\n', (32049, 32064), True, 'import GTools as GT\n'), ((32075, 32100), 'GTools.gfft_TF', 'GT.gfft_TF', (['IQ2', 'DataW', '(1)'], {}), '(IQ2, DataW, 1)\n', (32085, 32100), True, 
'import GTools as GT\n'), ((32115, 32153), 'tensorflow.reshape', 'tf.reshape', (['IQ2', '[DataH * DataW, 1, 1]'], {}), '(IQ2, [DataH * DataW, 1, 1])\n', (32125, 32153), True, 'import tensorflow as tf\n'), ((32256, 32397), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (32270, 32397), True, 'import tensorflow as tf\n'), ((32398, 32437), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (32426, 32437), True, 'import tensorflow as tf\n'), ((32689, 32750), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': '(3000)', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=3000, dtype=tf.int32)\n', (32706, 32750), True, 'import tensorflow as tf\n'), ((32764, 32806), 'tensorflow.slice', 'tf.slice', (['TFI', '[Idx[0], 0, 0]', '[1, -1, -1]'], {}), '(TFI, [Idx[0], 0, 0], [1, -1, -1])\n', (32772, 32806), True, 'import tensorflow as tf\n'), ((32820, 32854), 'tensorflow.reshape', 'tf.reshape', (['feature', '[128, 128, 1]'], {}), '(feature, [128, 128, 1])\n', (32830, 32854), True, 'import tensorflow as tf\n'), ((32873, 32915), 'tensorflow.random_crop', 'tf.random_crop', (['feature', '[DataH, DataW, 1]'], {}), '(feature, [DataH, DataW, 1])\n', (32887, 32915), True, 'import tensorflow as tf\n'), ((32935, 32965), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.complex64'], {}), '(feature, tf.complex64)\n', (32942, 32965), True, 'import tensorflow as tf\n'), ((32985, 33026), 'GTools.TFGenerateRandomSinPhase', 'GT.TFGenerateRandomSinPhase', (['DataH', 'DataW'], {}), '(DataH, DataW)\n', (33012, 33026), True, 'import GTools as GT\n'), ((33164, 33193), 'tensorflow.reshape', 'tf.reshape', (['IQ', 'IQ.shape[0:2]'], 
{}), '(IQ, IQ.shape[0:2])\n', (33174, 33193), True, 'import tensorflow as tf\n'), ((33206, 33231), 'GTools.gfft_TF', 'GT.gfft_TF', (['IQ2', 'DataH', '(0)'], {}), '(IQ2, DataH, 0)\n', (33216, 33231), True, 'import GTools as GT\n'), ((33242, 33267), 'GTools.gfft_TF', 'GT.gfft_TF', (['IQ2', 'DataW', '(1)'], {}), '(IQ2, DataW, 1)\n', (33252, 33267), True, 'import GTools as GT\n'), ((33282, 33316), 'tensorflow.reshape', 'tf.reshape', (['IQ2', '[DataH, DataW, 1]'], {}), '(IQ2, [DataH, DataW, 1])\n', (33292, 33316), True, 'import tensorflow as tf\n'), ((33421, 33562), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (33435, 33562), True, 'import tensorflow as tf\n'), ((33563, 33602), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (33591, 33602), True, 'import tensorflow as tf\n'), ((33853, 33914), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': '(3000)', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=3000, dtype=tf.int32)\n', (33870, 33914), True, 'import tensorflow as tf\n'), ((33928, 33970), 'tensorflow.slice', 'tf.slice', (['TFI', '[Idx[0], 0, 0]', '[1, -1, -1]'], {}), '(TFI, [Idx[0], 0, 0], [1, -1, -1])\n', (33936, 33970), True, 'import tensorflow as tf\n'), ((33984, 34022), 'tensorflow.reshape', 'tf.reshape', (['feature', '[DataH, DataW, 1]'], {}), '(feature, [DataH, DataW, 1])\n', (33994, 34022), True, 'import tensorflow as tf\n'), ((34042, 34072), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.complex64'], {}), '(feature, tf.complex64)\n', (34049, 34072), True, 'import tensorflow as tf\n'), ((34092, 34133), 'GTools.TFGenerateRandomSinPhase', 'GT.TFGenerateRandomSinPhase', 
(['DataH', 'DataW'], {}), '(DataH, DataW)\n', (34119, 34133), True, 'import GTools as GT\n'), ((34271, 34300), 'tensorflow.reshape', 'tf.reshape', (['IQ', 'IQ.shape[0:2]'], {}), '(IQ, IQ.shape[0:2])\n', (34281, 34300), True, 'import tensorflow as tf\n'), ((34313, 34338), 'GTools.gfft_TF', 'GT.gfft_TF', (['IQ2', 'DataW', '(1)'], {}), '(IQ2, DataW, 1)\n', (34323, 34338), True, 'import GTools as GT\n'), ((34353, 34387), 'tensorflow.reshape', 'tf.reshape', (['IQ2', '[DataH, DataW, 1]'], {}), '(IQ2, [DataH, DataW, 1])\n', (34363, 34387), True, 'import tensorflow as tf\n'), ((34492, 34633), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (34506, 34633), True, 'import tensorflow as tf\n'), ((34634, 34673), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (34662, 34673), True, 'import tensorflow as tf\n'), ((34924, 34985), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': '(3000)', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=3000, dtype=tf.int32)\n', (34941, 34985), True, 'import tensorflow as tf\n'), ((34999, 35041), 'tensorflow.slice', 'tf.slice', (['TFI', '[Idx[0], 0, 0]', '[1, -1, -1]'], {}), '(TFI, [Idx[0], 0, 0], [1, -1, -1])\n', (35007, 35041), True, 'import tensorflow as tf\n'), ((35055, 35093), 'tensorflow.reshape', 'tf.reshape', (['feature', '[DataH, DataW, 1]'], {}), '(feature, [DataH, DataW, 1])\n', (35065, 35093), True, 'import tensorflow as tf\n'), ((35113, 35143), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.complex64'], {}), '(feature, tf.complex64)\n', (35120, 35143), True, 'import tensorflow as tf\n'), ((35163, 35204), 'GTools.TFGenerateRandomSinPhase', 
'GT.TFGenerateRandomSinPhase', (['DataH', 'DataW'], {}), '(DataH, DataW)\n', (35190, 35204), True, 'import GTools as GT\n'), ((35342, 35371), 'tensorflow.reshape', 'tf.reshape', (['IQ', 'IQ.shape[0:2]'], {}), '(IQ, IQ.shape[0:2])\n', (35352, 35371), True, 'import tensorflow as tf\n'), ((35384, 35409), 'GTools.gfft_TF', 'GT.gfft_TF', (['IQ2', 'DataH', '(0)'], {}), '(IQ2, DataH, 0)\n', (35394, 35409), True, 'import GTools as GT\n'), ((35424, 35458), 'tensorflow.reshape', 'tf.reshape', (['IQ2', '[DataH, DataW, 1]'], {}), '(IQ2, [DataH, DataW, 1])\n', (35434, 35458), True, 'import tensorflow as tf\n'), ((35563, 35704), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (35577, 35704), True, 'import tensorflow as tf\n'), ((35705, 35744), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (35733, 35744), True, 'import tensorflow as tf\n'), ((36441, 36480), 'tensorflow.Print', 'tf.Print', (['value', '[keyX]'], {'message': '"""QQQ:"""'}), "(value, [keyX], message='QQQ:')\n", (36449, 36480), True, 'import tensorflow as tf\n'), ((36721, 36767), 'tensorflow.decode_raw', 'tf.decode_raw', (["featuresA['Labels']", 'tf.float32'], {}), "(featuresA['Labels'], tf.float32)\n", (36734, 36767), True, 'import tensorflow as tf\n'), ((36784, 36829), 'tensorflow.decode_raw', 'tf.decode_raw', (["featuresA['CurIs']", 'tf.float32'], {}), "(featuresA['CurIs'], tf.float32)\n", (36797, 36829), True, 'import tensorflow as tf\n'), ((36846, 36870), 'tensorflow.cast', 'tf.cast', (['CurIs', 'tf.int64'], {}), '(CurIs, tf.int64)\n', (36853, 36870), True, 'import tensorflow as tf\n'), ((36927, 36972), 'tensorflow.Print', 'tf.Print', (['feature', '[keyX, mx]'], 
{'message': '"""QQQ:"""'}), "(feature, [keyX, mx], message='QQQ:')\n", (36935, 36972), True, 'import tensorflow as tf\n'), ((36988, 37033), 'tensorflow.Print', 'tf.Print', (['feature', '[keyX, mx]'], {'message': '"""QQQ:"""'}), "(feature, [keyX, mx], message='QQQ:')\n", (36996, 37033), True, 'import tensorflow as tf\n'), ((37049, 37094), 'tensorflow.Print', 'tf.Print', (['feature', '[keyX, mx]'], {'message': '"""QQQ:"""'}), "(feature, [keyX, mx], message='QQQ:')\n", (37057, 37094), True, 'import tensorflow as tf\n'), ((37110, 37155), 'tensorflow.Print', 'tf.Print', (['feature', '[keyX, mx]'], {'message': '"""QQQ:"""'}), "(feature, [keyX, mx], message='QQQ:')\n", (37118, 37155), True, 'import tensorflow as tf\n'), ((37171, 37216), 'tensorflow.Print', 'tf.Print', (['feature', '[keyX, mx]'], {'message': '"""QQQ:"""'}), "(feature, [keyX, mx], message='QQQ:')\n", (37179, 37216), True, 'import tensorflow as tf\n'), ((37233, 37280), 'tensorflow.reshape', 'tf.reshape', (['feature', '[DataH, DataW, channelsIn]'], {}), '(feature, [DataH, DataW, channelsIn])\n', (37243, 37280), True, 'import tensorflow as tf\n'), ((37299, 37327), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.float32'], {}), '(feature, tf.float32)\n', (37306, 37327), True, 'import tensorflow as tf\n'), ((37379, 37520), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (37393, 37520), True, 'import tensorflow as tf\n'), ((37694, 37733), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (37722, 37733), True, 'import tensorflow as tf\n'), ((38517, 38567), 'tensorflow.decode_raw', 'tf.decode_raw', (["featuresA['labels_raw']", 'tf.float32'], {}), "(featuresA['labels_raw'], 
tf.float32)\n", (38530, 38567), True, 'import tensorflow as tf\n'), ((39237, 39271), 'tensorflow.reshape', 'tf.reshape', (['feature', '[256, 256, 1]'], {}), '(feature, [256, 256, 1])\n', (39247, 39271), True, 'import tensorflow as tf\n'), ((39290, 39341), 'tensorflow.random_crop', 'tf.random_crop', (['feature', '[DataH, DataW, channelsIn]'], {}), '(feature, [DataH, DataW, channelsIn])\n', (39304, 39341), True, 'import tensorflow as tf\n'), ((39362, 39385), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['feature'], {}), '(feature)\n', (39376, 39385), True, 'import tensorflow as tf\n'), ((39397, 39419), 'tensorflow.reduce_max', 'tf.reduce_max', (['feature'], {}), '(feature)\n', (39410, 39419), True, 'import tensorflow as tf\n'), ((39431, 39448), 'tensorflow.maximum', 'tf.maximum', (['mx', '(1)'], {}), '(mx, 1)\n', (39441, 39448), True, 'import tensorflow as tf\n'), ((39644, 39679), 'tensorflow.cast', 'tf.cast', (['(feature / mx)', 'tf.complex64'], {}), '(feature / mx, tf.complex64)\n', (39651, 39679), True, 'import tensorflow as tf\n'), ((39697, 39738), 'GTools.TFGenerateRandomSinPhase', 'GT.TFGenerateRandomSinPhase', (['DataH', 'DataW'], {}), '(DataH, DataW)\n', (39724, 39738), True, 'import GTools as GT\n'), ((40028, 40057), 'tensorflow.reshape', 'tf.reshape', (['IQ', 'IQ.shape[0:2]'], {}), '(IQ, IQ.shape[0:2])\n', (40038, 40057), True, 'import tensorflow as tf\n'), ((40073, 40084), 'tensorflow.fft', 'tf.fft', (['IQ2'], {}), '(IQ2)\n', (40079, 40084), True, 'import tensorflow as tf\n'), ((40103, 40133), 'tensorflow.gather', 'tf.gather', (['feature', 'Id'], {'axis': '(1)'}), '(feature, Id, axis=1)\n', (40112, 40133), True, 'import tensorflow as tf\n'), ((40150, 40197), 'tensorflow.reshape', 'tf.reshape', (['feature', '[DataH, DataW, channelsIn]'], {}), '(feature, [DataH, DataW, channelsIn])\n', (40160, 40197), True, 'import tensorflow as tf\n'), ((40296, 40437), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 
'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (40310, 40437), True, 'import tensorflow as tf\n'), ((40611, 40650), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (40639, 40650), True, 'import tensorflow as tf\n'), ((40759, 40793), 'tensorflow.reshape', 'tf.reshape', (['feature', '[256, 256, 1]'], {}), '(feature, [256, 256, 1])\n', (40769, 40793), True, 'import tensorflow as tf\n'), ((40812, 40863), 'tensorflow.random_crop', 'tf.random_crop', (['feature', '[DataH, DataW, channelsIn]'], {}), '(feature, [DataH, DataW, channelsIn])\n', (40826, 40863), True, 'import tensorflow as tf\n'), ((40884, 40907), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['feature'], {}), '(feature)\n', (40898, 40907), True, 'import tensorflow as tf\n'), ((40919, 40941), 'tensorflow.reduce_max', 'tf.reduce_max', (['feature'], {}), '(feature)\n', (40932, 40941), True, 'import tensorflow as tf\n'), ((40953, 40970), 'tensorflow.maximum', 'tf.maximum', (['mx', '(1)'], {}), '(mx, 1)\n', (40963, 40970), True, 'import tensorflow as tf\n'), ((41166, 41201), 'tensorflow.cast', 'tf.cast', (['(feature / mx)', 'tf.complex64'], {}), '(feature / mx, tf.complex64)\n', (41173, 41201), True, 'import tensorflow as tf\n'), ((41219, 41260), 'GTools.TFGenerateRandomSinPhase', 'GT.TFGenerateRandomSinPhase', (['DataH', 'DataW'], {}), '(DataH, DataW)\n', (41246, 41260), True, 'import GTools as GT\n'), ((41550, 41579), 'tensorflow.reshape', 'tf.reshape', (['IQ', 'IQ.shape[0:2]'], {}), '(IQ, IQ.shape[0:2])\n', (41560, 41579), True, 'import tensorflow as tf\n'), ((41593, 41623), 'tensorflow.transpose', 'tf.transpose', (['IQ2'], {'perm': '[1, 0]'}), '(IQ2, perm=[1, 0])\n', (41605, 41623), True, 'import tensorflow as tf\n'), ((41640, 41651), 'tensorflow.fft', 
'tf.fft', (['IQ2'], {}), '(IQ2)\n', (41646, 41651), True, 'import tensorflow as tf\n'), ((41670, 41700), 'tensorflow.gather', 'tf.gather', (['feature', 'Id'], {'axis': '(1)'}), '(feature, Id, axis=1)\n', (41679, 41700), True, 'import tensorflow as tf\n'), ((41717, 41751), 'tensorflow.transpose', 'tf.transpose', (['feature'], {'perm': '[1, 0]'}), '(feature, perm=[1, 0])\n', (41729, 41751), True, 'import tensorflow as tf\n'), ((41769, 41816), 'tensorflow.reshape', 'tf.reshape', (['feature', '[DataH, DataW, channelsIn]'], {}), '(feature, [DataH, DataW, channelsIn])\n', (41779, 41816), True, 'import tensorflow as tf\n'), ((41915, 42056), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (41929, 42056), True, 'import tensorflow as tf\n'), ((42230, 42269), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (42258, 42269), True, 'import tensorflow as tf\n'), ((42377, 42411), 'tensorflow.reshape', 'tf.reshape', (['feature', '[256, 256, 1]'], {}), '(feature, [256, 256, 1])\n', (42387, 42411), True, 'import tensorflow as tf\n'), ((42430, 42481), 'tensorflow.random_crop', 'tf.random_crop', (['feature', '[DataH, DataW, channelsIn]'], {}), '(feature, [DataH, DataW, channelsIn])\n', (42444, 42481), True, 'import tensorflow as tf\n'), ((42502, 42525), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['feature'], {}), '(feature)\n', (42516, 42525), True, 'import tensorflow as tf\n'), ((42537, 42559), 'tensorflow.reduce_max', 'tf.reduce_max', (['feature'], {}), '(feature)\n', (42550, 42559), True, 'import tensorflow as tf\n'), ((42571, 42588), 'tensorflow.maximum', 'tf.maximum', (['mx', '(1)'], {}), '(mx, 1)\n', (42581, 42588), True, 'import 
tensorflow as tf\n'), ((42784, 42819), 'tensorflow.cast', 'tf.cast', (['(feature / mx)', 'tf.complex64'], {}), '(feature / mx, tf.complex64)\n', (42791, 42819), True, 'import tensorflow as tf\n'), ((42837, 42878), 'GTools.TFGenerateRandomSinPhase', 'GT.TFGenerateRandomSinPhase', (['DataH', 'DataW'], {}), '(DataH, DataW)\n', (42864, 42878), True, 'import GTools as GT\n'), ((43302, 43331), 'tensorflow.reshape', 'tf.reshape', (['IQ', 'IQ.shape[0:2]'], {}), '(IQ, IQ.shape[0:2])\n', (43312, 43331), True, 'import tensorflow as tf\n'), ((43344, 43355), 'tensorflow.fft', 'tf.fft', (['IQ2'], {}), '(IQ2)\n', (43350, 43355), True, 'import tensorflow as tf\n'), ((43368, 43395), 'tensorflow.gather', 'tf.gather', (['IQ2', 'IdW'], {'axis': '(1)'}), '(IQ2, IdW, axis=1)\n', (43377, 43395), True, 'import tensorflow as tf\n'), ((43409, 43439), 'tensorflow.transpose', 'tf.transpose', (['IQ2'], {'perm': '[1, 0]'}), '(IQ2, perm=[1, 0])\n', (43421, 43439), True, 'import tensorflow as tf\n'), ((43456, 43467), 'tensorflow.fft', 'tf.fft', (['IQ2'], {}), '(IQ2)\n', (43462, 43467), True, 'import tensorflow as tf\n'), ((43486, 43517), 'tensorflow.gather', 'tf.gather', (['feature', 'IdH'], {'axis': '(1)'}), '(feature, IdH, axis=1)\n', (43495, 43517), True, 'import tensorflow as tf\n'), ((43534, 43568), 'tensorflow.transpose', 'tf.transpose', (['feature'], {'perm': '[1, 0]'}), '(feature, perm=[1, 0])\n', (43546, 43568), True, 'import tensorflow as tf\n'), ((43586, 43633), 'tensorflow.reshape', 'tf.reshape', (['feature', '[DataH, DataW, channelsIn]'], {}), '(feature, [DataH, DataW, channelsIn])\n', (43596, 43633), True, 'import tensorflow as tf\n'), ((43732, 43873), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", 
(43746, 43873), True, 'import tensorflow as tf\n'), ((44047, 44086), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (44075, 44086), True, 'import tensorflow as tf\n'), ((44169, 44210), 'tensorflow.train.string_input_producer', 'tf.train.string_input_producer', (['filenames'], {}), '(filenames)\n', (44199, 44210), True, 'import tensorflow as tf\n'), ((44630, 44679), 'tensorflow.decode_raw', 'tf.decode_raw', (["featuresA2['data_raw']", 'tf.float32'], {}), "(featuresA2['data_raw'], tf.float32)\n", (44643, 44679), True, 'import tensorflow as tf\n'), ((44699, 44746), 'tensorflow.reshape', 'tf.reshape', (['feature', '[DataH, DataW, channelsIn]'], {}), '(feature, [DataH, DataW, channelsIn])\n', (44709, 44746), True, 'import tensorflow as tf\n'), ((44766, 44814), 'tensorflow.reshape', 'tf.reshape', (['feature2', '[DataH, DataW, channelsIn]'], {}), '(feature2, [DataH, DataW, channelsIn])\n', (44776, 44814), True, 'import tensorflow as tf\n'), ((45091, 45151), 'tensorflow.concat', 'tf.concat', (['[feature, feature * 0, feature2, feature2 * 0]', '(2)'], {}), '([feature, feature * 0, feature2, feature2 * 0], 2)\n', (45100, 45151), True, 'import tensorflow as tf\n'), ((45195, 45336), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (45209, 45336), True, 'import tensorflow as tf\n'), ((45510, 45549), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (45538, 45549), True, 'import tensorflow as tf\n'), ((45663, 45710), 'tensorflow.reshape', 'tf.reshape', (['feature', '[DataH, DataW, channelsIn]'], {}), '(feature, [DataH, DataW, channelsIn])\n', (45673, 45710), True, 'import 
tensorflow as tf\n'), ((45729, 45757), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.float32'], {}), '(feature, tf.float32)\n', (45736, 45757), True, 'import tensorflow as tf\n'), ((45784, 45835), 'tensorflow.reshape', 'tf.reshape', (['labels', '[LabelsH, LabelsW, channelsOut]'], {}), '(labels, [LabelsH, LabelsW, channelsOut])\n', (45794, 45835), True, 'import tensorflow as tf\n'), ((45852, 45879), 'tensorflow.cast', 'tf.cast', (['labels', 'tf.float32'], {}), '(labels, tf.float32)\n', (45859, 45879), True, 'import tensorflow as tf\n'), ((45944, 46085), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (45958, 46085), True, 'import tensorflow as tf\n'), ((46259, 46298), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (46287, 46298), True, 'import tensorflow as tf\n'), ((46517, 46536), 'tensorflow.constant', 'tf.constant', (['NMapCR'], {}), '(NMapCR)\n', (46528, 46536), True, 'import tensorflow as tf\n'), ((46554, 46614), 'tensorflow.gather', 'tf.gather', (['feature', 'NMapCR'], {'validate_indices': 'None', 'name': 'None'}), '(feature, NMapCR, validate_indices=None, name=None)\n', (46563, 46614), True, 'import tensorflow as tf\n'), ((46631, 46678), 'tensorflow.reshape', 'tf.reshape', (['feature', '[DataH, DataW, channelsIn]'], {}), '(feature, [DataH, DataW, channelsIn])\n', (46641, 46678), True, 'import tensorflow as tf\n'), ((46697, 46725), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.float32'], {}), '(feature, tf.float32)\n', (46704, 46725), True, 'import tensorflow as tf\n'), ((46752, 46795), 'tensorflow.reshape', 'tf.reshape', (['labels', '[128, 128, channelsOut]'], {}), '(labels, [128, 128, channelsOut])\n', (46762, 
46795), True, 'import tensorflow as tf\n'), ((46885, 46935), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['labels', '[LabelsH, LabelsW]'], {}), '(labels, [LabelsH, LabelsW])\n', (46907, 46935), True, 'import tensorflow as tf\n'), ((47020, 47047), 'tensorflow.cast', 'tf.cast', (['labels', 'tf.float32'], {}), '(labels, tf.float32)\n', (47027, 47047), True, 'import tensorflow as tf\n'), ((47112, 47253), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (47126, 47253), True, 'import tensorflow as tf\n'), ((47427, 47466), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (47455, 47466), True, 'import tensorflow as tf\n'), ((47796, 47815), 'tensorflow.constant', 'tf.constant', (['NMapCR'], {}), '(NMapCR)\n', (47807, 47815), True, 'import tensorflow as tf\n'), ((47833, 47893), 'tensorflow.gather', 'tf.gather', (['feature', 'NMapCR'], {'validate_indices': 'None', 'name': 'None'}), '(feature, NMapCR, validate_indices=None, name=None)\n', (47842, 47893), True, 'import tensorflow as tf\n'), ((47910, 47957), 'tensorflow.reshape', 'tf.reshape', (['feature', '[DataH, DataW, channelsIn]'], {}), '(feature, [DataH, DataW, channelsIn])\n', (47920, 47957), True, 'import tensorflow as tf\n'), ((47976, 48004), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.float32'], {}), '(feature, tf.float32)\n', (47983, 48004), True, 'import tensorflow as tf\n'), ((48031, 48082), 'tensorflow.reshape', 'tf.reshape', (['labels', '[LabelsH, LabelsW, channelsOut]'], {}), '(labels, [LabelsH, LabelsW, channelsOut])\n', (48041, 48082), True, 'import tensorflow as tf\n'), ((48099, 48126), 'tensorflow.cast', 'tf.cast', (['labels', 'tf.float32'], {}), 
'(labels, tf.float32)\n', (48106, 48126), True, 'import tensorflow as tf\n'), ((48191, 48332), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (48205, 48332), True, 'import tensorflow as tf\n'), ((48506, 48545), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (48534, 48545), True, 'import tensorflow as tf\n'), ((48658, 48705), 'tensorflow.reshape', 'tf.reshape', (['feature', '[DataH, DataW, channelsIn]'], {}), '(feature, [DataH, DataW, channelsIn])\n', (48668, 48705), True, 'import tensorflow as tf\n'), ((48724, 48752), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.float32'], {}), '(feature, tf.float32)\n', (48731, 48752), True, 'import tensorflow as tf\n'), ((48779, 48830), 'tensorflow.reshape', 'tf.reshape', (['labels', '[LabelsH, LabelsW, channelsOut]'], {}), '(labels, [LabelsH, LabelsW, channelsOut])\n', (48789, 48830), True, 'import tensorflow as tf\n'), ((48847, 48874), 'tensorflow.cast', 'tf.cast', (['labels', 'tf.float32'], {}), '(labels, tf.float32)\n', (48854, 48874), True, 'import tensorflow as tf\n'), ((48939, 49080), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (48953, 49080), True, 'import tensorflow as tf\n'), ((49254, 49293), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (49282, 49293), True, 'import tensorflow as tf\n'), ((1433, 1464), 
'tensorflow.transpose', 'tf.transpose', (['X'], {'perm': '[1, 0, 2]'}), '(X, perm=[1, 0, 2])\n', (1445, 1464), True, 'import tensorflow as tf\n'), ((1485, 1504), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (1494, 1504), True, 'import tensorflow as tf\n'), ((1570, 1639), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': 'TFL.shape[0]', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=TFL.shape[0], dtype=tf.int32)\n', (1587, 1639), True, 'import tensorflow as tf\n'), ((1656, 1704), 'tensorflow.slice', 'tf.slice', (['TFL', '[Idx[0], 0, 0, 0]', '[1, -1, -1, 1]'], {}), '(TFL, [Idx[0], 0, 0, 0], [1, -1, -1, 1])\n', (1664, 1704), True, 'import tensorflow as tf\n'), ((1716, 1764), 'tensorflow.slice', 'tf.slice', (['TFL', '[Idx[0], 0, 0, 1]', '[1, -1, -1, 1]'], {}), '(TFL, [Idx[0], 0, 0, 1], [1, -1, -1, 1])\n', (1724, 1764), True, 'import tensorflow as tf\n'), ((1789, 1818), 'tensorflow.cast', 'tf.cast', (['labelR', 'tf.complex64'], {}), '(labelR, tf.complex64)\n', (1796, 1818), True, 'import tensorflow as tf\n'), ((1837, 1866), 'tensorflow.cast', 'tf.cast', (['labelI', 'tf.complex64'], {}), '(labelI, tf.complex64)\n', (1844, 1866), True, 'import tensorflow as tf\n'), ((1884, 1941), 'tensorflow.cast', 'tf.cast', (['((labelR + 1.0j * labelI) / 30000.0)', 'tf.complex64'], {}), '((labelR + 1.0j * labelI) / 30000.0, tf.complex64)\n', (1891, 1941), True, 'import tensorflow as tf\n'), ((2231, 2271), 'tensorflow.reshape', 'tf.reshape', (['label', '[LabelsH, LabelsW, 1]'], {}), '(label, [LabelsH, LabelsW, 1])\n', (2241, 2271), True, 'import tensorflow as tf\n'), ((2293, 2331), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['label'], {}), '(label)\n', (2324, 2331), True, 'import tensorflow as tf\n'), ((2352, 2387), 'tensorflow.image.random_flip_up_down', 'tf.image.random_flip_up_down', (['label'], {}), '(label)\n', (2380, 2387), True, 'import tensorflow as tf\n'), ((2403, 2425), 
'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {}), '([1])\n', (2420, 2425), True, 'import tensorflow as tf\n'), ((2669, 2694), 'tensorflow.multiply', 'tf.multiply', (['label', 'TFMsk'], {}), '(label, TFMsk)\n', (2680, 2694), True, 'import tensorflow as tf\n'), ((2838, 2866), 'tensorflow.multiply', 'tf.multiply', (['feature', 'TFSens'], {}), '(feature, TFSens)\n', (2849, 2866), True, 'import tensorflow as tf\n'), ((4220, 4236), 'numpy.int16', 'np.int16', (['labels'], {}), '(labels)\n', (4228, 4236), True, 'import numpy as np\n'), ((4264, 4282), 'numpy.int16', 'np.int16', (['features'], {}), '(features)\n', (4272, 4282), True, 'import numpy as np\n'), ((4710, 4761), 'tensorflow.reshape', 'tf.reshape', (['label', '[LabelsH, LabelsW, TFL.shape[3]]'], {}), '(label, [LabelsH, LabelsW, TFL.shape[3]])\n', (4720, 4761), True, 'import tensorflow as tf\n'), ((4796, 4836), 'tensorflow.reshape', 'tf.reshape', (['label', '[LabelsH, LabelsW, 1]'], {}), '(label, [LabelsH, LabelsW, 1])\n', (4806, 4836), True, 'import tensorflow as tf\n'), ((4889, 4942), 'tensorflow.reshape', 'tf.reshape', (['feature', '[LabelsH, LabelsW, TFF.shape[3]]'], {}), '(feature, [LabelsH, LabelsW, TFF.shape[3]])\n', (4899, 4942), True, 'import tensorflow as tf\n'), ((4979, 5021), 'tensorflow.reshape', 'tf.reshape', (['feature', '[LabelsH, LabelsW, 1]'], {}), '(feature, [LabelsH, LabelsW, 1])\n', (4989, 5021), True, 'import tensorflow as tf\n'), ((6219, 6235), 'numpy.int16', 'np.int16', (['labels'], {}), '(labels)\n', (6227, 6235), True, 'import numpy as np\n'), ((6263, 6281), 'numpy.int16', 'np.int16', (['features'], {}), '(features)\n', (6271, 6281), True, 'import numpy as np\n'), ((6668, 6719), 'tensorflow.reshape', 'tf.reshape', (['label', '[LabelsH, LabelsW, TFL.shape[3]]'], {}), '(label, [LabelsH, LabelsW, TFL.shape[3]])\n', (6678, 6719), True, 'import tensorflow as tf\n'), ((6754, 6794), 'tensorflow.reshape', 'tf.reshape', (['label', '[LabelsH, LabelsW, 1]'], {}), '(label, [LabelsH, 
LabelsW, 1])\n', (6764, 6794), True, 'import tensorflow as tf\n'), ((6847, 6900), 'tensorflow.reshape', 'tf.reshape', (['feature', '[LabelsH, LabelsW, TFF.shape[3]]'], {}), '(feature, [LabelsH, LabelsW, TFF.shape[3]])\n', (6857, 6900), True, 'import tensorflow as tf\n'), ((6937, 6979), 'tensorflow.reshape', 'tf.reshape', (['feature', '[LabelsH, LabelsW, 1]'], {}), '(feature, [LabelsH, LabelsW, 1])\n', (6947, 6979), True, 'import tensorflow as tf\n'), ((7945, 7967), 'numpy.complex64', 'np.complex64', (['SensMskA'], {}), '(SensMskA)\n', (7957, 7967), True, 'import numpy as np\n'), ((7998, 8020), 'numpy.complex64', 'np.complex64', (['SensMskB'], {}), '(SensMskB)\n', (8010, 8020), True, 'import numpy as np\n'), ((8639, 8650), 'numpy.int16', 'np.int16', (['I'], {}), '(I)\n', (8647, 8650), True, 'import numpy as np\n'), ((9775, 9798), 'tensorflow.reduce_max', 'tf.reduce_max', (['featureA'], {}), '(featureA)\n', (9788, 9798), True, 'import tensorflow as tf\n'), ((9825, 9848), 'tensorflow.reduce_max', 'tf.reduce_max', (['featureB'], {}), '(featureB)\n', (9838, 9848), True, 'import tensorflow as tf\n'), ((10413, 10438), 'tensorflow.reshape', 'tf.reshape', (['QA', '[H, W, 1]'], {}), '(QA, [H, W, 1])\n', (10423, 10438), True, 'import tensorflow as tf\n'), ((10468, 10493), 'tensorflow.reshape', 'tf.reshape', (['QB', '[H, W, 1]'], {}), '(QB, [H, W, 1])\n', (10478, 10493), True, 'import tensorflow as tf\n'), ((11505, 11554), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['label_indexes'], {}), '(label_indexes)\n', (11539, 11554), True, 'import tensorflow as tf\n'), ((11742, 11772), 'tensorflow.cast', 'tf.cast', (['label_index', 'tf.int32'], {}), '(label_index, tf.int32)\n', (11749, 11772), True, 'import tensorflow as tf\n'), ((11838, 11887), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['BankK_indexes'], {}), '(BankK_indexes)\n', (11872, 11887), True, 'import tensorflow as tf\n'), ((12082, 12113), 
'tensorflow.cast', 'tf.cast', (['label_indexK', 'tf.int32'], {}), '(label_indexK, tf.int32)\n', (12089, 12113), True, 'import tensorflow as tf\n'), ((12172, 12237), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': 'BankSize', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=BankSize, dtype=tf.int32)\n', (12189, 12237), True, 'import tensorflow as tf\n'), ((12253, 12318), 'tensorflow.random_uniform', 'tf.random_uniform', (['[1]'], {'minval': '(0)', 'maxval': 'BankSize', 'dtype': 'tf.int32'}), '([1], minval=0, maxval=BankSize, dtype=tf.int32)\n', (12270, 12318), True, 'import tensorflow as tf\n'), ((14347, 14490), 'tensorflow.train.batch', 'tf.train.batch', (['[featureX, labelX]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([featureX, labelX], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (14361, 14490), True, 'import tensorflow as tf\n'), ((14518, 14609), 'GTools.TF_TSNUFFT_Run', 'GT.TF_TSNUFFT_Run', (['CurIWithPhaseA', 'SNcA', 'paddings', 'nTraj', 'nTSC', 'nCh', 'sp_R', 'sp_I', 'TSBFXA'], {}), '(CurIWithPhaseA, SNcA, paddings, nTraj, nTSC, nCh, sp_R,\n sp_I, TSBFXA)\n', (14535, 14609), True, 'import GTools as GT\n'), ((14619, 14710), 'GTools.TF_TSNUFFT_Run', 'GT.TF_TSNUFFT_Run', (['CurIWithPhaseB', 'SNcB', 'paddings', 'nTraj', 'nTSC', 'nCh', 'sp_R', 'sp_I', 'TSBFXB'], {}), '(CurIWithPhaseB, SNcB, paddings, nTraj, nTSC, nCh, sp_R,\n sp_I, TSBFXB)\n', (14636, 14710), True, 'import GTools as GT\n'), ((14769, 14803), 'tensorflow.transpose', 'tf.transpose', (['feature'], {'perm': '[1, 0]'}), '(feature, perm=[1, 0])\n', (14781, 14803), True, 'import tensorflow as tf\n'), ((14817, 14857), 'tensorflow.reshape', 'tf.reshape', (['feature', '[nTraj * nCh, 1, 1]'], {}), '(feature, [nTraj * nCh, 1, 1])\n', (14827, 14857), True, 'import tensorflow as tf\n'), ((14912, 14963), 
'tensorflow.concat', 'tf.concat', (['[CurIWithPhaseA, CurIWithPhaseB]'], {'axis': '(1)'}), '([CurIWithPhaseA, CurIWithPhaseB], axis=1)\n', (14921, 14963), True, 'import tensorflow as tf\n'), ((15079, 15220), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (15093, 15220), True, 'import tensorflow as tf\n'), ((15830, 15851), 'numpy.complex64', 'np.complex64', (['SensMsk'], {}), '(SensMsk)\n', (15842, 15851), True, 'import numpy as np\n'), ((16701, 16712), 'numpy.int16', 'np.int16', (['I'], {}), '(I)\n', (16709, 16712), True, 'import numpy as np\n'), ((18089, 18113), 'tensorflow.reshape', 'tf.reshape', (['Q', '[H, W, 1]'], {}), '(Q, [H, W, 1])\n', (18099, 18113), True, 'import tensorflow as tf\n'), ((19444, 19493), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['label_indexes'], {}), '(label_indexes)\n', (19478, 19493), True, 'import tensorflow as tf\n'), ((19681, 19711), 'tensorflow.cast', 'tf.cast', (['label_index', 'tf.int32'], {}), '(label_index, tf.int32)\n', (19688, 19711), True, 'import tensorflow as tf\n'), ((19739, 19788), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['BankK_indexes'], {}), '(BankK_indexes)\n', (19773, 19788), True, 'import tensorflow as tf\n'), ((19983, 20014), 'tensorflow.cast', 'tf.cast', (['label_indexK', 'tf.int32'], {}), '(label_indexK, tf.int32)\n', (19990, 20014), True, 'import tensorflow as tf\n'), ((21270, 21413), 'tensorflow.train.batch', 'tf.train.batch', (['[featureX, labelX]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([featureX, labelX], batch_size=batch_size, 
num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (21284, 21413), True, 'import tensorflow as tf\n'), ((21822, 21963), 'tensorflow.train.batch', 'tf.train.batch', (['[feature, label]'], {'batch_size': 'batch_size', 'num_threads': '(4)', 'capacity': '(capacity_factor * batch_size)', 'name': '"""labels_and_features"""'}), "([feature, label], batch_size=batch_size, num_threads=4,\n capacity=capacity_factor * batch_size, name='labels_and_features')\n", (21836, 21963), True, 'import tensorflow as tf\n'), ((25671, 25684), 'numpy.float32', 'np.float32', (['I'], {}), '(I)\n', (25681, 25684), True, 'import numpy as np\n'), ((26124, 26156), 'tensorflow.reshape', 'tf.reshape', (['Q', '[DataH, DataW, 1]'], {}), '(Q, [DataH, DataW, 1])\n', (26134, 26156), True, 'import tensorflow as tf\n'), ((27179, 27192), 'numpy.float32', 'np.float32', (['I'], {}), '(I)\n', (27189, 27192), True, 'import numpy as np\n'), ((27632, 27664), 'tensorflow.reshape', 'tf.reshape', (['Q', '[DataH, DataW, 1]'], {}), '(Q, [DataH, DataW, 1])\n', (27642, 27664), True, 'import tensorflow as tf\n'), ((28601, 28617), 'numpy.float32', 'np.float32', (['Mask'], {}), '(Mask)\n', (28611, 28617), True, 'import numpy as np\n'), ((28693, 28706), 'numpy.float32', 'np.float32', (['I'], {}), '(I)\n', (28703, 28706), True, 'import numpy as np\n'), ((29072, 29104), 'tensorflow.reshape', 'tf.reshape', (['Q', '[DataH, DataW, 1]'], {}), '(Q, [DataH, DataW, 1])\n', (29082, 29104), True, 'import tensorflow as tf\n'), ((30243, 30259), 'numpy.float32', 'np.float32', (['Mask'], {}), '(Mask)\n', (30253, 30259), True, 'import numpy as np\n'), ((30335, 30348), 'numpy.float32', 'np.float32', (['I'], {}), '(I)\n', (30345, 30348), True, 'import numpy as np\n'), ((30714, 30746), 'tensorflow.reshape', 'tf.reshape', (['Q', '[DataH, DataW, 1]'], {}), '(Q, [DataH, DataW, 1])\n', (30724, 30746), True, 'import tensorflow as tf\n'), ((31495, 31508), 'numpy.float32', 'np.float32', (['I'], {}), '(I)\n', 
(31505, 31508), True, 'import numpy as np\n'), ((31879, 31911), 'tensorflow.reshape', 'tf.reshape', (['Q', '[DataH, DataW, 1]'], {}), '(Q, [DataH, DataW, 1])\n', (31889, 31911), True, 'import tensorflow as tf\n'), ((32662, 32675), 'numpy.float32', 'np.float32', (['I'], {}), '(I)\n', (32672, 32675), True, 'import numpy as np\n'), ((33046, 33078), 'tensorflow.reshape', 'tf.reshape', (['Q', '[DataH, DataW, 1]'], {}), '(Q, [DataH, DataW, 1])\n', (33056, 33078), True, 'import tensorflow as tf\n'), ((33826, 33839), 'numpy.float32', 'np.float32', (['I'], {}), '(I)\n', (33836, 33839), True, 'import numpy as np\n'), ((34153, 34185), 'tensorflow.reshape', 'tf.reshape', (['Q', '[DataH, DataW, 1]'], {}), '(Q, [DataH, DataW, 1])\n', (34163, 34185), True, 'import tensorflow as tf\n'), ((34897, 34910), 'numpy.float32', 'np.float32', (['I'], {}), '(I)\n', (34907, 34910), True, 'import numpy as np\n'), ((35224, 35256), 'tensorflow.reshape', 'tf.reshape', (['Q', '[DataH, DataW, 1]'], {}), '(Q, [DataH, DataW, 1])\n', (35234, 35256), True, 'import tensorflow as tf\n'), ((39758, 39799), 'tensorflow.reshape', 'tf.reshape', (['Q', '[DataH, DataW, channelsIn]'], {}), '(Q, [DataH, DataW, channelsIn])\n', (39768, 39799), True, 'import tensorflow as tf\n'), ((41280, 41321), 'tensorflow.reshape', 'tf.reshape', (['Q', '[DataH, DataW, channelsIn]'], {}), '(Q, [DataH, DataW, channelsIn])\n', (41290, 41321), True, 'import tensorflow as tf\n'), ((42898, 42939), 'tensorflow.reshape', 'tf.reshape', (['Q', '[DataH, DataW, channelsIn]'], {}), '(Q, [DataH, DataW, channelsIn])\n', (42908, 42939), True, 'import tensorflow as tf\n'), ((44941, 44969), 'tensorflow.cast', 'tf.cast', (['feature', 'tf.float32'], {}), '(feature, tf.float32)\n', (44948, 44969), True, 'import tensorflow as tf\n'), ((44970, 44992), 'tensorflow.reduce_max', 'tf.reduce_max', (['feature'], {}), '(feature)\n', (44983, 44992), True, 'import tensorflow as tf\n'), ((45012, 45041), 'tensorflow.cast', 'tf.cast', (['feature2', 
'tf.float32'], {}), '(feature2, tf.float32)\n', (45019, 45041), True, 'import tensorflow as tf\n'), ((45042, 45064), 'tensorflow.reduce_max', 'tf.reduce_max', (['feature'], {}), '(feature)\n', (45055, 45064), True, 'import tensorflow as tf\n'), ((644, 678), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (657, 678), False, 'import time\n'), ((1008, 1042), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (1021, 1042), False, 'import time\n'), ((1536, 1552), 'numpy.int32', 'np.int32', (['labels'], {}), '(labels)\n', (1544, 1552), True, 'import numpy as np\n'), ((2557, 2578), 'numpy.complex64', 'np.complex64', (['SensMsk'], {}), '(SensMsk)\n', (2569, 2578), True, 'import numpy as np\n'), ((2613, 2631), 'numpy.complex64', 'np.complex64', (['Sens'], {}), '(Sens)\n', (2625, 2631), True, 'import numpy as np\n'), ((2791, 2804), 'tensorflow.abs', 'tf.abs', (['label'], {}), '(label)\n', (2797, 2804), True, 'import tensorflow as tf\n'), ((3248, 3282), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (3261, 3282), False, 'import time\n'), ((3735, 3769), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (3748, 3769), False, 'import time\n'), ((3812, 3846), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (3825, 3846), False, 'import time\n'), ((4149, 4183), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (4162, 4183), False, 'import time\n'), ((5349, 5383), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (5362, 5383), False, 'import time\n'), ((5783, 5817), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (5796, 5817), False, 'import time\n'), ((5860, 5894), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d 
%H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (5873, 5894), False, 'import time\n'), ((6148, 6182), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (6161, 6182), False, 'import time\n'), ((8065, 8099), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (8078, 8099), False, 'import time\n'), ((8523, 8557), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (8536, 8557), False, 'import time\n'), ((9225, 9246), 'tensorflow.identity', 'tf.identity', (['featureA'], {}), '(featureA)\n', (9236, 9246), True, 'import tensorflow as tf\n'), ((9256, 9280), 'tensorflow.image.rot90', 'tf.image.rot90', (['featureA'], {}), '(featureA)\n', (9270, 9280), True, 'import tensorflow as tf\n'), ((9480, 9501), 'tensorflow.identity', 'tf.identity', (['featureB'], {}), '(featureB)\n', (9491, 9501), True, 'import tensorflow as tf\n'), ((9511, 9535), 'tensorflow.image.rot90', 'tf.image.rot90', (['featureB'], {}), '(featureB)\n', (9525, 9535), True, 'import tensorflow as tf\n'), ((12334, 12353), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (12343, 12353), True, 'import tensorflow as tf\n'), ((12388, 12425), 'tensorflow.greater', 'tf.greater', (['label_indexK', 'label_index'], {}), '(label_indexK, label_index)\n', (12398, 12425), True, 'import tensorflow as tf\n'), ((13682, 13731), 'tensorflow.slice', 'tf.slice', (['Bank', '[IdxAF, 0, 0, 0]', '[1, -1, -1, -1]'], {}), '(Bank, [IdxAF, 0, 0, 0], [1, -1, -1, -1])\n', (13690, 13731), True, 'import tensorflow as tf\n'), ((13752, 13788), 'tensorflow.reshape', 'tf.reshape', (['featureAX', '[DataH, 1, 1]'], {}), '(featureAX, [DataH, 1, 1])\n', (13762, 13788), True, 'import tensorflow as tf\n'), ((13817, 13866), 'tensorflow.slice', 'tf.slice', (['Bank', '[IdxBF, 0, 0, 0]', '[1, -1, -1, -1]'], {}), '(Bank, [IdxBF, 0, 0, 0], [1, -1, -1, -1])\n', (13825, 13866), True, 'import tensorflow as 
tf\n'), ((13887, 13923), 'tensorflow.reshape', 'tf.reshape', (['featureBX', '[DataH, 1, 1]'], {}), '(featureBX, [DataH, 1, 1])\n', (13897, 13923), True, 'import tensorflow as tf\n'), ((14024, 14074), 'tensorflow.slice', 'tf.slice', (['LBank', '[IdxAF, 0, 0, 0]', '[1, -1, -1, -1]'], {}), '(LBank, [IdxAF, 0, 0, 0], [1, -1, -1, -1])\n', (14032, 14074), True, 'import tensorflow as tf\n'), ((14093, 14123), 'tensorflow.reshape', 'tf.reshape', (['labelAX', '[H, W, 2]'], {}), '(labelAX, [H, W, 2])\n', (14103, 14123), True, 'import tensorflow as tf\n'), ((14150, 14200), 'tensorflow.slice', 'tf.slice', (['LBank', '[IdxBF, 0, 0, 0]', '[1, -1, -1, -1]'], {}), '(LBank, [IdxBF, 0, 0, 0], [1, -1, -1, -1])\n', (14158, 14200), True, 'import tensorflow as tf\n'), ((14219, 14249), 'tensorflow.reshape', 'tf.reshape', (['labelBX', '[H, W, 2]'], {}), '(labelBX, [H, W, 2])\n', (14229, 14249), True, 'import tensorflow as tf\n'), ((14276, 14313), 'tensorflow.concat', 'tf.concat', (['[labelAX, labelBX]'], {'axis': '(1)'}), '([labelAX, labelBX], axis=1)\n', (14285, 14313), True, 'import tensorflow as tf\n'), ((15896, 15930), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (15909, 15930), False, 'import time\n'), ((16469, 16503), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (16482, 16503), False, 'import time\n'), ((17149, 17169), 'tensorflow.identity', 'tf.identity', (['feature'], {}), '(feature)\n', (17160, 17169), True, 'import tensorflow as tf\n'), ((17179, 17202), 'tensorflow.image.rot90', 'tf.image.rot90', (['feature'], {}), '(feature)\n', (17193, 17202), True, 'import tensorflow as tf\n'), ((18136, 18158), 'tensorflow.real', 'tf.real', (['CurIWithPhase'], {}), '(CurIWithPhase)\n', (18143, 18158), True, 'import tensorflow as tf\n'), ((18159, 18181), 'tensorflow.imag', 'tf.imag', (['CurIWithPhase'], {}), '(CurIWithPhase)\n', (18166, 18181), True, 'import tensorflow as tf\n'), ((20032, 
20051), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (20041, 20051), True, 'import tensorflow as tf\n'), ((20086, 20123), 'tensorflow.greater', 'tf.greater', (['label_indexK', 'label_index'], {}), '(label_indexK, label_index)\n', (20096, 20123), True, 'import tensorflow as tf\n'), ((20895, 20950), 'tensorflow.slice', 'tf.slice', (['Bank', '[label_index, 0, 0, 0]', '[1, -1, -1, -1]'], {}), '(Bank, [label_index, 0, 0, 0], [1, -1, -1, -1])\n', (20903, 20950), True, 'import tensorflow as tf\n'), ((20970, 21005), 'tensorflow.reshape', 'tf.reshape', (['featureX', '[DataH, 1, 1]'], {}), '(featureX, [DataH, 1, 1])\n', (20980, 21005), True, 'import tensorflow as tf\n'), ((21134, 21190), 'tensorflow.slice', 'tf.slice', (['LBank', '[label_index, 0, 0, 0]', '[1, -1, -1, -1]'], {}), '(LBank, [label_index, 0, 0, 0], [1, -1, -1, -1])\n', (21142, 21190), True, 'import tensorflow as tf\n'), ((21208, 21237), 'tensorflow.reshape', 'tf.reshape', (['labelX', '[H, W, 2]'], {}), '(labelX, [H, W, 2])\n', (21218, 21237), True, 'import tensorflow as tf\n'), ((22771, 22805), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (22784, 22805), False, 'import time\n'), ((22867, 22901), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (22880, 22901), False, 'import time\n'), ((23064, 23098), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (23077, 23098), False, 'import time\n'), ((23171, 23205), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (23184, 23205), False, 'import time\n'), ((26188, 26210), 'tensorflow.real', 'tf.real', (['CurIWithPhase'], {}), '(CurIWithPhase)\n', (26195, 26210), True, 'import tensorflow as tf\n'), ((26211, 26233), 'tensorflow.imag', 'tf.imag', (['CurIWithPhase'], {}), '(CurIWithPhase)\n', (26218, 26233), True, 'import tensorflow as tf\n'), ((26527, 26537), 
'tensorflow.real', 'tf.real', (['F'], {}), '(F)\n', (26534, 26537), True, 'import tensorflow as tf\n'), ((26538, 26548), 'tensorflow.imag', 'tf.imag', (['F'], {}), '(F)\n', (26545, 26548), True, 'import tensorflow as tf\n'), ((27696, 27718), 'tensorflow.real', 'tf.real', (['CurIWithPhase'], {}), '(CurIWithPhase)\n', (27703, 27718), True, 'import tensorflow as tf\n'), ((27719, 27741), 'tensorflow.imag', 'tf.imag', (['CurIWithPhase'], {}), '(CurIWithPhase)\n', (27726, 27741), True, 'import tensorflow as tf\n'), ((27917, 27927), 'tensorflow.real', 'tf.real', (['F'], {}), '(F)\n', (27924, 27927), True, 'import tensorflow as tf\n'), ((27928, 27938), 'tensorflow.imag', 'tf.imag', (['F'], {}), '(F)\n', (27935, 27938), True, 'import tensorflow as tf\n'), ((29203, 29225), 'tensorflow.real', 'tf.real', (['CurIWithPhase'], {}), '(CurIWithPhase)\n', (29210, 29225), True, 'import tensorflow as tf\n'), ((29226, 29248), 'tensorflow.imag', 'tf.imag', (['CurIWithPhase'], {}), '(CurIWithPhase)\n', (29233, 29248), True, 'import tensorflow as tf\n'), ((29560, 29570), 'tensorflow.real', 'tf.real', (['F'], {}), '(F)\n', (29567, 29570), True, 'import tensorflow as tf\n'), ((29571, 29581), 'tensorflow.imag', 'tf.imag', (['F'], {}), '(F)\n', (29578, 29581), True, 'import tensorflow as tf\n'), ((30845, 30867), 'tensorflow.real', 'tf.real', (['CurIWithPhase'], {}), '(CurIWithPhase)\n', (30852, 30867), True, 'import tensorflow as tf\n'), ((30868, 30890), 'tensorflow.imag', 'tf.imag', (['CurIWithPhase'], {}), '(CurIWithPhase)\n', (30875, 30890), True, 'import tensorflow as tf\n'), ((31032, 31042), 'tensorflow.real', 'tf.real', (['F'], {}), '(F)\n', (31039, 31042), True, 'import tensorflow as tf\n'), ((31043, 31053), 'tensorflow.imag', 'tf.imag', (['F'], {}), '(F)\n', (31050, 31053), True, 'import tensorflow as tf\n'), ((31943, 31954), 'tensorflow.real', 'tf.real', (['IQ'], {}), '(IQ)\n', (31950, 31954), True, 'import tensorflow as tf\n'), ((31955, 31966), 'tensorflow.imag', 'tf.imag', (['IQ'], 
{}), '(IQ)\n', (31962, 31966), True, 'import tensorflow as tf\n'), ((32185, 32201), 'tensorflow.real', 'tf.real', (['feature'], {}), '(feature)\n', (32192, 32201), True, 'import tensorflow as tf\n'), ((32202, 32218), 'tensorflow.imag', 'tf.imag', (['feature'], {}), '(feature)\n', (32209, 32218), True, 'import tensorflow as tf\n'), ((33110, 33121), 'tensorflow.real', 'tf.real', (['IQ'], {}), '(IQ)\n', (33117, 33121), True, 'import tensorflow as tf\n'), ((33122, 33133), 'tensorflow.imag', 'tf.imag', (['IQ'], {}), '(IQ)\n', (33129, 33133), True, 'import tensorflow as tf\n'), ((33350, 33366), 'tensorflow.real', 'tf.real', (['feature'], {}), '(feature)\n', (33357, 33366), True, 'import tensorflow as tf\n'), ((33367, 33383), 'tensorflow.imag', 'tf.imag', (['feature'], {}), '(feature)\n', (33374, 33383), True, 'import tensorflow as tf\n'), ((34217, 34228), 'tensorflow.real', 'tf.real', (['IQ'], {}), '(IQ)\n', (34224, 34228), True, 'import tensorflow as tf\n'), ((34229, 34240), 'tensorflow.imag', 'tf.imag', (['IQ'], {}), '(IQ)\n', (34236, 34240), True, 'import tensorflow as tf\n'), ((34421, 34437), 'tensorflow.real', 'tf.real', (['feature'], {}), '(feature)\n', (34428, 34437), True, 'import tensorflow as tf\n'), ((34438, 34454), 'tensorflow.imag', 'tf.imag', (['feature'], {}), '(feature)\n', (34445, 34454), True, 'import tensorflow as tf\n'), ((35288, 35299), 'tensorflow.real', 'tf.real', (['IQ'], {}), '(IQ)\n', (35295, 35299), True, 'import tensorflow as tf\n'), ((35300, 35311), 'tensorflow.imag', 'tf.imag', (['IQ'], {}), '(IQ)\n', (35307, 35311), True, 'import tensorflow as tf\n'), ((35492, 35508), 'tensorflow.real', 'tf.real', (['feature'], {}), '(feature)\n', (35499, 35508), True, 'import tensorflow as tf\n'), ((35509, 35525), 'tensorflow.imag', 'tf.imag', (['feature'], {}), '(feature)\n', (35516, 35525), True, 'import tensorflow as tf\n'), ((39831, 39842), 'tensorflow.real', 'tf.real', (['IQ'], {}), '(IQ)\n', (39838, 39842), True, 'import tensorflow as tf\n'), 
((39843, 39854), 'tensorflow.imag', 'tf.imag', (['IQ'], {}), '(IQ)\n', (39850, 39854), True, 'import tensorflow as tf\n'), ((39936, 39963), 'numpy.arange', 'np.arange', (['HalfDataW', 'DataW'], {}), '(HalfDataW, DataW)\n', (39945, 39963), True, 'import numpy as np\n'), ((39964, 39987), 'numpy.arange', 'np.arange', (['(0)', 'HalfDataW'], {}), '(0, HalfDataW)\n', (39973, 39987), True, 'import numpy as np\n'), ((40225, 40241), 'tensorflow.real', 'tf.real', (['feature'], {}), '(feature)\n', (40232, 40241), True, 'import tensorflow as tf\n'), ((40242, 40258), 'tensorflow.imag', 'tf.imag', (['feature'], {}), '(feature)\n', (40249, 40258), True, 'import tensorflow as tf\n'), ((41353, 41364), 'tensorflow.real', 'tf.real', (['IQ'], {}), '(IQ)\n', (41360, 41364), True, 'import tensorflow as tf\n'), ((41365, 41376), 'tensorflow.imag', 'tf.imag', (['IQ'], {}), '(IQ)\n', (41372, 41376), True, 'import tensorflow as tf\n'), ((41458, 41485), 'numpy.arange', 'np.arange', (['HalfDataH', 'DataH'], {}), '(HalfDataH, DataH)\n', (41467, 41485), True, 'import numpy as np\n'), ((41486, 41509), 'numpy.arange', 'np.arange', (['(0)', 'HalfDataH'], {}), '(0, HalfDataH)\n', (41495, 41509), True, 'import numpy as np\n'), ((41844, 41860), 'tensorflow.real', 'tf.real', (['feature'], {}), '(feature)\n', (41851, 41860), True, 'import tensorflow as tf\n'), ((41861, 41877), 'tensorflow.imag', 'tf.imag', (['feature'], {}), '(feature)\n', (41868, 41877), True, 'import tensorflow as tf\n'), ((42971, 42982), 'tensorflow.real', 'tf.real', (['IQ'], {}), '(IQ)\n', (42978, 42982), True, 'import tensorflow as tf\n'), ((42983, 42994), 'tensorflow.imag', 'tf.imag', (['IQ'], {}), '(IQ)\n', (42990, 42994), True, 'import tensorflow as tf\n'), ((43103, 43130), 'numpy.arange', 'np.arange', (['HalfDataH', 'DataH'], {}), '(HalfDataH, DataH)\n', (43112, 43130), True, 'import numpy as np\n'), ((43131, 43154), 'numpy.arange', 'np.arange', (['(0)', 'HalfDataH'], {}), '(0, HalfDataH)\n', (43140, 43154), True, 'import numpy 
as np\n'), ((43208, 43235), 'numpy.arange', 'np.arange', (['HalfDataW', 'DataW'], {}), '(HalfDataW, DataW)\n', (43217, 43235), True, 'import numpy as np\n'), ((43236, 43259), 'numpy.arange', 'np.arange', (['(0)', 'HalfDataW'], {}), '(0, HalfDataW)\n', (43245, 43259), True, 'import numpy as np\n'), ((43661, 43677), 'tensorflow.real', 'tf.real', (['feature'], {}), '(feature)\n', (43668, 43677), True, 'import tensorflow as tf\n'), ((43678, 43694), 'tensorflow.imag', 'tf.imag', (['feature'], {}), '(feature)\n', (43685, 43694), True, 'import tensorflow as tf\n'), ((2471, 2489), 'tensorflow.identity', 'tf.identity', (['label'], {}), '(label)\n', (2482, 2489), True, 'import tensorflow as tf\n'), ((11053, 11063), 'tensorflow.real', 'tf.real', (['X'], {}), '(X)\n', (11060, 11063), True, 'import tensorflow as tf\n'), ((11064, 11074), 'tensorflow.imag', 'tf.imag', (['X'], {}), '(X)\n', (11071, 11074), True, 'import tensorflow as tf\n'), ((11130, 11140), 'tensorflow.real', 'tf.real', (['X'], {}), '(X)\n', (11137, 11140), True, 'import tensorflow as tf\n'), ((11141, 11151), 'tensorflow.imag', 'tf.imag', (['X'], {}), '(X)\n', (11148, 11151), True, 'import tensorflow as tf\n'), ((11345, 11367), 'numpy.arange', 'np.arange', (['(0)', 'BankSize'], {}), '(0, BankSize)\n', (11354, 11367), True, 'import numpy as np\n'), ((11433, 11463), 'numpy.arange', 'np.arange', (['(0)', '(BankSize * BankK)'], {}), '(0, BankSize * BankK)\n', (11442, 11463), True, 'import numpy as np\n'), ((12447, 12483), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""aaa"""'], {'reuse': '(True)'}), "('aaa', reuse=True)\n", (12464, 12483), True, 'import tensorflow as tf\n'), ((12510, 12551), 'tensorflow.get_variable', 'tf.get_variable', (['"""Bank"""'], {'dtype': 'tf.float32'}), "('Bank', dtype=tf.float32)\n", (12525, 12551), True, 'import tensorflow as tf\n'), ((12577, 12619), 'tensorflow.get_variable', 'tf.get_variable', (['"""LBank"""'], {'dtype': 'tf.float32'}), "('LBank', dtype=tf.float32)\n", (12592, 
12619), True, 'import tensorflow as tf\n'), ((14992, 15014), 'tensorflow.real', 'tf.real', (['CurIWithPhase'], {}), '(CurIWithPhase)\n', (14999, 15014), True, 'import tensorflow as tf\n'), ((15015, 15037), 'tensorflow.imag', 'tf.imag', (['CurIWithPhase'], {}), '(CurIWithPhase)\n', (15022, 15037), True, 'import tensorflow as tf\n'), ((18846, 18856), 'tensorflow.real', 'tf.real', (['X'], {}), '(X)\n', (18853, 18856), True, 'import tensorflow as tf\n'), ((18857, 18867), 'tensorflow.imag', 'tf.imag', (['X'], {}), '(X)\n', (18864, 18867), True, 'import tensorflow as tf\n'), ((19284, 19306), 'numpy.arange', 'np.arange', (['(0)', 'BankSize'], {}), '(0, BankSize)\n', (19293, 19306), True, 'import numpy as np\n'), ((19372, 19402), 'numpy.arange', 'np.arange', (['(0)', '(BankSize * BankK)'], {}), '(0, BankSize * BankK)\n', (19381, 19402), True, 'import numpy as np\n'), ((20145, 20181), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""aaa"""'], {'reuse': '(True)'}), "('aaa', reuse=True)\n", (20162, 20181), True, 'import tensorflow as tf\n'), ((20208, 20249), 'tensorflow.get_variable', 'tf.get_variable', (['"""Bank"""'], {'dtype': 'tf.float32'}), "('Bank', dtype=tf.float32)\n", (20223, 20249), True, 'import tensorflow as tf\n'), ((20275, 20317), 'tensorflow.get_variable', 'tf.get_variable', (['"""LBank"""'], {'dtype': 'tf.float32'}), "('LBank', dtype=tf.float32)\n", (20290, 20317), True, 'import tensorflow as tf\n'), ((20572, 20625), 'tensorflow.scatter_nd_update', 'tf.scatter_nd_update', (['LBank', '[[label_index]]', '[label]'], {}), '(LBank, [[label_index]], [label])\n', (20592, 20625), True, 'import tensorflow as tf\n'), ((23807, 23842), 'tensorflow.slice', 'tf.slice', (['TFI', '[Idx[0], 0]', '[1, -1]'], {}), '(TFI, [Idx[0], 0], [1, -1])\n', (23815, 23842), True, 'import tensorflow as tf\n'), ((23848, 23896), 'tensorflow.slice', 'tf.slice', (['TFIb', '[Idx[0] - ChunkSize, 0]', '[1, -1]'], {}), '(TFIb, [Idx[0] - ChunkSize, 0], [1, -1])\n', (23856, 23896), True, 
'import tensorflow as tf\n'), ((23955, 24007), 'tensorflow.slice', 'tf.slice', (['TFIc', '[Idx[0] - 2 * ChunkSize, 0]', '[1, -1]'], {}), '(TFIc, [Idx[0] - 2 * ChunkSize, 0], [1, -1])\n', (23963, 24007), True, 'import tensorflow as tf\n'), ((24009, 24061), 'tensorflow.slice', 'tf.slice', (['TFId', '[Idx[0] - 3 * ChunkSize, 0]', '[1, -1]'], {}), '(TFId, [Idx[0] - 3 * ChunkSize, 0], [1, -1])\n', (24017, 24061), True, 'import tensorflow as tf\n'), ((36593, 36626), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (36611, 36626), True, 'import tensorflow as tf\n'), ((36654, 36687), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (36672, 36687), True, 'import tensorflow as tf\n'), ((38014, 38046), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (38032, 38046), True, 'import tensorflow as tf\n'), ((38073, 38105), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (38091, 38105), True, 'import tensorflow as tf\n'), ((38137, 38169), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (38155, 38169), True, 'import tensorflow as tf\n'), ((38198, 38230), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (38216, 38230), True, 'import tensorflow as tf\n'), ((38259, 38291), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (38277, 38291), True, 'import tensorflow as tf\n'), ((38324, 38356), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (38342, 38356), True, 'import tensorflow as tf\n'), ((38386, 38419), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (38404, 38419), True, 'import tensorflow as tf\n'), ((38451, 38484), 
'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (38469, 38484), True, 'import tensorflow as tf\n'), ((38690, 38722), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (38708, 38722), True, 'import tensorflow as tf\n'), ((38749, 38781), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (38767, 38781), True, 'import tensorflow as tf\n'), ((38813, 38845), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (38831, 38845), True, 'import tensorflow as tf\n'), ((38875, 38908), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (38893, 38908), True, 'import tensorflow as tf\n'), ((44377, 44409), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (44395, 44409), True, 'import tensorflow as tf\n'), ((44436, 44468), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (44454, 44468), True, 'import tensorflow as tf\n'), ((44500, 44532), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (44518, 44532), True, 'import tensorflow as tf\n'), ((44562, 44595), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (44580, 44595), True, 'import tensorflow as tf\n'), ((1356, 1366), 'tensorflow.real', 'tf.real', (['X'], {}), '(X)\n', (1363, 1366), True, 'import tensorflow as tf\n'), ((1367, 1377), 'tensorflow.imag', 'tf.imag', (['X'], {}), '(X)\n', (1374, 1377), True, 'import tensorflow as tf\n'), ((13214, 13231), 'tensorflow.identity', 'tf.identity', (['Bank'], {}), '(Bank)\n', (13225, 13231), True, 'import tensorflow as tf\n'), ((13295, 13313), 'tensorflow.identity', 'tf.identity', (['LBank'], {}), '(LBank)\n', (13306, 13313), True, 'import tensorflow as tf\n'), 
((13412, 13437), 'tensorflow.identity', 'tf.identity', (['(IdxAX[0] * 2)'], {}), '(IdxAX[0] * 2)\n', (13423, 13437), True, 'import tensorflow as tf\n'), ((13445, 13469), 'tensorflow.identity', 'tf.identity', (['label_index'], {}), '(label_index)\n', (13456, 13469), True, 'import tensorflow as tf\n'), ((13529, 13558), 'tensorflow.identity', 'tf.identity', (['(IdxBX[0] * 2 + 1)'], {}), '(IdxBX[0] * 2 + 1)\n', (13540, 13558), True, 'import tensorflow as tf\n'), ((13564, 13592), 'tensorflow.identity', 'tf.identity', (['(label_index + 1)'], {}), '(label_index + 1)\n', (13575, 13592), True, 'import tensorflow as tf\n'), ((20683, 20700), 'tensorflow.identity', 'tf.identity', (['Bank'], {}), '(Bank)\n', (20694, 20700), True, 'import tensorflow as tf\n'), ((20764, 20782), 'tensorflow.identity', 'tf.identity', (['LBank'], {}), '(LBank)\n', (20775, 20782), True, 'import tensorflow as tf\n'), ((21683, 21771), 'GTools.TF_TSNUFFT_Run', 'GT.TF_TSNUFFT_Run', (['CurIWithPhase', 'SNc', 'paddings', 'nTraj', 'nTSC', 'nCh', 'sp_R', 'sp_I', 'TSBFX'], {}), '(CurIWithPhase, SNc, paddings, nTraj, nTSC, nCh, sp_R,\n sp_I, TSBFX)\n', (21700, 21771), True, 'import GTools as GT\n'), ((12746, 12837), 'GTools.TF_TSNUFFT_Run', 'GT.TF_TSNUFFT_Run', (['CurIWithPhaseA', 'SNcA', 'paddings', 'nTraj', 'nTSC', 'nCh', 'sp_R', 'sp_I', 'TSBFXA'], {}), '(CurIWithPhaseA, SNcA, paddings, nTraj, nTSC, nCh, sp_R,\n sp_I, TSBFXA)\n', (12763, 12837), True, 'import GTools as GT\n'), ((12890, 12981), 'GTools.TF_TSNUFFT_Run', 'GT.TF_TSNUFFT_Run', (['CurIWithPhaseB', 'SNcB', 'paddings', 'nTraj', 'nTSC', 'nCh', 'sp_R', 'sp_I', 'TSBFXB'], {}), '(CurIWithPhaseB, SNcB, paddings, nTraj, nTSC, nCh, sp_R,\n sp_I, TSBFXB)\n', (12907, 12981), True, 'import GTools as GT\n'), ((20428, 20516), 'GTools.TF_TSNUFFT_Run', 'GT.TF_TSNUFFT_Run', (['CurIWithPhase', 'SNc', 'paddings', 'nTraj', 'nTSC', 'nCh', 'sp_R', 'sp_I', 'TSBFX'], {}), '(CurIWithPhase, SNc, paddings, nTraj, nTSC, nCh, sp_R,\n sp_I, TSBFX)\n', (20445, 20516), True, 
'import GTools as GT\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import numpy as np
import torch
import json
import cv2
import os
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from utils.image import draw_dense_reg
import math
class CTDetAngleDataset(data.Dataset):
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
# def _rotation_bbox_to_segmentation(self, bbox):
# """
# :param bbox: format [x_c, y_c, w, h, theta]
# :return: format [x1, y1, x2, y2, x3, y3, x4, y4]
# """
# box = cv2.boxPoints(((bbox[0], bbox[1]), (bbox[2], bbox[3]), bbox[4]))
# box = np.reshape(box, [-1, ])
# boxes = [box[0], box[1], box[2], box[3], box[4], box[5], box[6], box[7]]
# return np.array(boxes, dtype=np.float32)
def _rotation_bbox_to_segmentation(self, bbox):
"""
:param bbox: format [x_c, y_c, w, h, theta]
:return: format [x1, y1, x2, y2, x3, y3, x4, y4]
"""
cos_w = 0.5 * bbox[2] * math.cos(bbox[4])
sin_w = 0.5 * bbox[2] * math.sin(bbox[4])
cos_h = 0.5 * bbox[3] * math.cos(bbox[4])
sin_h = 0.5 * bbox[3] * math.sin(bbox[4])
x0 = bbox[0] + cos_w + sin_h
y0 = bbox[1] - sin_w + cos_h
x1 = bbox[0] - cos_w + sin_h
y1 = bbox[1] + sin_w + cos_h
x2 = bbox[0] - cos_w - sin_h
y2 = bbox[1] + sin_w - cos_h
x3 = bbox[0] + cos_w - sin_h
y3 = bbox[1] - sin_w - cos_h
corners = [x0, y0, x1, y1, x2, y2, x3, y3]
return np.array(corners, dtype=np.float32)
def _segmentation_to_rotation_bbox(self, rect):
"""
:param rect: format [x1, y1, x2, y2, x3, y3, x4, y4]
:param with_label: default True
:return: format [x_c, y_c, w, h, theta]
"""
box = np.int0(rect)
box = box.reshape([-1, 2])
rect = cv2.minAreaRect(box)
rwidth, rheight = rect[1]
rangle = rect[2]
if rwidth > rheight:
rangle = np.abs(rangle)
else:
temp = rwidth
rwidth = rheight
rheight = temp
rangle = np.abs(rangle) + 90
rbox = [rect[0][0], rect[0][1], rwidth, rheight, rangle / 180.0 * 3.141593]
return np.array(rbox, dtype=np.float32)
# def _segmentation_to_rotation_bbox(self, rect):
# """
# :param rect: format [x1, y1, x2, y2, x3, y3, x4, y4]
# :param with_label: default True
# :return: format [x_c, y_c, w, h, theta]
# """
# box = np.int0(rect)
# box = box.reshape([-1, 2])
# rect1 = cv2.minAreaRect(box)
# x, y, w, h, theta = rect1[0][0], rect1[0][1], rect1[1][0], rect1[1][1], rect1[2]
# boxes = [x, y, w, h, theta]
# return np.array(boxes, dtype=np.float32)
def _get_border(self, border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
def __getitem__(self, index):
        """
        Build one training sample for rotated-bounding-box detection.

        Loads the COCO image and its annotations, applies random
        scale/shift/rotation/flip augmentation, then rasterises the targets
        into output-resolution maps (centre heatmaps, width/height, angle,
        sub-pixel offsets).

        :param index: index into ``self.images``
        :return: dict with keys 'input' (CHW float image), 'hm', 'reg_mask',
                 'ind', 'wh', 'angle', 'cxcy' plus, depending on ``self.opt``,
                 'dense_wh'/'dense_wh_mask', 'cat_spec_wh'/'cat_spec_mask',
                 'reg' and 'meta'.
        """
        img_id = self.images[index]
        file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
        img_path = os.path.join(self.img_dir, file_name)
        ann_ids = self.coco.getAnnIds(imgIds=[img_id], iscrowd=0)
        anns = self.coco.loadAnns(ids=ann_ids)
        num_objs = min(len(anns), self.max_objs)
        img = cv2.imread(img_path)
        img = cv2.resize(img, (512, 512))
        # NOTE(review): split is forced to 'train' here, so the augmentation
        # branch below always runs regardless of the dataset's real split
        self.split = 'train'
        height, width = img.shape[0], img.shape[1]
        # image centre in (x, y) order
        c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
        if self.opt.keep_res:
            # pad up to the next multiple implied by opt.pad (bitwise OR trick)
            input_h = (height | self.opt.pad) + 1
            input_w = (width | self.opt.pad) + 1
            s = np.array([input_w, input_h], dtype=np.float32)
        else:
            # scale = longest image side
            s = max(img.shape[0], img.shape[1]) * 1.0
            input_h, input_w = self.opt.input_h, self.opt.input_w
        rot = 0
        flipped = False
        if self.split == 'train':
            # NOTE(review): '(1 == 0)' makes the random-crop branch dead code;
            # only the scale/shift branch below is ever taken
            if not self.opt.not_rand_crop and (1 == 0):
                s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
                w_border = self._get_border(128, img.shape[1])
                h_border = self._get_border(128, img.shape[0])
                c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
                c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
            else:
                # random scale and centre-shift augmentation
                sf = self.opt.scale
                cf = self.opt.shift
                c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
                c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
                s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
            if np.random.random() < self.opt.aug_rot:
                # random rotation augmentation, clipped to +-2*rf degrees
                rf = self.opt.rotate
                rot = np.clip(np.random.randn()*rf, -rf*2, rf*2)
            if np.random.random() < self.opt.flip:
                # horizontal flip
                flipped = True
                img = img[:, ::-1, :]
                c[0] = width - c[0] - 1
        # warp the image into the network input resolution
        trans_input = get_affine_transform(
            c, s, rot, [self.opt.input_res, self.opt.input_res])
        inp = cv2.warpAffine(img, trans_input,
                             (self.opt.input_res, self.opt.input_res),
                             flags=cv2.INTER_LINEAR)
        # debug img
        # debug_img = inp.copy()
        inp = (inp.astype(np.float32) / 255.)
        if self.split == 'train' and not self.opt.no_color_aug:
            color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
        # normalise and convert HWC -> CHW
        inp = (inp - self.mean) / self.std
        inp = inp.transpose(2, 0, 1)
        output_h = input_h // self.opt.down_ratio
        output_w = input_w // self.opt.down_ratio
        num_classes = self.num_classes
        trans_output_rot = get_affine_transform(c, s, rot, [output_w, output_h])
        # trans_output = get_affine_transform(c, s, 0, [output_w, output_h])
        # target tensors at output resolution
        hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
        wh = np.zeros((self.max_objs, 2), dtype=np.float32)
        cxcy = np.zeros((self.max_objs, 2), dtype=np.float32)
        angle = np.zeros((self.max_objs, 1), dtype=np.float32)
        dense_wh = np.zeros((2, output_h, output_w), dtype=np.float32)
        reg = np.zeros((self.max_objs, 2), dtype=np.float32)
        ind = np.zeros((self.max_objs), dtype=np.int64)
        reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
        cat_spec_wh = np.zeros((self.max_objs, num_classes * 2), dtype=np.float32)
        cat_spec_mask = np.zeros((self.max_objs, num_classes * 2), dtype=np.uint8)
        draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
            draw_umich_gaussian
        gt_det_angle = []
        for k in range(num_objs):
            ann = anns[k]
            cls_id = int(self.cat_ids[ann['category_id']])
            # 4-corner polygon [x1, y1, ..., x4, y4] in input-image coords
            segmentation = ann['segmentation'][0]
            segmentation = np.array(segmentation, dtype=np.float32)
            if flipped:
                segmentation[0::2] = width - segmentation[0::2] - 1
            # map each corner into output-resolution coordinates
            segmentation[:2] = affine_transform(segmentation[:2], trans_output_rot)
            segmentation[2:4] = affine_transform(segmentation[2:4], trans_output_rot)
            segmentation[4:6] = affine_transform(segmentation[4:6], trans_output_rot)
            segmentation[6:8] = affine_transform(segmentation[6:8], trans_output_rot)
            if not self.opt.not_clip:
                # polygon centre; objects whose centre falls outside the
                # output map are dropped entirely
                cx = (segmentation[0] + segmentation[2] +
                      segmentation[4] + segmentation[6]) / 4
                cy = (segmentation[1] + segmentation[3] +
                      segmentation[5] + segmentation[7]) / 4
                if cx < 0 or cy < 0 or cx > output_w or cy > output_h:
                    continue
                if np.abs(segmentation[2] - segmentation[0]) < 1e-4 or np.abs(segmentation[6] - segmentation[4]) < 1e-4:
                    # near-vertical edges: plain clipping avoids dividing by
                    # a (near-)zero run when computing the edge slopes below
                    segmentation[0::2] = np.clip(segmentation[0::2], 0, output_w)
                    segmentation[1::2] = np.clip(segmentation[1::2], 0, output_h)
                else:
                    # clip each corner along its polygon edge so the shape is
                    # preserved: slope/intercept of the two long edges
                    # line 0-1
                    k_01 = (segmentation[3] - segmentation[1]) / \
                           (segmentation[2] - segmentation[0])
                    b_01 = segmentation[3] - k_01 * segmentation[2]
                    # line 2-3
                    k_23 = (segmentation[7] - segmentation[5]) / \
                           (segmentation[6] - segmentation[4])
                    b_23 = segmentation[7] - k_23 * segmentation[6]
                    # 0
                    if segmentation[0] < 0:
                        segmentation[0] = 0
                        segmentation[1] = b_01
                    if segmentation[0] > output_w:
                        segmentation[0] = output_w
                        segmentation[1] = k_01 * output_w + b_01
                    if segmentation[1] < 0:
                        segmentation[1] = 0
                        segmentation[0] = -b_01 / k_01
                    if segmentation[1] > output_h:
                        segmentation[1] = output_h
                        segmentation[0] = (output_h-b_01) / k_01
                    # 1
                    if segmentation[2] < 0:
                        segmentation[2] = 0
                        segmentation[3] = b_01
                    if segmentation[2] > output_w:
                        segmentation[2] = output_w
                        segmentation[3] = k_01 * output_w + b_01
                    if segmentation[3] < 0:
                        segmentation[3] = 0
                        segmentation[2] = -b_01 / k_01
                    if segmentation[3] > output_h:
                        segmentation[3] = output_h
                        segmentation[2] = (output_h-b_01) / k_01
                    # 2
                    if segmentation[4] < 0:
                        segmentation[4] = 0
                        segmentation[5] = b_23
                    if segmentation[4] > output_w:
                        segmentation[4] = output_w
                        segmentation[5] = k_23 * output_w + b_23
                    if segmentation[5] < 0:
                        segmentation[5] = 0
                        segmentation[4] = -b_23 / k_23
                    if segmentation[5] > output_h:
                        segmentation[5] = output_h
                        segmentation[4] = (output_h-b_23) / k_23
                    # 3
                    if segmentation[6] < 0:
                        segmentation[6] = 0
                        segmentation[7] = b_23
                    if segmentation[6] > output_w:
                        segmentation[6] = output_w
                        segmentation[7] = k_23 * output_w + b_23
                    if segmentation[7] < 0:
                        segmentation[7] = 0
                        segmentation[6] = -b_23 / k_23
                    if segmentation[7] > output_h:
                        segmentation[7] = output_h
                        segmentation[6] = (output_h-b_23) / k_23
            # debug corners
            # corners = segmentation.copy().reshape(-1, 1, 2) * self.opt.down_ratio
            # corners = corners.astype(int)
            # cv2.polylines(debug_img, [corners], True, (0, 255, 255), 2)
            # convert the (clipped) polygon to [x_c, y_c, w, h, theta]
            bbox = self._segmentation_to_rotation_bbox(segmentation)
            ct_x, ct_y, w, h, theta = bbox
            if h == 0:
                h += 1
            if h > 0 and w > 0 and ct_x > 0 and ct_y > 0 and ct_x < output_w and ct_y < output_h:
                # splat a gaussian at the object centre on the class heatmap
                radius = gaussian_radius((math.ceil(h), math.ceil(w)))
                radius = max(0, int(radius))
                radius = self.opt.hm_gauss if self.opt.mse_loss else radius
                ct = np.array([ct_x, ct_y], dtype=np.float32)
                ct_int = ct.astype(np.int32)
                draw_gaussian(hm[cls_id], ct_int, radius)
                wh[k] = 1. * w, 1. * h
                # flattened index of the centre cell in the output map
                ind[k] = ct_int[1] * output_w + ct_int[0]
                # sub-pixel offset of the true centre
                reg[k] = ct - ct_int
                angle[k] = theta
                cxcy[k] = 1. * ct_x, 1. * ct_y
                reg_mask[k] = 1
                cat_spec_wh[k, cls_id * 2: cls_id * 2 + 2] = wh[k]
                cat_spec_mask[k, cls_id * 2: cls_id * 2 + 2] = 1
                if self.opt.dense_wh:
                    draw_dense_reg(dense_wh, hm.max(axis=0), ct_int, wh[k], radius)
                gt_det_angle.append([ct_x, ct_y, w, h, theta, 1, cls_id])
        # save_debug_img
        # debug_path = "/home/czm/centernet-pytorch-1.1/debug/{}".format(file_name)
        # cv2.imwrite(debug_path, debug_img)
        ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask,
               'ind': ind, 'wh': wh, 'angle': angle, 'cxcy': cxcy}
        if self.opt.dense_wh:
            hm_a = hm.max(axis=0, keepdims=True)
            dense_wh_mask = np.concatenate([hm_a, hm_a], axis=0)
            ret.update({'dense_wh': dense_wh, 'dense_wh_mask': dense_wh_mask})
            del ret['wh']
        elif self.opt.cat_spec_wh:
            ret.update({'cat_spec_wh': cat_spec_wh, 'cat_spec_mask': cat_spec_mask})
            del ret['wh']
        if self.opt.reg_offset:
            ret.update({'reg': reg})
        if self.opt.debug > 0 or not self.split == 'train':
            gt_det_angle = np.array(gt_det_angle, dtype=np.float32) if len(gt_det_angle) > 0 else \
                np.zeros((1, 7), dtype=np.float32)
            meta = {'c': c, 's': s, 'gt_det_angle': gt_det_angle, 'img_id': img_id}
            ret['meta'] = meta
        # debug
        return ret
| [
"numpy.clip",
"math.cos",
"numpy.array",
"utils.image.get_affine_transform",
"numpy.arange",
"numpy.random.random",
"cv2.minAreaRect",
"numpy.concatenate",
"numpy.abs",
"cv2.warpAffine",
"numpy.int0",
"cv2.resize",
"utils.image.color_aug",
"cv2.imread",
"numpy.random.randn",
"math.ceil... | [((530, 608), 'numpy.array', 'np.array', (['[box[0], box[1], box[0] + box[2], box[1] + box[3]]'], {'dtype': 'np.float32'}), '([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32)\n', (538, 608), True, 'import numpy as np\n'), ((1736, 1771), 'numpy.array', 'np.array', (['corners'], {'dtype': 'np.float32'}), '(corners, dtype=np.float32)\n', (1744, 1771), True, 'import numpy as np\n'), ((1986, 1999), 'numpy.int0', 'np.int0', (['rect'], {}), '(rect)\n', (1993, 1999), True, 'import numpy as np\n'), ((2042, 2062), 'cv2.minAreaRect', 'cv2.minAreaRect', (['box'], {}), '(box)\n', (2057, 2062), False, 'import cv2\n'), ((2370, 2402), 'numpy.array', 'np.array', (['rbox'], {'dtype': 'np.float32'}), '(rbox, dtype=np.float32)\n', (2378, 2402), True, 'import numpy as np\n'), ((3157, 3194), 'os.path.join', 'os.path.join', (['self.img_dir', 'file_name'], {}), '(self.img_dir, file_name)\n', (3169, 3194), False, 'import os\n'), ((3355, 3375), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (3365, 3375), False, 'import cv2\n'), ((3386, 3413), 'cv2.resize', 'cv2.resize', (['img', '(512, 512)'], {}), '(img, (512, 512))\n', (3396, 3413), False, 'import cv2\n'), ((3494, 3562), 'numpy.array', 'np.array', (['[img.shape[1] / 2.0, img.shape[0] / 2.0]'], {'dtype': 'np.float32'}), '([img.shape[1] / 2.0, img.shape[0] / 2.0], dtype=np.float32)\n', (3502, 3562), True, 'import numpy as np\n'), ((4828, 4901), 'utils.image.get_affine_transform', 'get_affine_transform', (['c', 's', 'rot', '[self.opt.input_res, self.opt.input_res]'], {}), '(c, s, rot, [self.opt.input_res, self.opt.input_res])\n', (4848, 4901), False, 'from utils.image import get_affine_transform, affine_transform\n'), ((4921, 5023), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'trans_input', '(self.opt.input_res, self.opt.input_res)'], {'flags': 'cv2.INTER_LINEAR'}), '(img, trans_input, (self.opt.input_res, self.opt.input_res),\n flags=cv2.INTER_LINEAR)\n', (4935, 5023), False, 'import 
cv2\n'), ((5506, 5559), 'utils.image.get_affine_transform', 'get_affine_transform', (['c', 's', 'rot', '[output_w, output_h]'], {}), '(c, s, rot, [output_w, output_h])\n', (5526, 5559), False, 'from utils.image import get_affine_transform, affine_transform\n'), ((5643, 5704), 'numpy.zeros', 'np.zeros', (['(num_classes, output_h, output_w)'], {'dtype': 'np.float32'}), '((num_classes, output_h, output_w), dtype=np.float32)\n', (5651, 5704), True, 'import numpy as np\n'), ((5714, 5760), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], {'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (5722, 5760), True, 'import numpy as np\n'), ((5772, 5818), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], {'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (5780, 5818), True, 'import numpy as np\n'), ((5831, 5877), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 1)'], {'dtype': 'np.float32'}), '((self.max_objs, 1), dtype=np.float32)\n', (5839, 5877), True, 'import numpy as np\n'), ((5893, 5944), 'numpy.zeros', 'np.zeros', (['(2, output_h, output_w)'], {'dtype': 'np.float32'}), '((2, output_h, output_w), dtype=np.float32)\n', (5901, 5944), True, 'import numpy as np\n'), ((5955, 6001), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], {'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (5963, 6001), True, 'import numpy as np\n'), ((6012, 6051), 'numpy.zeros', 'np.zeros', (['self.max_objs'], {'dtype': 'np.int64'}), '(self.max_objs, dtype=np.int64)\n', (6020, 6051), True, 'import numpy as np\n'), ((6069, 6108), 'numpy.zeros', 'np.zeros', (['self.max_objs'], {'dtype': 'np.uint8'}), '(self.max_objs, dtype=np.uint8)\n', (6077, 6108), True, 'import numpy as np\n'), ((6129, 6189), 'numpy.zeros', 'np.zeros', (['(self.max_objs, num_classes * 2)'], {'dtype': 'np.float32'}), '((self.max_objs, num_classes * 2), dtype=np.float32)\n', (6137, 6189), True, 'import numpy as np\n'), ((6210, 6268), 'numpy.zeros', 'np.zeros', 
(['(self.max_objs, num_classes * 2)'], {'dtype': 'np.uint8'}), '((self.max_objs, num_classes * 2), dtype=np.uint8)\n', (6218, 6268), True, 'import numpy as np\n'), ((1258, 1275), 'math.cos', 'math.cos', (['bbox[4]'], {}), '(bbox[4])\n', (1266, 1275), False, 'import math\n'), ((1304, 1321), 'math.sin', 'math.sin', (['bbox[4]'], {}), '(bbox[4])\n', (1312, 1321), False, 'import math\n'), ((1350, 1367), 'math.cos', 'math.cos', (['bbox[4]'], {}), '(bbox[4])\n', (1358, 1367), False, 'import math\n'), ((1396, 1413), 'math.sin', 'math.sin', (['bbox[4]'], {}), '(bbox[4])\n', (1404, 1413), False, 'import math\n'), ((2154, 2168), 'numpy.abs', 'np.abs', (['rangle'], {}), '(rangle)\n', (2160, 2168), True, 'import numpy as np\n'), ((3684, 3730), 'numpy.array', 'np.array', (['[input_w, input_h]'], {'dtype': 'np.float32'}), '([input_w, input_h], dtype=np.float32)\n', (3692, 3730), True, 'import numpy as np\n'), ((5222, 5282), 'utils.image.color_aug', 'color_aug', (['self._data_rng', 'inp', 'self._eig_val', 'self._eig_vec'], {}), '(self._data_rng, inp, self._eig_val, self._eig_vec)\n', (5231, 5282), False, 'from utils.image import flip, color_aug\n'), ((6555, 6595), 'numpy.array', 'np.array', (['segmentation'], {'dtype': 'np.float32'}), '(segmentation, dtype=np.float32)\n', (6563, 6595), True, 'import numpy as np\n'), ((6699, 6751), 'utils.image.affine_transform', 'affine_transform', (['segmentation[:2]', 'trans_output_rot'], {}), '(segmentation[:2], trans_output_rot)\n', (6715, 6751), False, 'from utils.image import get_affine_transform, affine_transform\n'), ((6778, 6831), 'utils.image.affine_transform', 'affine_transform', (['segmentation[2:4]', 'trans_output_rot'], {}), '(segmentation[2:4], trans_output_rot)\n', (6794, 6831), False, 'from utils.image import get_affine_transform, affine_transform\n'), ((6858, 6911), 'utils.image.affine_transform', 'affine_transform', (['segmentation[4:6]', 'trans_output_rot'], {}), '(segmentation[4:6], trans_output_rot)\n', (6874, 6911), False, 
'from utils.image import get_affine_transform, affine_transform\n'), ((6938, 6991), 'utils.image.affine_transform', 'affine_transform', (['segmentation[6:8]', 'trans_output_rot'], {}), '(segmentation[6:8], trans_output_rot)\n', (6954, 6991), False, 'from utils.image import get_affine_transform, affine_transform\n'), ((11494, 11530), 'numpy.concatenate', 'np.concatenate', (['[hm_a, hm_a]'], {'axis': '(0)'}), '([hm_a, hm_a], axis=0)\n', (11508, 11530), True, 'import numpy as np\n'), ((2258, 2272), 'numpy.abs', 'np.abs', (['rangle'], {}), '(rangle)\n', (2264, 2272), True, 'import numpy as np\n'), ((4145, 4206), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'w_border', 'high': '(img.shape[1] - w_border)'}), '(low=w_border, high=img.shape[1] - w_border)\n', (4162, 4206), True, 'import numpy as np\n'), ((4222, 4283), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'h_border', 'high': '(img.shape[0] - h_border)'}), '(low=h_border, high=img.shape[0] - h_border)\n', (4239, 4283), True, 'import numpy as np\n'), ((4553, 4571), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4569, 4571), True, 'import numpy as np\n'), ((4688, 4706), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4704, 4706), True, 'import numpy as np\n'), ((10531, 10571), 'numpy.array', 'np.array', (['[ct_x, ct_y]'], {'dtype': 'np.float32'}), '([ct_x, ct_y], dtype=np.float32)\n', (10539, 10571), True, 'import numpy as np\n'), ((11890, 11930), 'numpy.array', 'np.array', (['gt_det_angle'], {'dtype': 'np.float32'}), '(gt_det_angle, dtype=np.float32)\n', (11898, 11930), True, 'import numpy as np\n'), ((11973, 12007), 'numpy.zeros', 'np.zeros', (['(1, 7)'], {'dtype': 'np.float32'}), '((1, 7), dtype=np.float32)\n', (11981, 12007), True, 'import numpy as np\n'), ((7456, 7496), 'numpy.clip', 'np.clip', (['segmentation[0::2]', '(0)', 'output_w'], {}), '(segmentation[0::2], 0, output_w)\n', (7463, 7496), True, 'import numpy as np\n'), ((7528, 7568), 'numpy.clip', 
'np.clip', (['segmentation[1::2]', '(0)', 'output_h'], {}), '(segmentation[1::2], 0, output_h)\n', (7535, 7568), True, 'import numpy as np\n'), ((3994, 4018), 'numpy.arange', 'np.arange', (['(0.6)', '(1.4)', '(0.1)'], {}), '(0.6, 1.4, 0.1)\n', (4003, 4018), True, 'import numpy as np\n'), ((4643, 4660), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (4658, 4660), True, 'import numpy as np\n'), ((7323, 7364), 'numpy.abs', 'np.abs', (['(segmentation[2] - segmentation[0])'], {}), '(segmentation[2] - segmentation[0])\n', (7329, 7364), True, 'import numpy as np\n'), ((7375, 7416), 'numpy.abs', 'np.abs', (['(segmentation[6] - segmentation[4])'], {}), '(segmentation[6] - segmentation[4])\n', (7381, 7416), True, 'import numpy as np\n'), ((10384, 10396), 'math.ceil', 'math.ceil', (['h'], {}), '(h)\n', (10393, 10396), False, 'import math\n'), ((10398, 10410), 'math.ceil', 'math.ceil', (['w'], {}), '(w)\n', (10407, 10410), False, 'import math\n'), ((4380, 4397), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (4395, 4397), True, 'import numpy as np\n'), ((4443, 4460), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (4458, 4460), True, 'import numpy as np\n'), ((4502, 4519), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (4517, 4519), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import argparse as ap
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as clr
from hmmlearn.hmm import GaussianHMM
import scipy.stats as scistats
import logging
import pickle
import os, ntpath
import tables
import cooler
from scipy.sparse import csr_matrix, triu, lil_matrix
def toString(s):
    """
    Recursively convert bytes / lists of bytes / numpy arrays to str.

    This takes care of python2/3 differences.

    :param s: str, bytes, list or numpy.ndarray (anything else is returned
              unchanged)
    :return: str-typed equivalent of s
    """
    if isinstance(s, str):
        return s
    if isinstance(s, bytes):  # or isinstance(s, np.bytes_)
        # BUGFIX: 'sys' was referenced without being imported anywhere in
        # this script, raising NameError for any bytes input
        import sys
        if sys.version_info[0] == 2:
            return str(s)
        return s.decode('ascii')
    if isinstance(s, list):
        return [toString(x) for x in s]
    if isinstance(s, np.ndarray):
        return s.astype(str)
    return s
def loadH5(filename, includechroms=None, csr=True, returnintervals = False, dtype = int):
    '''
    loadH5(filename, includechroms=None, csr=True, returnintervals = False)

    loads an *.h5 hic matrix as created by hicexplorer

    :param filename: name of the *.h5 file containing the matrix
    :param includechroms: list of chromosomes to include in the returned objects
                          if not given all chromosomes in the *.h5 file are included
    :param csr: if True returns a csr_matrix object else a full numpy.array
    :param returnintervals: if True also returns the intervals read
    :param dtype: dtype of the entries of the loaded matrix

    :return: (matrix, chromosome start indices, chromosome names) and,
             if returnintervals is True, additionally the raw interval dict
    '''
    with tables.open_file(filename) as f:
        parts = {}
        try:
            # CSR components of the sparse matrix as stored by hicexplorer
            for matrix_part in ('data', 'indices', 'indptr', 'shape'):
                parts[matrix_part] = getattr(f.root.matrix, matrix_part).read()
        except Exception:
            logging.info('No h5 file. Please check parameters concerning the file type!')
            exit(1)
        matrix = csr_matrix(tuple([parts['data'], parts['indices'], parts['indptr']]),
                            shape=parts['shape'], dtype=dtype)
        # per-bin genomic intervals (chromosome, start, end, extra)
        intervals = {}
        for interval_part in ('chr_list', 'start_list', 'end_list', 'extra_list'):
            if toString(interval_part) == toString('chr_list'):
                # chromosome names may be stored as bytes; convert to str
                chrom_list = getattr(f.root.intervals, interval_part).read()
                intervals[interval_part] = toString(chrom_list)
            else:
                intervals[interval_part] = getattr(f.root.intervals, interval_part).read()
        cut_intervals = list(
            zip(intervals['chr_list'], intervals['start_list'], intervals['end_list'], intervals['extra_list']))
        assert len(cut_intervals) == matrix.shape[0], \
            "Error loading matrix. Length of bin intervals ({}) is different than the " \
            "size of the matrix ({})".format(len(cut_intervals), matrix.shape[0])
        # compute index array and chromosome list
        # inds[i] is the first matrix row/col belonging to chr_list[i]
        inds, chr_list, chroms = [], [], set()
        for i, (chr, start, end, extra) in enumerate(cut_intervals):
            if chr not in chroms:
                chroms.add(chr)
                inds.append(i)
                chr_list.append(chr)
        # if includechroms is given we filter the output for the chromosomes listed
        # and recompute indices of chromosome boundaries in the resulting matrix
        if includechroms:
            includechroms = set(includechroms)
            filterinds, filterchrs = [], []
            for i, chr in zip(range(len(inds)), chr_list):
                if chr in includechroms:
                    filterinds.append([inds[i], inds[i + 1] if i + 1 != len(inds) else matrix.shape[0]])
                    filterchrs.append(chr)
            # boolean mask over bins of the kept chromosomes
            matrixinds = np.zeros(shape=matrix.shape[0], dtype=bool)
            ncuts, tmpe = [], 0
            for s, e in filterinds:
                matrixinds[s: e] = True
                if s == tmpe:
                    ncuts.append(s)
                    tmpe = e
                else:
                    # non-contiguous keep ranges collapse together; shift the
                    # boundary accordingly
                    ncuts.append(tmpe)
                    tmpe = e - s + tmpe
            matrix = matrix[matrixinds, :][:, matrixinds]
            inds = ncuts
            chr_list = filterchrs
        if not csr:
            # densify and mirror the upper triangle to get a symmetric array
            x = matrix.toarray()
            xi, yi = np.triu_indices(x.shape[0], k=1)
            x[yi, xi] = x[xi, yi]
            matrix = x
        if returnintervals:
            return matrix, np.array(inds), np.array(chr_list), intervals
        else:
            return matrix, np.array(inds), np.array(chr_list)
def loadCooler(cooleruri, applyNorm = False, norm = 'weight', includeChroms = None, nans_to_zero = False):
    '''
    loads a cooler into a csr matrix
    taken from HiCMatrix cool.py see also
    https://github.com/deeptools/HiCMatrix/blob/master/hicmatrix/lib/cool.py

    :param cooleruri: uri to a given cooler
    :param applyNorm: if True then the 'norm' is applied to the datapoints in the matrix
    :param norm: normalization weights to apply if applyNorm is set True
    :param includeChroms: list of chromosomes to load, if given only the specified chromosomes will be loaded from the cooler
    :param nans_to_zero: if True, NaN entries of the loaded matrix are set to 0

    :return: data in cooler as scipy.sparse.csr_matrix, chromosome start
             indices, chromosome names
    '''
    cooler_file = cooler.Cooler(cooleruri)
    matrix = cooler_file.matrix(balance = norm if applyNorm else False)[:]
    chroms = cooler_file.chromnames
    # collect the bin boundaries (start/end bin index) of every chromosome
    inds = set()
    for chrom in chroms:
        for binidx in cooler_file.extent(chrom):
            inds.add(binidx)

    inds = sorted(list(inds))

    if includeChroms:
        includechroms = set(includeChroms)
        filterinds, filterchroms = [], []
        for i, chr in zip(range(len(inds)), chroms):
            if chr in includechroms:
                filterinds.append([inds[i], inds[i + 1] if i + 1 != len(inds) else matrix.shape[0]])
                filterchroms.append(chr)

        # boolean mask over bins of the kept chromosomes
        matrixinds = np.zeros(shape=matrix.shape[0], dtype=bool)
        ncuts, tmpe = [], 0
        for s, e in filterinds:
            matrixinds[s: e] = True
            if s == tmpe:
                ncuts.append(s)
                tmpe = e
            else:
                # non-contiguous keep ranges collapse together; shift the
                # boundary accordingly
                ncuts.append(tmpe)
                tmpe = e - s + tmpe

        matrix = matrix[matrixinds, :][:, matrixinds]

        inds = ncuts

        chroms = filterchroms

    if nans_to_zero:
        matrix[np.isnan(matrix)] = 0

    return matrix, np.array(inds), np.array(chroms)
def constructClusterContactMatrix(gwmat, chrlist, indarr, excluderows = None, excludecols = None,
                                  imputerows = None, imputecols = None, removelim = 0.3, withX = False,
                                  even = False, transform = True):
    '''
    constructClusterContactMatrix(gwmat, chrlist, indarr, removelim = 0.3, excluderows = None, excludecols = None,
                                  imputerows = None, imputecols = None, withX = False, even = False, transform = True)

    given a normalized, genomewide contact matrix (can be constructed with
    ConstructGenomeWideContactMatrix) constructs a matrix C suitable for performing
    clustering as described in Rao et al. 2014. In particular C is constructed such
    that C_i,j contains the normalized interaction between odd chromosome i and even
    chromosome j. Rows and columns with a number of zero or undefined entries larger than
    removelim of the row/col are removed. Note that the bins to be removed are computed
    sequentially, first rows then columns. If even is True the matrix is transposed
    prior to row/col removal to keep removal for odd and even chromosomes consistent.
    Rows and columns given by excluderows/cols are excluded from the analysis. However,
    you can also pass a list of rows and columns using imputerows/cols to impute specific
    values specified by row/col with a random value drawn from the row's distribution before
    z-score transformation.

    :param gwmat: genomewide normalized contact matrix
    :param chrlist: sorted list of chromosomes in gwmat
                    (ascending chr1, chr2, ..., chr10, chr11, ..., chrX)
    :param indarr: array containing the indices of the single matrices in gwmat
                   see ConstructGenomeWideContactMatrix for more details
    :param excluderows: list of row indices corresponding to indarr that should be excluded
    :param excludecols: list of column indices corresponding to indarr that should be excluded
                        from the clustering matrix
    :param imputerows: list of rows for which an imputation should be performed
    :param imputecols: list of cols for which an imputation should be performed
    :param removelim: limit of fraction of undefined or zero entries in row/col
                      rows/cols with sum(0 | NaN)/len(row | col) > removelim are removed
    :param withX: if True chromosome X is included in the even chromosomes
    :param even: if True Cij is transposed prior to row/col removal
    :param transform: if True logarithm and zscore transformation are applied to Cij

    :return: contact subset matrix where rows are only composed of odd chromosomes
             and columns of even chromosomes (including X if withX = True, or vice versa
             if even = True), row indices that were removed, column indices that were removed
    '''
    # building boolean index arrays for row and columns
    colindex = np.zeros(shape = gwmat.shape[1], dtype = bool)
    rowindex = np.zeros(shape = gwmat.shape[0], dtype = bool)

    if excluderows or imputerows:
        processrowchroms = set()
        for indlist, indtype in zip([excluderows, imputerows], ['exclude', 'impute']):
            if not indlist:
                continue

            else:
                # determine which chromosomes the given row indices fall into
                rowcounts, rowbins = np.histogram(indlist, bins = [i for i in indarr] + [gwmat.shape[0]])
                processrowchroms.update(chrlist[np.where(rowcounts > 0)])

                # copy list to make sure the original does not get altered
                if indtype == 'exclude':
                    excluderows = excluderows.copy()

                else:
                    imputerows = imputerows.copy()

    else:
        processrowchroms = set()

    if excludecols or imputecols:
        processcolchroms = set()
        for indlist, indtype in zip([excludecols, imputecols], ['exclude', 'impute']):
            if not indlist:
                continue

            else:
                # determine which chromosomes the given column indices fall into
                colcounts, colbins = np.histogram(indlist, bins=[i for i in indarr] + [gwmat.shape[0]])
                processcolchroms.update(chrlist[np.where(colcounts > 0)])

                # copy list to make sure the original does not get altered
                if indtype == 'exclude':
                    excludecols = excludecols.copy()

                else:
                    imputecols = imputecols.copy()

    else:
        processcolchroms = set()

    # transformed index for row and col
    rowtransform = [0]
    coltransform = [0]
    for i, chr in enumerate(chrlist):
        if chr == 'chrX' and not withX:
            continue

        else:
            if i%2 == 0:
                rowtransform.append(rowtransform[-1] + indarr[i + 1] - indarr[i] if i + 1 != len(chrlist)
                                    else rowtransform[-1] + gwmat.shape[0] - indarr[i])
                rowindex[indarr[i]: indarr[i + 1] if i + 1 != len(chrlist) else gwmat.shape[0]] = True
                if chr in processrowchroms:
                    # BUGFIX: previously iterated [imputerows, excludecols];
                    # the row indices to shift are imputerows and excluderows
                    # (mirrors the [imputecols, excludecols] pair in the
                    # column branch below)
                    for indlist in [imputerows, excluderows]:
                        if not indlist:
                            continue

                        else:
                            for j in range(len(indlist)):
                                # BUGFIX: guard was 'i != len(chrlist)', which is always
                                # True inside enumerate and let indarr[i + 1] overflow
                                # for the last chromosome; use 'i + 1 != len(chrlist)'
                                # as everywhere else in this function
                                if indlist[j] < (indarr[i + 1] if i + 1 != len(chrlist) else gwmat.shape[0]):
                                    indlist[j] = indlist[j] - indarr[i] + rowtransform[-2]

            else:
                coltransform.append(coltransform[-1] + indarr[i + 1] - indarr[i] if i + 1 != len(chrlist)
                                    else coltransform[-1] + gwmat.shape[0] - indarr[i])
                colindex[indarr[i]: indarr[i + 1] if i + 1 != len(chrlist) else gwmat.shape[1]] = True
                if chr in processcolchroms:
                    for indlist in [imputecols, excludecols]:
                        if not indlist:
                            continue

                        else:
                            for j in range(len(indlist)):
                                # BUGFIX: same off-by-one guard fix as in the row branch
                                if indlist[j] < (indarr[i + 1] if i + 1 != len(chrlist) else gwmat.shape[0]):
                                    indlist[j] = indlist[j] - indarr[i] + coltransform[-2]

    # constructing interchromosome contact matrix
    Cij = gwmat[rowindex, :][:, colindex]

    # setting inf and nan to 0
    Cij[np.isnan(Cij) | np.isinf(Cij)] = 0

    if even:
        Cij = Cij.T

        # if even we flip excludes and imputes
        tmpexclude, tmpimpute = excluderows, imputerows
        excluderows, imputerows = excludecols, imputecols
        excludecols, imputecols = tmpexclude, tmpimpute

    # computing fractions of 0 elements in rows
    rowzerofrac = 1 - np.count_nonzero(Cij, axis = 1)/Cij.shape[1]
    colzerofrac = 1 - np.count_nonzero(Cij, axis = 0)/Cij.shape[0]

    # finding indices of rows and removing them
    rowrembins = np.where(rowzerofrac > removelim)[0]
    boolrowrembins = rowzerofrac > removelim

    if excluderows:
        rowrembins = np.concatenate([rowrembins, np.array(excluderows)])
        rowrembins.sort()
        boolrowrembins[excluderows] = True

    Cij = Cij[~boolrowrembins, :]

    # same for columns
    colrembins = np.where(colzerofrac > removelim)[0]
    boolcolrembins = colzerofrac > removelim

    if excludecols:
        colrembins = np.concatenate([colrembins, np.array(excludecols)])
        colrembins.sort()
        boolcolrembins[excludecols] = True

    Cij = Cij[:, ~boolcolrembins]

    if transform:
        # making sure logarithm is well defined
        Cij[Cij == 0] = 1

        # taking the logarithm
        Cij = np.log(Cij)

        # imputing values
        if imputerows and imputecols:
            # translate impute coordinates into the reduced matrix by
            # subtracting the number of removed bins before them
            colsubtract = (colrembins < imputecols[0]).sum()
            startcol = imputecols[0] - colsubtract
            endcol = imputecols[-1] - colsubtract - (colrembins > imputecols[0]).sum() + \
                     (colrembins > imputecols[-1]).sum() + 1
            rowsubtract = (rowrembins < imputerows[0]).sum()
            startrow = imputerows[0] - rowsubtract
            endrow = imputerows[-1] - rowsubtract - (rowrembins > imputerows[0]).sum() + \
                     (rowrembins > imputerows[-1]).sum() + 1

            for row in range(startrow, endrow):
                # draw replacement values from the row's own normal distribution
                rv = scistats.norm(loc=Cij[row].mean(), scale=Cij[row].std())
                zeros = np.where(Cij[row] == 0)[0]
                Cij[row, startcol: endcol] = rv.rvs(size= endcol - startcol)
                Cij[row, zeros] = 0

        elif (imputerows and not imputecols) or (imputecols and not imputerows):
            raise Exception('Both imputerows and cols have to be given')

        # applying row-wise zscore calculation
        Cij = scistats.zscore(Cij, axis = 1, ddof = 1)

    return Cij, rowrembins, colrembins
def clusterMatrix(Cij, n_components, covariance_type = 'diag', n_iter = 1000):
    '''
    Cluster the rows of the processed interchromosomal contact matrix.

    Fits a GaussianHMM (as generated by constructClusterContactMatrix the
    input rows are z-scored log contacts) and decodes the most likely state
    sequence with the Viterbi algorithm.

    :param Cij: processed genomewide interchromosomal contact matrix
                as generated by constructClusterContactMatrix
    :param n_components: number of clusters to find
    :param covariance_type: type of the covariance matrix to use (see hmmlearn documentation for more details)
    :param n_iter: number of iterations allowed

    :return: numpy.array holding the cluster label (0 .. n_components - 1)
             of each bin, and the fitted hmmlearn.hmm.GaussianHMM object
    '''
    hmm = GaussianHMM(
        n_components = n_components,
        covariance_type = covariance_type,
        n_iter = n_iter,
        verbose = True
    )

    # estimate model parameters on the contact matrix
    hmm.fit(Cij)

    # most likely hidden-state sequence (Viterbi decoding)
    state_sequence = hmm.predict(Cij)

    return state_sequence, hmm
def plotClustering(Cij, clusters, ax, colors, vmin = 0, vmax = 10, title = None):
    '''
    Render Cij as a multicolor heatmap, one color per row-cluster.

    Each cluster's rows are drawn with their own white-to-color ramp; rows
    belonging to other clusters are masked out for that layer.

    :param Cij: processed genomewide interchromosomal contact matrix
                as generated by constructClusterContactMatrix
    :param clusters: numpy.array containing most probable hidden state per row in Cij
    :param ax: ax to which the heatmap should be plotted
    :param colors: list of colors to use for each cluster
    :param vmin: minimum value of the array corresponding to the min color value
    :param vmax: maximum value of the array corresponding to the max color value
    :param title: title of the heatmap

    :return: matplotlib.Axes object, dictionary of the drawn images keyed by cluster
    '''
    meshes = {}
    for state, color in zip(np.unique(clusters), colors):
        # hide every row that does not belong to the current cluster
        hidden = np.tile(clusters != state, (Cij.shape[1], 1)).T
        layer = np.ma.masked_where(hidden, Cij)

        # white-to-color ramp dedicated to this cluster
        cmap = clr.LinearSegmentedColormap.from_list('map' + str(state), ['White', color], N = 256)

        meshes[state] = ax.imshow(layer, cmap = cmap, label = str(state), vmin = vmin, vmax = vmax, aspect = 'auto')

    if title:
        ax.set_title(title)

    return ax, meshes
def computeInformationCriteria(model, data, n, covariance_type = 'diag'):
'''
plotInformationCriterion(data, model, covariance_type = 'diag')
given the original data and the model of a gaussian HMM computes the AIC and BIC
:param model: fitted gaussian HMM
:param data: data on which the gaussian HMM model was fitted
:param n: number of components used in model fitting (i.e. number of clusters)
:param covariance_type: type of the covariance matrix to use (see hmmlearn documentation for more details)
:return: matplotlib.Axes object containing the plot, dictionary of numpy.arrays containing IC values
'''
# calculating number of model parameters
# for a given model with N states the number of transition parameters
# can be calculated by noticing that at each time t we are able to transit
# to any other state in N. Since sum(P(S)) = 1 we know the last parameter if
# we know the other N - 1
transitionparams = n*(n-1)
# each state than harbors a certain emission probability governed by
# a multivariate gaussian distribution for which we have
# M parameters controlling the means and in case of the diagonal
# covariance matrix we have another M parameters (M(M + 1)/2 in case of full)
# M is the number of variables (i.e. number of rows)
if covariance_type == 'diag':
emissionparams = 2*n*data.shape[0]
else:
emissionparams = n*data.shape[0]*(data.shape[0] + 3)/2
BIC = -2*model.score(data) + np.log(data.shape[0])*(transitionparams + emissionparams)
AIC = -2*model.score(data) + 2*(transitionparams + emissionparams)
return AIC, BIC
def plotInformationCriterion(values, label, mink, maxk, title, ax):
ax.plot(np.arange(mink, maxk + 1), values[mink - 1:], label = label, ls = '--', zorder = 1)
ax.scatter(np.arange(mink, maxk + 1), values[mink - 1:], marker = '.', color = 'dimgrey')
ax.set_ylabel(title)
ax.set_xlabel('n clusters')
ax.legend()
ax.set_xlim(mink, maxk)
ax.set_title(title)
return ax
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
parser = ap.ArgumentParser()
parser.add_argument('-m', '--matrix', required = True,
help = '''cool, h5 or npz file holding the genomewide KR normalized contact matrix.
Also has to contain the indexarray and chromosome list as returned by
constructGenomeWideContactMatrix''')
parser.add_argument('--inputFormat', default = 'cool', choices = ['cool', 'h5', 'npz'],
help = 'specifies the format of the input file')
parser.add_argument('--weightName', default = 'weight',
help = 'name of the correction weights to apply to matrix if --inputFormat == cool')
parser.add_argument('--mink', default = 1, type = int,
help = 'minimum number of clusters k')
parser.add_argument('--maxk', default = 20, type = int,
help = 'maximumn number of clusters k')
parser.add_argument('-r', '--removelim', default = 0.3, type = float,
help = '''determines the fraction of entries in a row/col of the clustering matrix allowed to be 0
if the fraction of entries is larger the row/col is removed before clustering''')
parser.add_argument('-p', '--prefix', required = True,
help = '''name of the output npz file holding cluster assignments for each value of k
between --mink and --maxk and the correspondingly calculated BIC and AIC''')
parser.add_argument('--includeChromosome', nargs = '*', default = None,
help = 'chromosomes to include in the normalization as space separated list (i.e. chr1 chr2 ...)')
parser.add_argument('-e', '--exclude', default = None,
help = '''ranges of rows with respect to the genome wide matrix to exclude from the analysis
has to be passed as comma-separated list of integers delimited by a colon
e.g. i1:i2,i3:i4,...''')
parser.add_argument('--imputerows', default = None,
help = '''ranges of rows with respect to the genome wide matrix that should be imputed at --imputecols positions by row
normal distributed values has to be passed as integers delimited by a colon e.g. i1:i2''')
parser.add_argument('--imputecols', default = None,
help = '''ranges of cols with respect to the genome wide matrix that should be imputed at --imputerows positions
has to be passed as integers delimited by a colon e.g. i1:i2''')
parser.add_argument('-pd', '--plotdir', default = None,
help = 'directory to which to write the plots to')
parser.add_argument('-o', '--outdir', default = '.',
help = 'directory to write outputfiles to')
parser.add_argument('--noScale', default = False, action = 'store_true',
help = 'if set bypasses scaling of normalized matrix')
parser.add_argument('--scaleFactor', default = 100000, type = float,
help = 'factor used to scale the matrix')
args = parser.parse_args()
if args.plotdir == None:
plotdir = args.outdir
else:
plotdir = args.plotdir
imputerows, imputecols = [], []
if args.imputerows and args.imputecols:
imputerows = list(range(*[int(i) for i in args.imputerows.split(':')]))
imputecols = list(range(*[int(i) for i in args.imputecols.split(':')]))
elif (args.imputecols and not args.imputerows) or (args.imputerows and not args.imputecols):
raise RuntimeError('Both, --imputerows and --imputecols are required for imputation of values')
logging.info('reading in normalized contact matrix')
if args.inputFormat == 'npz':
npz = np.load(args.matrix)
gwmat, indarr, chrlist = [npz[key] for key in ['cm', 'inds', 'chrlist']]
elif args.inputFormat == 'h5':
gwmat, indarr, chrlist = loadH5(args.matrix,
csr = False,
includechroms = args.includeChromosome,
dtype = float)
else:
gwmat, indarr, chrlist = loadCooler(args.matrix,
applyNorm = True,
norm = args.weightName,
includeChroms = args.includeChromosome,
nans_to_zero = True)
if not args.noScale:
gwmat *= args.scaleFactor
logging.info('constructing clustering matrices')
excluderows = []
if args.exclude:
for r in args.exclude.split(','):
i1, i2 = [int(i) for i in r.split(':')]
excluderows += list(range(i1, i2))
clustmats = {'odd': None, 'even': None}
remcols = {'oddremcols': None, 'evenremcols': None}
remrows = {'oddremrows': None, 'evenremrows': None}
for key, even in zip(['even', 'odd'], [True, False]):
clustmats[key], remrows[key + 'remrows'], remcols[key + 'remcols'] = \
constructClusterContactMatrix(gwmat,
chrlist,
indarr,
even = even,
excluderows = excluderows.copy(),
imputerows = imputerows.copy(),
imputecols = imputecols.copy(),
removelim = args.removelim)
nr, nc = clustmats[key].shape
logging.info('removed %0.2f percent of rows and %0.2f percent of cols with > %0.2f percent 0 entries for clustermatrix of %s chromosomes'
% ((1 - nr / (nr + len(remrows[key + 'remrows']))) * 100,
(1 - nc / (nc + len(remcols[key + 'remcols']))) * 100,
args.removelim * 100, key))
logging.info('performing clustering for clusters k between %i and %i' % (args.mink, args.maxk))
ICdict = {key: np.zeros(shape = args.maxk) for key in ['evenAIC', 'oddAIC', 'evenBIC', 'oddBIC']}
cfig, caxs = plt.subplots(1, 2)
clusterassignments = {}
models = {}
basename = ntpath.basename(args.matrix).split('.')[0]
cmap = plt.get_cmap('jet')
for clustering in ['even', 'odd']:
for k in range(args.mink, args.maxk + 1):
clusters, model = clusterMatrix(clustmats[clustering], k)
ICdict[clustering + 'AIC'][k - 1], ICdict[clustering + 'BIC'][k - 1] = \
computeInformationCriteria(model, clustmats[clustering], k)
clusterassignments[clustering + 'k' + str(k)] = clusters
models[clustering + 'k' + str(k)] = model
colorlist = cmap(np.arange(k)/k)
fig, ax = plt.subplots()
ax, meshes = plotClustering(clustmats[clustering], clusters, ax, colorlist, vmax = 1)
fig.set_figwidth(20)
fig.set_figheight(20)
fig.tight_layout()
fig.savefig(os.path.join(plotdir, '_'.join([basename, clustering, 'k' + str(k)]) + '.pdf'))
plt.close(fig)
for criterion, cax in zip(['AIC', 'BIC'], caxs):
plotInformationCriterion(ICdict[clustering + criterion],
clustering,
args.mink,
args.maxk,
criterion,
cax)
cfig.set_figwidth(8)
cfig.set_figheight(4)
cfig.tight_layout()
cfig.savefig(os.path.join(args.plotdir, '_'.join([basename, 'informationcriterion.pdf'])))
logging.info('saving arrays')
np.savez(os.path.join(args.outdir, args.prefix + '.npz'),
**clusterassignments,
**ICdict,
**remcols,
**remrows)
for key, model in models.items():
with open(os.path.join(args.outdir, args.prefix + 'hmm' + key + '.pkl'), 'wb') as file:
pickle.dump(model, file)
| [
"numpy.log",
"numpy.count_nonzero",
"numpy.array",
"logging.info",
"numpy.arange",
"numpy.histogram",
"argparse.ArgumentParser",
"numpy.where",
"numpy.ma.masked_where",
"matplotlib.pyplot.close",
"numpy.isinf",
"hmmlearn.hmm.GaussianHMM",
"numpy.tile",
"numpy.triu_indices",
"tables.open_... | [((20269, 20344), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s - %(message)s', level=logging.INFO)\n", (20288, 20344), False, 'import logging\n'), ((20354, 20373), 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {}), '()\n', (20371, 20373), True, 'import argparse as ap\n'), ((23983, 24035), 'logging.info', 'logging.info', (['"""reading in normalized contact matrix"""'], {}), "('reading in normalized contact matrix')\n", (23995, 24035), False, 'import logging\n'), ((24807, 24855), 'logging.info', 'logging.info', (['"""constructing clustering matrices"""'], {}), "('constructing clustering matrices')\n", (24819, 24855), False, 'import logging\n'), ((26137, 26237), 'logging.info', 'logging.info', (["('performing clustering for clusters k between %i and %i' % (args.mink,\n args.maxk))"], {}), "('performing clustering for clusters k between %i and %i' % (\n args.mink, args.maxk))\n", (26149, 26237), False, 'import logging\n'), ((26344, 26362), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (26356, 26362), True, 'import matplotlib.pyplot as plt\n'), ((26460, 26479), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (26472, 26479), True, 'import matplotlib.pyplot as plt\n'), ((27764, 27793), 'logging.info', 'logging.info', (['"""saving arrays"""'], {}), "('saving arrays')\n", (27776, 27793), False, 'import logging\n'), ((5133, 5157), 'cooler.Cooler', 'cooler.Cooler', (['cooleruri'], {}), '(cooleruri)\n', (5146, 5157), False, 'import cooler\n'), ((9361, 9403), 'numpy.zeros', 'np.zeros', ([], {'shape': 'gwmat.shape[1]', 'dtype': 'bool'}), '(shape=gwmat.shape[1], dtype=bool)\n', (9369, 9403), True, 'import numpy as np\n'), ((9423, 9465), 'numpy.zeros', 'np.zeros', ([], {'shape': 'gwmat.shape[0]', 'dtype': 'bool'}), '(shape=gwmat.shape[0], dtype=bool)\n', (9431, 9465), True, 'import numpy as 
np\n'), ((16282, 16386), 'hmmlearn.hmm.GaussianHMM', 'GaussianHMM', ([], {'n_components': 'n_components', 'covariance_type': 'covariance_type', 'n_iter': 'n_iter', 'verbose': '(True)'}), '(n_components=n_components, covariance_type=covariance_type,\n n_iter=n_iter, verbose=True)\n', (16293, 16386), False, 'from hmmlearn.hmm import GaussianHMM\n'), ((24076, 24096), 'numpy.load', 'np.load', (['args.matrix'], {}), '(args.matrix)\n', (24083, 24096), True, 'import numpy as np\n'), ((26248, 26273), 'numpy.zeros', 'np.zeros', ([], {'shape': 'args.maxk'}), '(shape=args.maxk)\n', (26256, 26273), True, 'import numpy as np\n'), ((27803, 27850), 'os.path.join', 'os.path.join', (['args.outdir', "(args.prefix + '.npz')"], {}), "(args.outdir, args.prefix + '.npz')\n", (27815, 27850), False, 'import os, ntpath\n'), ((1475, 1501), 'tables.open_file', 'tables.open_file', (['filename'], {}), '(filename)\n', (1491, 1501), False, 'import tables\n'), ((4164, 4196), 'numpy.triu_indices', 'np.triu_indices', (['x.shape[0]'], {'k': '(1)'}), '(x.shape[0], k=1)\n', (4179, 4196), True, 'import numpy as np\n'), ((5783, 5826), 'numpy.zeros', 'np.zeros', ([], {'shape': 'matrix.shape[0]', 'dtype': 'bool'}), '(shape=matrix.shape[0], dtype=bool)\n', (5791, 5826), True, 'import numpy as np\n'), ((6292, 6306), 'numpy.array', 'np.array', (['inds'], {}), '(inds)\n', (6300, 6306), True, 'import numpy as np\n'), ((6308, 6324), 'numpy.array', 'np.array', (['chroms'], {}), '(chroms)\n', (6316, 6324), True, 'import numpy as np\n'), ((13312, 13345), 'numpy.where', 'np.where', (['(rowzerofrac > removelim)'], {}), '(rowzerofrac > removelim)\n', (13320, 13345), True, 'import numpy as np\n'), ((13633, 13666), 'numpy.where', 'np.where', (['(colzerofrac > removelim)'], {}), '(colzerofrac > removelim)\n', (13641, 13666), True, 'import numpy as np\n'), ((14052, 14063), 'numpy.log', 'np.log', (['Cij'], {}), '(Cij)\n', (14058, 14063), True, 'import numpy as np\n'), ((15172, 15208), 'scipy.stats.zscore', 
'scistats.zscore', (['Cij'], {'axis': '(1)', 'ddof': '(1)'}), '(Cij, axis=1, ddof=1)\n', (15187, 15208), True, 'import scipy.stats as scistats\n'), ((17582, 17601), 'numpy.unique', 'np.unique', (['clusters'], {}), '(clusters)\n', (17591, 17601), True, 'import numpy as np\n'), ((17756, 17785), 'numpy.ma.masked_where', 'np.ma.masked_where', (['mask', 'Cij'], {}), '(mask, Cij)\n', (17774, 17785), True, 'import numpy as np\n'), ((19948, 19973), 'numpy.arange', 'np.arange', (['mink', '(maxk + 1)'], {}), '(mink, maxk + 1)\n', (19957, 19973), True, 'import numpy as np\n'), ((20047, 20072), 'numpy.arange', 'np.arange', (['mink', '(maxk + 1)'], {}), '(mink, maxk + 1)\n', (20056, 20072), True, 'import numpy as np\n'), ((26956, 26970), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (26968, 26970), True, 'import matplotlib.pyplot as plt\n'), ((27259, 27273), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (27268, 27273), True, 'import matplotlib.pyplot as plt\n'), ((28129, 28153), 'pickle.dump', 'pickle.dump', (['model', 'file'], {}), '(model, file)\n', (28140, 28153), False, 'import pickle\n'), ((3630, 3673), 'numpy.zeros', 'np.zeros', ([], {'shape': 'matrix.shape[0]', 'dtype': 'bool'}), '(shape=matrix.shape[0], dtype=bool)\n', (3638, 3673), True, 'import numpy as np\n'), ((4294, 4308), 'numpy.array', 'np.array', (['inds'], {}), '(inds)\n', (4302, 4308), True, 'import numpy as np\n'), ((4310, 4328), 'numpy.array', 'np.array', (['chr_list'], {}), '(chr_list)\n', (4318, 4328), True, 'import numpy as np\n'), ((4374, 4388), 'numpy.array', 'np.array', (['inds'], {}), '(inds)\n', (4382, 4388), True, 'import numpy as np\n'), ((4390, 4408), 'numpy.array', 'np.array', (['chr_list'], {}), '(chr_list)\n', (4398, 4408), True, 'import numpy as np\n'), ((12776, 12789), 'numpy.isnan', 'np.isnan', (['Cij'], {}), '(Cij)\n', (12784, 12789), True, 'import numpy as np\n'), ((12792, 12805), 'numpy.isinf', 'np.isinf', (['Cij'], {}), '(Cij)\n', (12800, 12805), 
True, 'import numpy as np\n'), ((13134, 13163), 'numpy.count_nonzero', 'np.count_nonzero', (['Cij'], {'axis': '(1)'}), '(Cij, axis=1)\n', (13150, 13163), True, 'import numpy as np\n'), ((13201, 13230), 'numpy.count_nonzero', 'np.count_nonzero', (['Cij'], {'axis': '(0)'}), '(Cij, axis=0)\n', (13217, 13230), True, 'import numpy as np\n'), ((17664, 17712), 'numpy.tile', 'np.tile', (['(clusters != clustnum)', '(Cij.shape[1], 1)'], {}), '(clusters != clustnum, (Cij.shape[1], 1))\n', (17671, 17712), True, 'import numpy as np\n'), ((19717, 19738), 'numpy.log', 'np.log', (['data.shape[0]'], {}), '(data.shape[0])\n', (19723, 19738), True, 'import numpy as np\n'), ((26410, 26438), 'ntpath.basename', 'ntpath.basename', (['args.matrix'], {}), '(args.matrix)\n', (26425, 26438), False, 'import os, ntpath\n'), ((28043, 28104), 'os.path.join', 'os.path.join', (['args.outdir', "(args.prefix + 'hmm' + key + '.pkl')"], {}), "(args.outdir, args.prefix + 'hmm' + key + '.pkl')\n", (28055, 28104), False, 'import os, ntpath\n'), ((1729, 1806), 'logging.info', 'logging.info', (['"""No h5 file. Please check parameters concerning the file type!"""'], {}), "('No h5 file. 
Please check parameters concerning the file type!')\n", (1741, 1806), False, 'import logging\n'), ((6250, 6266), 'numpy.isnan', 'np.isnan', (['matrix'], {}), '(matrix)\n', (6258, 6266), True, 'import numpy as np\n'), ((9734, 9800), 'numpy.histogram', 'np.histogram', (['indlist'], {'bins': '([i for i in indarr] + [gwmat.shape[0]])'}), '(indlist, bins=[i for i in indarr] + [gwmat.shape[0]])\n', (9746, 9800), True, 'import numpy as np\n'), ((10429, 10495), 'numpy.histogram', 'np.histogram', (['indlist'], {'bins': '([i for i in indarr] + [gwmat.shape[0]])'}), '(indlist, bins=[i for i in indarr] + [gwmat.shape[0]])\n', (10441, 10495), True, 'import numpy as np\n'), ((13464, 13485), 'numpy.array', 'np.array', (['excluderows'], {}), '(excluderows)\n', (13472, 13485), True, 'import numpy as np\n'), ((13785, 13806), 'numpy.array', 'np.array', (['excludecols'], {}), '(excludecols)\n', (13793, 13806), True, 'import numpy as np\n'), ((26922, 26934), 'numpy.arange', 'np.arange', (['k'], {}), '(k)\n', (26931, 26934), True, 'import numpy as np\n'), ((14815, 14838), 'numpy.where', 'np.where', (['(Cij[row] == 0)'], {}), '(Cij[row] == 0)\n', (14823, 14838), True, 'import numpy as np\n'), ((9851, 9874), 'numpy.where', 'np.where', (['(rowcounts > 0)'], {}), '(rowcounts > 0)\n', (9859, 9874), True, 'import numpy as np\n'), ((10544, 10567), 'numpy.where', 'np.where', (['(colcounts > 0)'], {}), '(colcounts > 0)\n', (10552, 10567), True, 'import numpy as np\n')] |
import numpy as np
a = np.arange(6)
b = a.reshape(2,3)
c = np.arange(24).reshape(2,3,4)
d = np.arange(100).reshape(2, -1)
e = np.arange(100).reshape(-1, 5)
f = np.ravel(c)
g = np.arange(10).reshape(2,-1)
print(a, a.shape)
print(b, b.shape)
print(c, c.shape)
print(d, d.shape)
print(e, e.shape)
print(f, f.shape)
print(g.T)
| [
"numpy.ravel",
"numpy.arange"
] | [((25, 37), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (34, 37), True, 'import numpy as np\n'), ((166, 177), 'numpy.ravel', 'np.ravel', (['c'], {}), '(c)\n', (174, 177), True, 'import numpy as np\n'), ((62, 75), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (71, 75), True, 'import numpy as np\n'), ((96, 110), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (105, 110), True, 'import numpy as np\n'), ((130, 144), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (139, 144), True, 'import numpy as np\n'), ((183, 196), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (192, 196), True, 'import numpy as np\n')] |
"""Test tilted backpropagation algorithm"""
import numpy as np
import odtbrain
from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, \
cutout, get_test_parameter_set
def test_3d_backprop_phase_real():
sino, angles = create_test_sino_3d()
parameters = get_test_parameter_set(2)
# reference
rref = list()
for p in parameters:
fref = odtbrain.backpropagate_3d(sino, angles, padval=0,
dtype=np.float64, onlyreal=True, **p)
rref.append(cutout(fref))
dataref = np.array(rref).flatten().view(float)
r = list()
for p in parameters:
f = odtbrain.backpropagate_3d_tilted(sino, angles, padval=0,
dtype=np.float64, onlyreal=True,
**p)
r.append(cutout(f))
data = np.array(r).flatten().view(float)
assert np.allclose(data, dataref)
def test_3d_backprop_pad():
sino, angles = create_test_sino_3d()
parameters = get_test_parameter_set(2)
# reference
rref = list()
for p in parameters:
fref = odtbrain.backpropagate_3d(sino, angles, padval="edge",
dtype=np.float64, onlyreal=False, **p)
rref.append(cutout(fref))
dataref = np.array(rref).flatten().view(float)
r = list()
for p in parameters:
f = odtbrain.backpropagate_3d_tilted(sino, angles, padval="edge",
dtype=np.float64, onlyreal=False,
**p)
r.append(cutout(f))
data = np.array(r).flatten().view(float)
assert np.allclose(data, dataref)
def test_3d_backprop_plane_rotation():
"""
A very soft test to check if planar rotation works fine
in the reconstruction with tilted angles.
"""
parameters = get_test_parameter_set(1)
results = []
# These are specially selected angles that don't give high results.
# Probably due to phase-wrapping, errors >2 may appear. Hence, we
# call it a soft test.
tilts = [1.1, 0.0, 0.234, 2.80922, -.29, 9.87]
for angz in tilts:
sino, angles = create_test_sino_3d_tilted(tilt_plane=angz, A=21)
rotmat = np.array([
[np.cos(angz), -np.sin(angz), 0],
[np.sin(angz), np.cos(angz), 0],
[0, 0, 1],
])
# rotate `tilted_axis` onto the y-z plane.
tilted_axis = np.dot(rotmat, [0, 1, 0])
rref = list()
for p in parameters:
fref = odtbrain.backpropagate_3d_tilted(sino, angles,
padval="edge",
tilted_axis=tilted_axis,
padding=(False, False),
dtype=np.float64,
onlyreal=False,
**p)
rref.append(cutout(fref))
data = np.array(rref).flatten().view(float)
results.append(data)
for ii in np.arange(len(results)):
assert np.allclose(results[ii], results[ii-1], atol=.2, rtol=.2)
def test_3d_backprop_plane_alignment_along_axes():
"""
Tests whether the reconstruction is always aligned with
the rotational axis (and not antiparallel).
"""
parameters = get_test_parameter_set(1)
p = parameters[0]
results = []
# These are specially selected angles that don't give high results.
# Probably due to phase-wrapping, errors >2 may appear. Hence, we
# call it a soft test.
tilts = [0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi]
for angz in tilts:
sino, angles = create_test_sino_3d_tilted(tilt_plane=angz, A=21)
rotmat = np.array([
[np.cos(angz), -np.sin(angz), 0],
[np.sin(angz), np.cos(angz), 0],
[0, 0, 1],
])
# rotate `tilted_axis` onto the y-z plane.
tilted_axis = np.dot(rotmat, [0, 1, 0])
fref = odtbrain.backpropagate_3d_tilted(sino, angles,
padval="edge",
tilted_axis=tilted_axis,
padding=(False, False),
dtype=np.float64,
onlyreal=True,
**p)
results.append(fref)
for ii in np.arange(len(results)):
assert np.allclose(results[ii], results[ii-1], atol=.2, rtol=.2)
if __name__ == "__main__":
# Run all tests
loc = locals()
for key in list(loc.keys()):
if key.startswith("test_") and hasattr(loc[key], "__call__"):
loc[key]()
| [
"numpy.allclose",
"common_methods.get_test_parameter_set",
"odtbrain.backpropagate_3d_tilted",
"numpy.array",
"numpy.dot",
"common_methods.create_test_sino_3d_tilted",
"common_methods.cutout",
"numpy.cos",
"numpy.sin",
"odtbrain.backpropagate_3d",
"common_methods.create_test_sino_3d"
] | [((250, 271), 'common_methods.create_test_sino_3d', 'create_test_sino_3d', ([], {}), '()\n', (269, 271), False, 'from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, cutout, get_test_parameter_set\n'), ((289, 314), 'common_methods.get_test_parameter_set', 'get_test_parameter_set', (['(2)'], {}), '(2)\n', (311, 314), False, 'from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, cutout, get_test_parameter_set\n'), ((925, 951), 'numpy.allclose', 'np.allclose', (['data', 'dataref'], {}), '(data, dataref)\n', (936, 951), True, 'import numpy as np\n'), ((1001, 1022), 'common_methods.create_test_sino_3d', 'create_test_sino_3d', ([], {}), '()\n', (1020, 1022), False, 'from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, cutout, get_test_parameter_set\n'), ((1040, 1065), 'common_methods.get_test_parameter_set', 'get_test_parameter_set', (['(2)'], {}), '(2)\n', (1062, 1065), False, 'from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, cutout, get_test_parameter_set\n'), ((1689, 1715), 'numpy.allclose', 'np.allclose', (['data', 'dataref'], {}), '(data, dataref)\n', (1700, 1715), True, 'import numpy as np\n'), ((1896, 1921), 'common_methods.get_test_parameter_set', 'get_test_parameter_set', (['(1)'], {}), '(1)\n', (1918, 1921), False, 'from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, cutout, get_test_parameter_set\n'), ((3481, 3506), 'common_methods.get_test_parameter_set', 'get_test_parameter_set', (['(1)'], {}), '(1)\n', (3503, 3506), False, 'from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, cutout, get_test_parameter_set\n'), ((389, 480), 'odtbrain.backpropagate_3d', 'odtbrain.backpropagate_3d', (['sino', 'angles'], {'padval': '(0)', 'dtype': 'np.float64', 'onlyreal': '(True)'}), '(sino, angles, padval=0, dtype=np.float64,\n onlyreal=True, **p)\n', (414, 480), False, 'import odtbrain\n'), ((656, 754), 
'odtbrain.backpropagate_3d_tilted', 'odtbrain.backpropagate_3d_tilted', (['sino', 'angles'], {'padval': '(0)', 'dtype': 'np.float64', 'onlyreal': '(True)'}), '(sino, angles, padval=0, dtype=np.float64,\n onlyreal=True, **p)\n', (688, 754), False, 'import odtbrain\n'), ((1140, 1237), 'odtbrain.backpropagate_3d', 'odtbrain.backpropagate_3d', (['sino', 'angles'], {'padval': '"""edge"""', 'dtype': 'np.float64', 'onlyreal': '(False)'}), "(sino, angles, padval='edge', dtype=np.float64,\n onlyreal=False, **p)\n", (1165, 1237), False, 'import odtbrain\n'), ((1413, 1518), 'odtbrain.backpropagate_3d_tilted', 'odtbrain.backpropagate_3d_tilted', (['sino', 'angles'], {'padval': '"""edge"""', 'dtype': 'np.float64', 'onlyreal': '(False)'}), "(sino, angles, padval='edge', dtype=np.\n float64, onlyreal=False, **p)\n", (1445, 1518), False, 'import odtbrain\n'), ((2207, 2256), 'common_methods.create_test_sino_3d_tilted', 'create_test_sino_3d_tilted', ([], {'tilt_plane': 'angz', 'A': '(21)'}), '(tilt_plane=angz, A=21)\n', (2233, 2256), False, 'from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, cutout, get_test_parameter_set\n'), ((2496, 2521), 'numpy.dot', 'np.dot', (['rotmat', '[0, 1, 0]'], {}), '(rotmat, [0, 1, 0])\n', (2502, 2521), True, 'import numpy as np\n'), ((3229, 3290), 'numpy.allclose', 'np.allclose', (['results[ii]', 'results[ii - 1]'], {'atol': '(0.2)', 'rtol': '(0.2)'}), '(results[ii], results[ii - 1], atol=0.2, rtol=0.2)\n', (3240, 3290), True, 'import numpy as np\n'), ((3815, 3864), 'common_methods.create_test_sino_3d_tilted', 'create_test_sino_3d_tilted', ([], {'tilt_plane': 'angz', 'A': '(21)'}), '(tilt_plane=angz, A=21)\n', (3841, 3864), False, 'from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, cutout, get_test_parameter_set\n'), ((4104, 4129), 'numpy.dot', 'np.dot', (['rotmat', '[0, 1, 0]'], {}), '(rotmat, [0, 1, 0])\n', (4110, 4129), True, 'import numpy as np\n'), ((4145, 4298), 'odtbrain.backpropagate_3d_tilted', 
'odtbrain.backpropagate_3d_tilted', (['sino', 'angles'], {'padval': '"""edge"""', 'tilted_axis': 'tilted_axis', 'padding': '(False, False)', 'dtype': 'np.float64', 'onlyreal': '(True)'}), "(sino, angles, padval='edge', tilted_axis=\n tilted_axis, padding=(False, False), dtype=np.float64, onlyreal=True, **p)\n", (4177, 4298), False, 'import odtbrain\n'), ((4666, 4727), 'numpy.allclose', 'np.allclose', (['results[ii]', 'results[ii - 1]'], {'atol': '(0.2)', 'rtol': '(0.2)'}), '(results[ii], results[ii - 1], atol=0.2, rtol=0.2)\n', (4677, 4727), True, 'import numpy as np\n'), ((538, 550), 'common_methods.cutout', 'cutout', (['fref'], {}), '(fref)\n', (544, 550), False, 'from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, cutout, get_test_parameter_set\n'), ((858, 867), 'common_methods.cutout', 'cutout', (['f'], {}), '(f)\n', (864, 867), False, 'from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, cutout, get_test_parameter_set\n'), ((1295, 1307), 'common_methods.cutout', 'cutout', (['fref'], {}), '(fref)\n', (1301, 1307), False, 'from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, cutout, get_test_parameter_set\n'), ((1621, 1630), 'common_methods.cutout', 'cutout', (['f'], {}), '(f)\n', (1627, 1630), False, 'from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, cutout, get_test_parameter_set\n'), ((2593, 2747), 'odtbrain.backpropagate_3d_tilted', 'odtbrain.backpropagate_3d_tilted', (['sino', 'angles'], {'padval': '"""edge"""', 'tilted_axis': 'tilted_axis', 'padding': '(False, False)', 'dtype': 'np.float64', 'onlyreal': '(False)'}), "(sino, angles, padval='edge', tilted_axis=\n tilted_axis, padding=(False, False), dtype=np.float64, onlyreal=False, **p)\n", (2625, 2747), False, 'import odtbrain\n'), ((3079, 3091), 'common_methods.cutout', 'cutout', (['fref'], {}), '(fref)\n', (3085, 3091), False, 'from common_methods import create_test_sino_3d, create_test_sino_3d_tilted, cutout, 
get_test_parameter_set\n'), ((566, 580), 'numpy.array', 'np.array', (['rref'], {}), '(rref)\n', (574, 580), True, 'import numpy as np\n'), ((880, 891), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (888, 891), True, 'import numpy as np\n'), ((1323, 1337), 'numpy.array', 'np.array', (['rref'], {}), '(rref)\n', (1331, 1337), True, 'import numpy as np\n'), ((1643, 1654), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (1651, 1654), True, 'import numpy as np\n'), ((2298, 2310), 'numpy.cos', 'np.cos', (['angz'], {}), '(angz)\n', (2304, 2310), True, 'import numpy as np\n'), ((2344, 2356), 'numpy.sin', 'np.sin', (['angz'], {}), '(angz)\n', (2350, 2356), True, 'import numpy as np\n'), ((2359, 2371), 'numpy.cos', 'np.cos', (['angz'], {}), '(angz)\n', (2365, 2371), True, 'import numpy as np\n'), ((3906, 3918), 'numpy.cos', 'np.cos', (['angz'], {}), '(angz)\n', (3912, 3918), True, 'import numpy as np\n'), ((3952, 3964), 'numpy.sin', 'np.sin', (['angz'], {}), '(angz)\n', (3958, 3964), True, 'import numpy as np\n'), ((3967, 3979), 'numpy.cos', 'np.cos', (['angz'], {}), '(angz)\n', (3973, 3979), True, 'import numpy as np\n'), ((2313, 2325), 'numpy.sin', 'np.sin', (['angz'], {}), '(angz)\n', (2319, 2325), True, 'import numpy as np\n'), ((3108, 3122), 'numpy.array', 'np.array', (['rref'], {}), '(rref)\n', (3116, 3122), True, 'import numpy as np\n'), ((3921, 3933), 'numpy.sin', 'np.sin', (['angz'], {}), '(angz)\n', (3927, 3933), True, 'import numpy as np\n')] |
from torchvision import datasets, transforms
import torch
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
def load_train_data(dataset_name, batch_size, val_split=0.9, dataset_seed=0, resolution=32):
if dataset_name.lower() == "mnist":
dataset = datasets.MNIST('./data/mnist', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor()
]))
image_size = 28*28
elif dataset_name.lower() == 'cifar':
dataset = datasets.CIFAR10('./data/cifar10', train=True, download=True,
transform=transforms.Compose([
transforms.Grayscale(),
transforms.ToTensor()
]))
image_size = 32*32
elif dataset_name.lower() == 'stl':
dataset = datasets.STL10('./data/stl10', split='unlabeled', download=True,
transform=transforms.Compose([
transforms.Grayscale(),
transforms.Resize((resolution,resolution)),
transforms.ToTensor()
]))
image_size = resolution*resolution
else:
dataset = None
image_size = None
indices = np.arange(0, len(dataset))
np.random.seed(dataset_seed)
np.random.shuffle(indices)
train_amnt = int(len(indices) * val_split)
train_indices = indices[:train_amnt]
val_indices = indices[train_amnt:]
train_sampler = SubsetRandomSampler(train_indices)
val_sampler = SubsetRandomSampler(val_indices)
train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=val_sampler)
return train_loader, val_loader, image_size
def load_test_data(dataset_name, batch_size):
    """Build a shuffled DataLoader over the MNIST test split.

    Args:
        dataset_name: Must be ``"mnist"`` (case-insensitive); it is the only
            dataset this helper supports.
        batch_size: Mini-batch size for the loader.

    Returns:
        A ``torch.utils.data.DataLoader`` over the MNIST test set.

    Raises:
        ValueError: If ``dataset_name`` is not ``"mnist"``.
    """
    if dataset_name.lower() != "mnist":
        # Fail fast: the original fell through with dataset=None, which only
        # surfaced later as an obscure error inside DataLoader.
        raise ValueError(
            "Unknown dataset_name %r; only 'mnist' is supported" % (dataset_name,))
    dataset = datasets.MNIST('./data', train=False, download=True,
                             transform=transforms.Compose([
                                 transforms.ToTensor()
                             ]))
    test_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
    return test_loader
| [
"torch.utils.data.sampler.SubsetRandomSampler",
"torchvision.transforms.Grayscale",
"numpy.random.seed",
"torch.utils.data.DataLoader",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"numpy.random.shuffle"
] | [((1483, 1511), 'numpy.random.seed', 'np.random.seed', (['dataset_seed'], {}), '(dataset_seed)\n', (1497, 1511), True, 'import numpy as np\n'), ((1516, 1542), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (1533, 1542), True, 'import numpy as np\n'), ((1690, 1724), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_indices'], {}), '(train_indices)\n', (1709, 1724), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((1743, 1775), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['val_indices'], {}), '(val_indices)\n', (1762, 1775), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((1795, 1882), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'sampler': 'train_sampler'}), '(dataset, batch_size=batch_size, sampler=\n train_sampler)\n', (1822, 1882), False, 'import torch\n'), ((1895, 1980), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'sampler': 'val_sampler'}), '(dataset, batch_size=batch_size, sampler=val_sampler\n )\n', (1922, 1980), False, 'import torch\n'), ((2394, 2467), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(dataset, batch_size=batch_size, shuffle=True)\n', (2421, 2467), False, 'import torch\n'), ((446, 467), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (465, 467), False, 'from torchvision import datasets, transforms\n'), ((2284, 2305), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2303, 2305), False, 'from torchvision import datasets, transforms\n'), ((759, 781), 'torchvision.transforms.Grayscale', 'transforms.Grayscale', ([], {}), '()\n', (779, 781), False, 'from torchvision import datasets, transforms\n'), ((822, 843), 'torchvision.transforms.ToTensor', 
'transforms.ToTensor', ([], {}), '()\n', (841, 843), False, 'from torchvision import datasets, transforms\n'), ((1134, 1156), 'torchvision.transforms.Grayscale', 'transforms.Grayscale', ([], {}), '()\n', (1154, 1156), False, 'from torchvision import datasets, transforms\n'), ((1195, 1238), 'torchvision.transforms.Resize', 'transforms.Resize', (['(resolution, resolution)'], {}), '((resolution, resolution))\n', (1212, 1238), False, 'from torchvision import datasets, transforms\n'), ((1276, 1297), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1295, 1297), False, 'from torchvision import datasets, transforms\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.