code
stringlengths
17
6.64M
def train_val_test_split(df, train_size=0.8, has_val=True):
    'Return a tuple (DataFrame, DatasetDict) with a custom train/val/split'
    # An integer train_size is interpreted as an absolute number of samples.
    if isinstance(train_size, int):
        train_size = train_size / len(df)
    # Deterministic shuffle so repeated runs produce the same split.
    df = df.sample(frac=1, random_state=0)
    df_train, df_test = train_test_split(df, test_size=1 - train_size, stratify=df['label'])
    if not has_val:
        hf_splits = datasets.DatasetDict({
            'train': datasets.Dataset.from_pandas(df_train),
            'test': datasets.Dataset.from_pandas(df_test),
        })
        return (df_train, df_test), hf_splits
    # Split the held-out part evenly into validation and test, stratified.
    df_test, df_val = train_test_split(df_test, test_size=0.5, stratify=df_test['label'])
    hf_splits = datasets.DatasetDict({
        'train': datasets.Dataset.from_pandas(df_train),
        'val': datasets.Dataset.from_pandas(df_val),
        'test': datasets.Dataset.from_pandas(df_test),
    })
    return (df_train, df_val, df_test), hf_splits
class EvalOnTrainCallback(TrainerCallback):
    'Custom callback to evaluate on the training set during training.'

    def __init__(self, trainer) -> None:
        # Keep a reference to the trainer so we can trigger an extra
        # evaluation pass on its own training dataset.
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        # When the trainer is about to evaluate, also evaluate on the training
        # split so metrics are logged under the 'train' prefix.
        if control.should_evaluate:
            # Snapshot the control flags before evaluate() can mutate them,
            # and return the snapshot so the original schedule is preserved.
            control_train = copy.deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix='train')
            return control_train
def get_trainer(model, dataset, tokenizer=None):
    'Return a trainer object for transformer models.'

    def compute_metrics(y_pred):
        'Computer metrics during training.'
        # Classification head: argmax over logits, then macro F1.
        (logits, labels) = y_pred
        predictions = np.argmax(logits, axis=(- 1))
        return evaluate.load('f1').compute(predictions=predictions, references=labels, average='macro')

    if (type(model).__name__ == 'SetFitModel'):
        # SetFit: contrastive fine-tuning with cosine-similarity loss.
        trainer = SetFitTrainer(model=model, train_dataset=dataset['train'], eval_dataset=dataset['val'], loss_class=CosineSimilarityLoss, metric='f1', batch_size=16, num_iterations=20, num_epochs=3)
        return trainer
    elif (('T5' in type(model).__name__) or ('FLAN' in type(model).__name__)):

        def compute_metrics_t5(y_pred, verbose=0):
            'Computer metrics during training for T5-like models.'
            # Decode generated token ids; restore pad ids where labels were
            # masked with -100, then map text -> binary (contains 'spam').
            (predictions, labels) = y_pred
            predictions = tokenizer.batch_decode(predictions, skip_special_tokens=True)
            labels = np.where((labels != (- 100)), labels, tokenizer.pad_token_id)
            labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
            predictions = [(1 if ('spam' in predictions[i]) else 0) for i in range(len(predictions))]
            labels = [(1 if ('spam' in labels[i]) else 0) for i in range(len(labels))]
            result = evaluate.load('f1').compute(predictions=predictions, references=labels, average='macro')
            return result

        # label_pad_token_id=-100 keeps padded label positions out of the loss.
        data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=(- 100), pad_to_multiple_of=8)
        training_args = Seq2SeqTrainingArguments(output_dir='experiments', per_device_train_batch_size=8, per_device_eval_batch_size=8, learning_rate=5e-05, num_train_epochs=5, predict_with_generate=True, fp16=False, evaluation_strategy='epoch', save_strategy='epoch', load_best_model_at_end=True, save_total_limit=5)
        trainer = Seq2SeqTrainer(model=model, args=training_args, train_dataset=dataset['train'], eval_dataset=dataset['val'], data_collator=data_collator, compute_metrics=compute_metrics_t5)
        # Also log metrics on the training split each epoch.
        trainer.add_callback(EvalOnTrainCallback(trainer))
        return trainer
    else:
        # Default: standard encoder classifier (BERT/RoBERTa-style).
        training_args = TrainingArguments(output_dir='experiments', per_device_train_batch_size=16, per_device_eval_batch_size=8, learning_rate=5e-05, num_train_epochs=10, evaluation_strategy='epoch', save_strategy='epoch', load_best_model_at_end=True, save_total_limit=10)
        trainer = Trainer(model=model, args=training_args, train_dataset=dataset['train'], eval_dataset=dataset['val'], compute_metrics=compute_metrics)
        trainer.add_callback(EvalOnTrainCallback(trainer))
        return trainer
def predict(trainer, model, dataset, tokenizer=None):
    'Convert the predict function to specific classes to unify the API.'
    model_type = type(model).__name__
    if model_type == 'SetFitModel':
        # SetFit models are directly callable on raw text.
        return model(dataset['text'])
    if 'T5' in model_type:
        # Generative model: decode the generated ids, then map each decoded
        # string to 1 when it mentions 'spam', else 0.
        raw = trainer.predict(dataset)
        decoded = tokenizer.batch_decode(raw.predictions, skip_special_tokens=True)
        return [1 if 'spam' in text else 0 for text in decoded]
    # Classifier head: highest-logit class index.
    return trainer.predict(dataset).predictions.argmax(axis=-1)
def train_llms(seeds, datasets, train_sizes, test_set='test'):
    'Train all the large language models.'
    # Full sweep: every seed x dataset x training-set size; each model in LLMS
    # is trained and scored, and all artefacts are written under outputs/.
    for seed in list(seeds):
        set_seed(seed)
        for dataset_name in list(datasets):
            for train_size in train_sizes:
                # Fresh score table per (dataset, train_size) combination.
                scores = pd.DataFrame(index=list(LLMS.keys()), columns=(list(SCORING.keys()) + ['training_time', 'inference_time']))
                df = get_dataset(dataset_name)
                (_, dataset) = train_val_test_split(df, train_size=train_size, has_val=True)
                experiment = f'llm_{dataset_name}_{test_set}_{train_size}_train_seed_{seed}'
                for (model_name, (model, tokenizer)) in LLMS.items():
                    tokenized_dataset = tokenize(dataset, tokenizer)
                    trainer = get_trainer(model, tokenized_dataset, tokenizer)
                    start = time.time()
                    train_result = trainer.train()
                    end = time.time()
                    # NOTE(review): chained indexing (.loc[name][col] = ...)
                    # writes through a view; verify it still assigns on the
                    # pandas version in use.
                    scores.loc[model_name]['training_time'] = (end - start)
                    if ('SetFit' not in model_name):
                        # SetFit trainers expose no log history to plot.
                        log = pd.DataFrame(trainer.state.log_history)
                        log.to_csv(f'outputs/csv/loss_{model_name}_{experiment}.csv')
                        plot_loss(experiment, dataset_name, model_name)
                    start = time.time()
                    predictions = predict(trainer, model, tokenized_dataset[test_set], tokenizer)
                    end = time.time()
                    for (score_name, score_fn) in SCORING.items():
                        scores.loc[model_name][score_name] = score_fn(dataset[test_set]['label'], predictions)
                    scores.loc[model_name]['inference_time'] = (end - start)
                    save_scores(experiment, model_name, scores.loc[model_name].to_dict())
                plot_scores(experiment, dataset_name)
                print(scores)
def train_baselines(seeds, datasets, train_sizes, test_set='test'):
    'Train all the baseline models.'
    # Classical ML sweep over TF-IDF features; mirrors train_llms but uses
    # cross-validation for the 'val' setting and a plain fit for 'test'.
    init_nltk()
    for seed in list(seeds):
        set_seed(seed)
        for dataset_name in list(datasets):
            for train_size in train_sizes:
                scores = pd.DataFrame(index=list(MODELS.keys()), columns=(list(SCORING.keys()) + ['training_time', 'inference_time']))
                df = get_dataset(dataset_name)
                # Adds the cleaned 'transformed_text' column used by TF-IDF.
                df = transform_df(df)
                ((df_train, df_val, df_test), _) = train_val_test_split(df, train_size=train_size, has_val=True)
                experiment = f'ml_{dataset_name}_{test_set}_{train_size}_train_seed_{seed}'
                for (model_name, (model, max_iter)) in MODELS.items():
                    # NOTE(review): max_iter is used as the TF-IDF vocabulary
                    # cap (max_features), not an iteration count — confirm the
                    # intent of the name in MODELS.
                    encoder = TfidfVectorizer(max_features=max_iter)
                    (X_train, y_train, encoder) = encode_df(df_train, encoder)
                    (X_test, y_test, encoder) = encode_df(df_test, encoder)
                    if (test_set == 'val'):
                        # 5-fold CV on the training split only.
                        cv = cross_validate(model, X_train, y_train, scoring=list(SCORING.keys()), cv=5, n_jobs=(- 1))
                        for (score_name, score_fn) in SCORING.items():
                            scores.loc[model_name][score_name] = cv[f'test_{score_name}'].mean()
                    if (test_set == 'test'):
                        start = time.time()
                        model.fit(X_train, y_train)
                        end = time.time()
                        scores.loc[model_name]['training_time'] = (end - start)
                        start = time.time()
                        y_pred = model.predict(X_test)
                        end = time.time()
                        scores.loc[model_name]['inference_time'] = (end - start)
                        for (score_name, score_fn) in SCORING.items():
                            scores.loc[model_name][score_name] = score_fn(y_pred, y_test)
                    save_scores(experiment, model_name, scores.loc[model_name].to_dict())
                plot_scores(experiment, dataset_name)
                print(scores)
def init_nltk():
    'Download the NLTK resources needed for tokenization and stopword removal.'
    for resource in ('punkt', 'stopwords'):
        nltk.download(resource)
def tokenize_words(text):
    'Tokenize words in text and remove punctuation'
    # Lower-case first, then keep only alphanumeric tokens.
    tokens = word_tokenize(str(text).lower())
    return [tok for tok in tokens if tok.isalnum()]
def remove_stopwords(text):
    'Remove stopwords from the text'
    # Fetch the English stopword list once rather than per token.
    english = stopwords.words('english')
    return [tok for tok in text if tok not in english]
def stem(text):
    'Stem the text (originate => origin)'
    # `ps` is the module-level stemmer instance.
    return [ps.stem(tok) for tok in text]
def transform(text):
    'Tokenize, remove stopwords, stem the text'
    # Pipeline: tokenize -> drop stopwords -> stem -> rejoin to one string.
    return ' '.join(stem(remove_stopwords(tokenize_words(text))))
def transform_df(df):
    'Apply the transform function to the dataframe'
    # Adds (in place) the cleaned text column used by the TF-IDF encoder.
    df['transformed_text'] = df['text'].map(transform)
    return df
def encode_df(df, encoder=None):
    'Encode the features for training set'
    # A fitted vectorizer exposes `vocabulary_`; reuse it for val/test so the
    # feature space matches training. Otherwise fit it on this frame.
    already_fitted = hasattr(encoder, 'vocabulary_')
    if already_fitted:
        X = encoder.transform(df['transformed_text']).toarray()
    else:
        X = encoder.fit_transform(df['transformed_text']).toarray()
    y = df['label'].values
    return (X, y, encoder)
def tokenize(dataset, tokenizer):
    'Tokenize dataset'

    def tokenization(examples):
        # Standard encoder-style tokenization (BERT/RoBERTa classifiers).
        return tokenizer(examples['text'], padding='max_length', truncation=True)

    def tokenization_t5(examples, padding='max_length'):
        # Seq2seq tokenization: prepend the task prefix, tokenize the label
        # text, and replace pad ids in the labels with -100 so they are
        # ignored by the loss.
        text = [('classify as ham or spam: ' + item) for item in examples['text']]
        inputs = tokenizer(text, max_length=tokenizer.model_max_length, padding=padding, truncation=True)
        # NOTE: max_label_length is a closure over a variable assigned below
        # in the T5 branch, before dataset.map() invokes this function.
        labels = tokenizer(text_target=examples['label'], max_length=max_label_length, padding=True, truncation=True)
        inputs['labels'] = [[(x if (x != tokenizer.pad_token_id) else (- 100)) for x in label] for label in labels['input_ids']]
        return inputs

    if (tokenizer is None):
        # Classical ML pipeline: nothing to tokenize here.
        return dataset
    elif ('T5' in type(tokenizer).__name__):
        # Map numeric labels to the strings T5 generates, then compute the
        # longest tokenized label to bound the label sequence length.
        dataset = dataset.map((lambda x: {'label': ('ham' if (x['label'] == 0) else 'spam')}))
        tokenized_label = dataset['train'].map((lambda x: tokenizer(x['label'], truncation=True)), batched=True)
        max_label_length = max([len(x) for x in tokenized_label['input_ids']])
        return dataset.map(tokenization_t5, batched=True, remove_columns=['label'])
    else:
        return dataset.map(tokenization, batched=True)
def set_seed(seed) -> None:
    'Fix random seeds'
    # Seed every RNG used by the pipeline: Python, NumPy and PyTorch.
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def plot_loss(experiment: str, dataset_name: str, model_name: str) -> None:
    'Plot loss curve for LLMs.'
    # Load the trainer log written by train_llms; the final row is the
    # training summary, not an epoch entry, so drop it.
    log = pd.read_csv(f'outputs/csv/loss_{model_name}_{experiment}.csv')
    log = pd.DataFrame(log).iloc[:(- 1)]
    train_losses = log['train_loss'].dropna().values
    eval_losses = log['eval_loss'].dropna().values
    # One point per epoch, starting at 1.
    x = np.arange(1, (len(train_losses) + 1), step=1)
    with plt.style.context(['science', 'high-vis']):
        (fig, ax) = plt.subplots()
        plt.plot(x, train_losses, label='Training loss')
        plt.plot(x, eval_losses, label='Evaluation loss')
        ax.set_title(f'{model_name} ({dataset_name.upper()})')
        ax.set_xticks(x, labels=range(1, (len(x) + 1)))
        ax.set_xlabel('Epochs')
        ax.set_ylabel('Loss')
        ax.legend(loc='upper right')
        # Ensure output directories exist before saving both formats.
        Path(f'outputs/pdf/').mkdir(parents=True, exist_ok=True)
        Path(f'outputs/png/').mkdir(parents=True, exist_ok=True)
        plt.savefig(f'outputs/pdf/loss_{model_name}_{experiment}.pdf', format='pdf')
        plt.savefig(f'outputs/png/loss_{model_name}_{experiment}.png', format='png', dpi=300)
        plt.show()
def plot_scores(experiment: str, dataset_name: str) -> None:
    'Plot scores as histogram.'
    # Grouped bar chart (F1 / precision / recall) per model, read back from
    # the csv written by save_scores.
    scores = pd.read_csv(f'outputs/csv/{experiment}.csv', index_col=0)
    x = np.arange(len(scores))
    width = 0.2
    (fig, ax) = plt.subplots(figsize=(9, 3))
    # Offset the three bar groups around each model's x position.
    rects1 = ax.bar(x=(x - width), height=scores['f1'], width=width, label='F1 score')
    rects2 = ax.bar(x=x, height=scores['precision'], width=width, label='Precision')
    rects3 = ax.bar(x=(x + width), height=scores['recall'], width=width, label='Recall')
    ax.set_title(f'{dataset_name.upper()}')
    ax.set_ylabel('Score')
    ax.set_xticks(x, labels=scores.index, fontsize=10)
    # Legend placed below the axes.
    plt.legend(bbox_to_anchor=(0.5, (- 0.25)), loc='lower center', ncol=4)
    fig.tight_layout()
    Path(f'outputs/pdf/').mkdir(parents=True, exist_ok=True)
    Path(f'outputs/png/').mkdir(parents=True, exist_ok=True)
    plt.savefig(f'outputs/pdf/{experiment}.pdf', format='pdf')
    plt.savefig(f'outputs/png/{experiment}.png', format='png', dpi=300)
    plt.show()
def plot_pie_charts() -> None:
    'Plot ham/spam distribution for each dataset.'
    # One donut chart per dataset, side by side.
    dataset_names = ['ling', 'sms', 'spamassassin', 'enron']
    (fig, axs) = plt.subplots(1, 4, figsize=(16, 4))
    for (i, dataset_name) in enumerate(dataset_names):
        df = get_dataset(dataset_name)
        # wedgeprops width < 1 turns the pie into a donut; the escaped \%
        # is for the LaTeX-style text rendering used by the plot style.
        axs[i].pie(df['label'].value_counts().to_numpy(), autopct='%1.2f\\%%', pctdistance=0.35, startangle=(- 30), wedgeprops={'width': 0.3}, textprops={'fontsize': 22})
        axs[i].set_title(f'''{dataset_name.upper()} ({len(df):,} samples)''', fontsize=24)
    fig.legend(['spam', 'ham'], bbox_to_anchor=(0.5, (- 0.1)), loc='lower center', ncol=2, prop={'size': 22})
    fig.tight_layout()
    # Negative spacing pulls the four charts closer together.
    plt.subplots_adjust(wspace=(- 0.3))
    plt.savefig(f'outputs/pdf/pie_charts.pdf', format='pdf')
    plt.savefig(f'outputs/png/pie_charts.png', format='png', dpi=300)
    plt.show()
def save_scores(experiment: str, index: str, values: dict) -> None:
    'Log scores for individual models in the corresponding csv file'
    # Row templates for the two experiment families; the model name decides
    # which template is used when the csv does not exist yet.
    llms = ['BERT', 'RoBERTa', 'SetFit-MiniLM', 'SetFit-mpnet', 'FLAN-T5-small', 'FLAN-T5-base']
    models = ['NB', 'LR', 'KNN', 'SVM', 'XGBoost', 'LightGBM']
    Path(f'outputs/csv/').mkdir(parents=True, exist_ok=True)
    file = Path(f'outputs/csv/{experiment}.csv')
    if file.is_file():
        # Append/overwrite this model's row in the existing table.
        scores = pd.read_csv(f'outputs/csv/{experiment}.csv', index_col=0)
        scores.loc[index] = values
    else:
        # First write for this experiment: create the full (mostly empty)
        # table so later rows land in a stable order.
        if (index in llms):
            scores = pd.DataFrame(index=llms, columns=(list(SCORING.keys()) + ['training_time', 'inference_time']))
        else:
            scores = pd.DataFrame(index=models, columns=(list(SCORING.keys()) + ['training_time', 'inference_time']))
        scores.loc[index] = values
    scores.to_csv(f'outputs/csv/{experiment}.csv')
def Deconv(inputs, f_dim_in, dim, net, batch_size, f_dim_out=None, stride=2):
    'Build a 4x4x4 transposed 3D convolution for generator sub-net `net`.'
    # Default: halve the number of feature maps at each upsampling step.
    if f_dim_out is None:
        f_dim_out = int(f_dim_in / 2)
    layer_name = 'g/net_' + net + '/deconv'
    return tl.layers.DeConv3dLayer(
        inputs,
        shape=[4, 4, 4, f_dim_out, f_dim_in],
        output_shape=[batch_size, dim, dim, dim, f_dim_out],
        strides=[1, stride, stride, stride, 1],
        W_init=tf.random_normal_initializer(stddev=0.02),
        act=tf.identity,
        name=layer_name)
def Conv3D(inputs, f_dim_out, net, f_dim_in=None, batch_norm=False, is_train=True):
    'Build a strided 4x4x4 3D convolution for discriminator sub-net `net`.'
    # Default: the input has half as many feature maps as the output.
    if f_dim_in is None:
        f_dim_in = int(f_dim_out / 2)
    conv = tl.layers.Conv3dLayer(
        inputs,
        shape=[4, 4, 4, f_dim_in, f_dim_out],
        W_init=tf.random_normal_initializer(stddev=0.02),
        strides=[1, 2, 2, 2, 1],
        name='d/net_' + net + '/conv')
    if not batch_norm:
        return conv
    return tl.layers.BatchNormLayer(conv, is_train=is_train, name='d/net_' + net + '/batch_norm')
def generator_64(inputs, is_train=True, reuse=False, batch_size=128, sig=False):
    'Build the 64^3 voxel generator: dense projection + four 3D deconvolutions.'
    # Spatial sizes at each stage: 4 -> 8 -> 16 -> 32 -> 64.
    (output_size, half, forth, eighth, sixteenth) = (64, 32, 16, 8, 4)
    gf_dim = 512
    with tf.variable_scope('gen', reuse=reuse) as vs:
        net_0 = tl.layers.InputLayer(inputs, name='g/net_0/in')
        # Project the latent vector to a 4x4x4 x gf_dim volume.
        net_1 = tl.layers.DenseLayer(net_0, n_units=(((gf_dim * sixteenth) * sixteenth) * sixteenth), W_init=tf.random_normal_initializer(stddev=0.02), act=tf.identity, name='g/net_1/dense')
        net_1 = tl.layers.ReshapeLayer(net_1, shape=[(- 1), sixteenth, sixteenth, sixteenth, gf_dim], name='g/net_1/reshape')
        net_1 = tl.layers.BatchNormLayer(net_1, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_1/batch_norm')
        net_1.outputs = tf.nn.relu(net_1.outputs, name='g/net_1/relu')
        # Each Deconv doubles the spatial size and halves the feature maps.
        net_2 = Deconv(net_1, gf_dim, eighth, '2', batch_size)
        net_2 = tl.layers.BatchNormLayer(net_2, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_2/batch_norm')
        net_2.outputs = tf.nn.relu(net_2.outputs, name='g/net_2/relu')
        net_3 = Deconv(net_2, int((gf_dim / 2)), forth, '3', batch_size)
        net_3 = tl.layers.BatchNormLayer(net_3, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_3/batch_norm')
        net_3.outputs = tf.nn.relu(net_3.outputs, name='g/net_3/relu')
        net_4 = Deconv(net_3, int((gf_dim / 4)), half, '4', batch_size)
        net_4 = tl.layers.BatchNormLayer(net_4, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_4/batch_norm')
        net_4.outputs = tf.nn.relu(net_4.outputs, name='g/net_4/relu')
        # Final stage outputs a single channel, squeezed to (B, 64, 64, 64).
        net_5 = Deconv(net_4, int((gf_dim / 8)), output_size, '5', batch_size, f_dim_out=1)
        net_5.outputs = tf.reshape(net_5.outputs, [batch_size, output_size, output_size, output_size])
        # Output squashing: sigmoid in [0,1] or tanh in [-1,1].
        if sig:
            net_5.outputs = tf.nn.sigmoid(net_5.outputs)
        else:
            net_5.outputs = tf.nn.tanh(net_5.outputs)
        return (net_5, net_5.outputs)
def discriminator(inputs, output_size, sig=False, is_train=True, reuse=False, batch_size=128, output_units=1):
    'Build the 3D-GAN discriminator: four strided 3D convolutions + dense head.'
    inputs = tf.reshape(inputs, [batch_size, output_size, output_size, output_size, 1])
    # Base number of feature maps scales with the voxel resolution.
    df_dim = output_size
    with tf.variable_scope('dis', reuse=reuse) as vs:
        net_0 = tl.layers.InputLayer(inputs, name='d/net_0/in')
        # Each Conv3D halves the spatial size; feature maps double each stage.
        net_1 = Conv3D(net_0, df_dim, '1', f_dim_in=1, batch_norm=False)
        net_1.outputs = tf.nn.leaky_relu(net_1.outputs, alpha=0.2, name='d/net_1/lrelu')
        net_2 = Conv3D(net_1, int((df_dim * 2)), '2', batch_norm=True, is_train=is_train)
        net_2.outputs = tf.nn.leaky_relu(net_2.outputs, alpha=0.2, name='d/net_2/lrelu')
        net_3 = Conv3D(net_2, int((df_dim * 4)), '3', batch_norm=True, is_train=is_train)
        net_3.outputs = tf.nn.leaky_relu(net_3.outputs, alpha=0.2, name='d/net_3/lrelu')
        net_4 = Conv3D(net_3, int((df_dim * 8)), '4', batch_norm=True, is_train=is_train)
        net_4.outputs = tf.nn.leaky_relu(net_4.outputs, alpha=0.2, name='d/net_4/lrelu')
        # Flatten and project to the logit(s).
        net_5 = FlattenLayer(net_4, name='d/net_5/flatten')
        net_5 = tl.layers.DenseLayer(net_5, n_units=output_units, act=tf.identity, W_init=tf.random_normal_initializer(stddev=0.02), name='d/net_5/dense')
        # Return raw logits, or probabilities when sig=True.
        if sig:
            return (net_5, tf.nn.sigmoid(net_5.outputs))
        else:
            return (net_5, net_5.outputs)
def make_inputs_raw(file_batch):
    'Load a batch of raw 64^3 uint8 voxel files; also return the start time.'
    voxel_dtype = np.dtype((np.uint8, (64, 64, 64)))
    # Stack all files into one (batch, 64, 64, 64) array.
    models = np.array([np.fromfile(path, dtype=voxel_dtype).reshape((64, 64, 64))
                       for path in file_batch])
    return (models, time.time())
def load_networks(checkpoint_dir, sess, net_g, net_d, epoch=''):
    'Restore generator and discriminator weights from .npz checkpoints.'
    print('[*] Loading checkpoints...')
    # A non-empty epoch selects the per-epoch snapshot files.
    suffix = ('_' + epoch) if len(epoch) >= 1 else epoch
    net_g_name = os.path.join(checkpoint_dir, 'net_g' + suffix + '.npz')
    net_d_name = os.path.join(checkpoint_dir, 'net_d' + suffix + '.npz')
    if os.path.exists(net_g_name) and os.path.exists(net_d_name):
        for params_file, net in ((net_g_name, net_g), (net_d_name, net_d)):
            loaded = tl.files.load_npz(name=params_file)
            tl.files.assign_params(sess, loaded, net)
        print('[*] Loading Generator and Discriminator checkpoints SUCCESS!')
    else:
        print('[!] Loading checkpoints failed!')
def save_networks(checkpoint_dir, sess, net_g, net_d, epoch):
    'Save current and per-epoch generator/discriminator weights as .npz.'
    print('[*] Saving checkpoints...')
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    # Latest snapshot plus an epoch-tagged copy for each network.
    targets = (
        (net_g, 'net_g.npz'),
        (net_d, 'net_d.npz'),
        (net_g, 'net_g_%d.npz' % epoch),
        (net_d, 'net_d_%d.npz' % epoch),
    )
    for net, filename in targets:
        tl.files.save_npz(net.all_params, name=os.path.join(checkpoint_dir, filename), sess=sess)
    print('[*] Saving checkpoints SUCCESS!')
def save_voxels(save_dir, models, epock):
    'Save the first voxel model of the batch as <save_dir><epoch>.npy.'
    print('Saving the model')
    np.save(save_dir + str(epock), models[0])
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    """Smooth (and optionally differentiate) data with a Savitzky-Golay filter.

    Fits successive windows of `window_size` samples with a degree-`order`
    polynomial by linear least squares and evaluates its `deriv`-th derivative
    at the window center.  The signal is mirror-padded at both ends so the
    output has the same length as `y`.

    Parameters
    ----------
    y : 1-D array of floats, the signal to filter.
    window_size : odd positive int, length of the fitting window.
    order : int, polynomial degree (requires window_size >= order + 2).
    deriv : int, derivative order to compute (0 = plain smoothing).
    rate : sampling-rate factor applied to the derivative.

    Returns
    -------
    np.ndarray with the same length as `y`.

    Raises
    ------
    ValueError : window_size/order not convertible to int.
    TypeError : window_size even/nonpositive or too small for `order`
        (kept as TypeError for backward compatibility with callers).
    """
    from math import factorial
    try:
        # np.int was removed in NumPy 1.24 — use the builtin int instead.
        window_size = abs(int(window_size))
        order = abs(int(order))
    except (ValueError, TypeError):
        raise ValueError('window_size and order have to be of type int')
    if ((window_size % 2) != 1) or (window_size < 1):
        raise TypeError('window_size size must be a positive odd number')
    if window_size < (order + 2):
        raise TypeError('window_size is too small for the polynomials order')
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # Vandermonde design matrix of the local fit; row `deriv` of its
    # pseudo-inverse gives the convolution coefficients (np.mat is
    # deprecated, so use a plain ndarray and index directly).
    b = np.array([[(k ** i) for i in order_range]
                  for k in range(-half_window, half_window + 1)], dtype=float)
    m = np.linalg.pinv(b)[deriv] * (rate ** deriv) * factorial(deriv)
    # Mirror the signal at both ends to limit boundary artifacts.
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
def render_graphs(save_dir, epoch, track_g_loss, track_d_loss, epoch_arr):
    'Plot generator and discriminator loss curves and save them as PNG.'
    if (not os.path.exists((save_dir + '/plots/'))):
        os.makedirs((save_dir + '/plots/'))
    # Skip plotting until enough history has accumulated.
    if (len(track_d_loss) > 51):
        plt.plot(epoch_arr, track_d_loss, color='blue', alpha=0.5)
        plt.plot(epoch_arr, track_g_loss, color='red', alpha=0.5)
        plt.legend(("Discriminator's loss", "Generator's loss"), loc='upper right')
        plt.title('64-3D-GAN')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.grid(True)
        plt.savefig((((save_dir + '/plots/') + str(epoch)) + '.png'))
        # Clear the figure so the next call starts from a blank canvas.
        plt.clf()
def save_values(save_dir, track_g_loss, track_d_loss, epoch_arr):
    'Persist loss histories and epoch numbers under <save_dir>/plots/.'
    for filename, values in (('track_g_loss', track_g_loss),
                             ('track_d_loss', track_d_loss),
                             ('epochs', epoch_arr)):
        np.save(save_dir + '/plots/' + filename, values)
def load_values(save_dir):
    'Load the loss histories and epoch numbers saved by save_values.'
    names = ('track_g_loss', 'track_d_loss', 'epochs')
    return [list(np.load(save_dir + '/plots/' + name + '.npy')) for name in names]
def cal_acc(zeros, ones):
    'Discriminator accuracy: fakes should score < 0.5 and reals > 0.5.'
    correct = 0.0
    # NaN scores never count as correct.
    for example in zeros:
        if (not np.isnan(example[0])) and example[0] < 0.5:
            correct += 1.0
    for example in ones:
        if (not np.isnan(example[0])) and example[0] > 0.5:
            correct += 1.0
    accuracy = correct / float(len(zeros) + len(ones))
    print('The accuracy of the discriminator is: ' + str(accuracy))
    return accuracy
def Deconv(inputs, f_dim_in, dim, net, batch_size, f_dim_out=None, stride=2):
    'Build a 4x4x4 transposed 3D convolution for generator sub-net `net`.'
    # Halve the feature maps by default at each upsampling stage.
    out_maps = int(f_dim_in / 2) if f_dim_out is None else f_dim_out
    return tl.layers.DeConv3dLayer(
        inputs,
        shape=[4, 4, 4, out_maps, f_dim_in],
        output_shape=[batch_size, dim, dim, dim, out_maps],
        strides=[1, stride, stride, stride, 1],
        W_init=tf.random_normal_initializer(stddev=0.02),
        act=tf.identity,
        name='g/net_' + net + '/deconv')
def Conv3D(inputs, f_dim_out, net, f_dim_in=None, batch_norm=False, is_train=True):
    """Build a strided 4x4x4 3D convolution for discriminator sub-net `net`.

    Parameters
    ----------
    inputs : preceding TensorLayer layer.
    f_dim_out : int, number of output feature maps.
    net : str, suffix for the layer names ('d/net_<net>/...').
    f_dim_in : int or None, input feature maps; defaults to f_dim_out // 2.
    batch_norm : bool, wrap the convolution in a BatchNormLayer when True.
    is_train : bool, forwarded to the batch-norm layer.
    """
    if (f_dim_in is None):
        # BUG FIX: f_dim_out / 2 is a float and breaks the integral filter
        # shape; wrap in int() as the other Conv3D definitions in this file do.
        f_dim_in = int(f_dim_out / 2)
    layer = tl.layers.Conv3dLayer(inputs, shape=[4, 4, 4, f_dim_in, f_dim_out],
                                  W_init=tf.random_normal_initializer(stddev=0.02),
                                  strides=[1, 2, 2, 2, 1],
                                  name=(('d/net_' + net) + '/conv'))
    if batch_norm:
        return tl.layers.BatchNormLayer(layer, is_train=is_train,
                                        name=(('d/net_' + net) + '/batch_norm'))
    else:
        return layer
def generator_64(inputs, is_train=True, reuse=False, batch_size=128, sig=False):
    """Build the 64^3 voxel generator: dense projection + four 3D deconvolutions.

    Parameters
    ----------
    inputs : latent-vector tensor fed to the first dense layer.
    is_train : bool, forwarded to the batch-norm layers.
    reuse : bool, reuse the 'gen' variable scope.
    batch_size : int, static batch size used in the deconv output shapes.
    sig : bool, sigmoid output in [0,1] when True, tanh in [-1,1] otherwise.

    Returns
    -------
    (net, outputs) : final TensorLayer layer and its output tensor.
    """
    # Spatial sizes at each stage: 4 -> 8 -> 16 -> 32 -> 64.
    (output_size, half, forth, eighth, sixteenth) = (64, 32, 16, 8, 4)
    gf_dim = 512
    with tf.variable_scope('gen', reuse=reuse) as vs:
        net_0 = tl.layers.InputLayer(inputs, name='g/net_0/in')
        # Project the latent vector to a 4x4x4 x gf_dim volume.
        net_1 = tl.layers.DenseLayer(net_0, n_units=(((gf_dim * sixteenth) * sixteenth) * sixteenth), W_init=tf.random_normal_initializer(stddev=0.02), act=tf.identity, name='g/net_1/dense')
        net_1 = tl.layers.ReshapeLayer(net_1, shape=[(- 1), sixteenth, sixteenth, sixteenth, gf_dim], name='g/net_1/reshape')
        net_1 = tl.layers.BatchNormLayer(net_1, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_1/batch_norm')
        net_1.outputs = tf.nn.relu(net_1.outputs, name='g/net_1/relu')
        net_2 = Deconv(net_1, gf_dim, eighth, '2', batch_size)
        net_2 = tl.layers.BatchNormLayer(net_2, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_2/batch_norm')
        net_2.outputs = tf.nn.relu(net_2.outputs, name='g/net_2/relu')
        # BUG FIX: gf_dim / 2|4|8 are floats and break the integral filter
        # dims; wrap in int() as the sibling generator_64 definitions do.
        net_3 = Deconv(net_2, int(gf_dim / 2), forth, '3', batch_size)
        net_3 = tl.layers.BatchNormLayer(net_3, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_3/batch_norm')
        net_3.outputs = tf.nn.relu(net_3.outputs, name='g/net_3/relu')
        net_4 = Deconv(net_3, int(gf_dim / 4), half, '4', batch_size)
        net_4 = tl.layers.BatchNormLayer(net_4, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_4/batch_norm')
        net_4.outputs = tf.nn.relu(net_4.outputs, name='g/net_4/relu')
        # Final stage outputs one channel, squeezed to (B, 64, 64, 64).
        net_5 = Deconv(net_4, int(gf_dim / 8), output_size, '5', batch_size, f_dim_out=1)
        net_5.outputs = tf.reshape(net_5.outputs, [batch_size, output_size, output_size, output_size])
        if sig:
            net_5.outputs = tf.nn.sigmoid(net_5.outputs)
        else:
            net_5.outputs = tf.nn.tanh(net_5.outputs)
        return (net_5, net_5.outputs)
def discriminator(inputs, output_size, improved=False, sig=False, is_train=True, reuse=False, batch_size=128, output_units=1):
    'Build the 3D-GAN discriminator; improved=True disables batch norm (WGAN-GP style).'
    inputs = tf.reshape(inputs, [batch_size, output_size, output_size, output_size, 1])
    # Base number of feature maps scales with the voxel resolution.
    df_dim = output_size
    with tf.variable_scope('dis', reuse=reuse) as vs:
        net_0 = tl.layers.InputLayer(inputs, name='d/net_0/in')
        # Each Conv3D halves the spatial size; feature maps double per stage.
        # Batch norm is skipped when `improved` is set.
        net_1 = Conv3D(net_0, df_dim, '1', f_dim_in=1, batch_norm=False)
        net_1.outputs = tf.nn.leaky_relu(net_1.outputs, alpha=0.2, name='d/net_1/lrelu')
        net_2 = Conv3D(net_1, (df_dim * 2), '2', batch_norm=(not improved), is_train=is_train)
        net_2.outputs = tf.nn.leaky_relu(net_2.outputs, alpha=0.2, name='d/net_2/lrelu')
        net_3 = Conv3D(net_2, (df_dim * 4), '3', batch_norm=(not improved), is_train=is_train)
        net_3.outputs = tf.nn.leaky_relu(net_3.outputs, alpha=0.2, name='d/net_3/lrelu')
        net_4 = Conv3D(net_3, (df_dim * 8), '4', batch_norm=(not improved), is_train=is_train)
        net_4.outputs = tf.nn.leaky_relu(net_4.outputs, alpha=0.2, name='d/net_4/lrelu')
        # Flatten and project to the logit(s).
        net_5 = FlattenLayer(net_4, name='d/net_5/flatten')
        net_5 = tl.layers.DenseLayer(net_5, n_units=output_units, act=tf.identity, W_init=tf.random_normal_initializer(stddev=0.02), name='d/net_5/dense')
        # Return raw logits, or probabilities when sig=True.
        if sig:
            return (net_5, tf.nn.sigmoid(net_5.outputs))
        else:
            return (net_5, net_5.outputs)
def make_inputs_raw(file_batch):
    'Read each raw 64^3 uint8 voxel file into an array; also return the start time.'
    voxel_dtype = np.dtype((np.uint8, (64, 64, 64)))
    # NOTE(review): unlike the sibling make_inputs_raw variants, this one
    # returns a plain list of arrays rather than a stacked ndarray.
    models = [np.fromfile(path, dtype=voxel_dtype).reshape((64, 64, 64))
              for path in file_batch]
    return (models, time.time())
def load_networks(checkpoint_dir, sess, net_g, net_d, epoch=''):
    'Restore generator and discriminator weights from .npz checkpoints.'
    print('[*] Loading checkpoints...')
    # A non-empty epoch selects the per-epoch snapshot files.
    if len(epoch) >= 1:
        epoch = '_' + epoch
    g_path = os.path.join(checkpoint_dir, 'net_g' + epoch + '.npz')
    d_path = os.path.join(checkpoint_dir, 'net_d' + epoch + '.npz')
    if not (os.path.exists(g_path) and os.path.exists(d_path)):
        print('[!] Loading checkpoints failed!')
        return
    tl.files.assign_params(sess, tl.files.load_npz(name=g_path), net_g)
    tl.files.assign_params(sess, tl.files.load_npz(name=d_path), net_d)
    print('[*] Loading Generator and Discriminator checkpoints SUCCESS!')
def save_networks(checkpoint_dir, sess, net_g, net_d, epoch):
    'Save current and per-epoch generator/discriminator weights as .npz.'
    print('[*] Saving checkpoints...')
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    # Latest snapshot plus an epoch-tagged copy for each network.
    for net, filename in ((net_g, 'net_g.npz'),
                          (net_d, 'net_d.npz'),
                          (net_g, 'net_g_%d.npz' % epoch),
                          (net_d, 'net_d_%d.npz' % epoch)):
        tl.files.save_npz(net.all_params, name=os.path.join(checkpoint_dir, filename), sess=sess)
    print('[*] Saving checkpoints SUCCESS!')
def save_voxels(save_dir, models, epock):
    'Save the first voxel model of the batch as <save_dir><epoch>.npy.'
    print('Saving the model')
    target = save_dir + str(epock)
    np.save(target, models[0])
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    """Smooth (and optionally differentiate) data with a Savitzky-Golay filter.

    Least-squares fits a degree-`order` polynomial over sliding windows of
    `window_size` samples and evaluates its `deriv`-th derivative at the
    window center; the ends are mirror-padded so the result matches len(y).

    Raises ValueError when window_size/order are not integer-like, TypeError
    when window_size is even/nonpositive or too small for `order` (TypeError
    kept for backward compatibility with existing callers).
    """
    from math import factorial
    try:
        # np.int was removed in NumPy 1.24 — use the builtin int instead.
        window_size = abs(int(window_size))
        order = abs(int(order))
    except (ValueError, TypeError):
        raise ValueError('window_size and order have to be of type int')
    if ((window_size % 2) != 1) or (window_size < 1):
        raise TypeError('window_size size must be a positive odd number')
    if window_size < (order + 2):
        raise TypeError('window_size is too small for the polynomials order')
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # Row `deriv` of the design matrix's pseudo-inverse gives the filter
    # coefficients (ndarray replaces the deprecated np.mat + .A access).
    b = np.array([[(k ** i) for i in order_range]
                  for k in range(-half_window, half_window + 1)], dtype=float)
    m = np.linalg.pinv(b)[deriv] * (rate ** deriv) * factorial(deriv)
    # Mirror the signal at both ends to limit boundary artifacts.
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
def render_graphs(save_dir, epoch, track_d_loss_iter, track_d_loss, epoch_arr):
    'Plot the discriminator loss with a Savitzky-Golay smoothed overlay.'
    if (not os.path.exists((save_dir + '/plots/'))):
        os.makedirs((save_dir + '/plots/'))
    # Need more samples than the smoothing window (51) before plotting.
    if (len(track_d_loss) > 51):
        smoothed_d_loss = savitzky_golay(track_d_loss, 51, 3)
        plt.plot(epoch_arr, track_d_loss)
        plt.plot(epoch_arr, smoothed_d_loss, color='red')
        plt.legend(("Discriminator's loss", 'Savitzky–Golay'), loc='upper right')
        plt.title('64-3D-IWGAN')
        plt.xlabel('Epoch')
        plt.ylabel("Discriminator's loss")
        plt.grid(True)
        plt.savefig((((save_dir + '/plots/') + str(epoch)) + '.png'))
        # Clear the figure so the next call starts from a blank canvas.
        plt.clf()
def save_values(save_dir, track_d_loss_iter, track_d_loss, epoch_arr):
    'Persist discriminator loss histories and epoch numbers under <save_dir>/plots/.'
    for filename, values in (('track_d_loss_iter', track_d_loss_iter),
                             ('track_d_loss', track_d_loss),
                             ('epochs', epoch_arr)):
        np.save(save_dir + '/plots/' + filename, values)
def load_values(save_dir, valid=False):
    'Load saved loss histories plus the last recorded iteration index.'
    names = ('track_d_loss_iter', 'track_d_loss', 'epochs')
    outputs = [list(np.load(save_dir + '/plots/' + name + '.npy')) for name in names]
    # Convenience fourth element: the last iteration number seen.
    outputs.append(outputs[0][-1])
    return outputs
def Deconv(inputs, f_dim_in, dim, net, batch_size, f_dim_out=None, stride=2):
    'Build a 4x4x4 transposed 3D convolution for generator sub-net `net`.'
    # Default output width: half the input feature maps.
    if f_dim_out is None:
        f_dim_out = int(f_dim_in / 2)
    kwargs = dict(
        shape=[4, 4, 4, f_dim_out, f_dim_in],
        output_shape=[batch_size, dim, dim, dim, f_dim_out],
        strides=[1, stride, stride, stride, 1],
        W_init=tf.random_normal_initializer(stddev=0.02),
        act=tf.identity,
        name='g/net_' + net + '/deconv')
    return tl.layers.DeConv3dLayer(inputs, **kwargs)
def Conv3D(inputs, f_dim_out, net, f_dim_in=None, batch_norm=False, is_train=True):
    'Build a strided 4x4x4 3D convolution for discriminator sub-net `net`.'
    # Default: the input carries half as many feature maps as the output.
    if f_dim_in is None:
        f_dim_in = int(f_dim_out / 2)
    conv = tl.layers.Conv3dLayer(
        inputs,
        shape=[4, 4, 4, f_dim_in, f_dim_out],
        W_init=tf.random_normal_initializer(stddev=0.02),
        strides=[1, 2, 2, 2, 1],
        name='d/net_' + net + '/conv')
    if not batch_norm:
        return conv
    return tl.layers.BatchNormLayer(conv, is_train=is_train, name='d/net_' + net + '/batch_norm')
def generator_64(inputs, is_train=True, reuse=False, batch_size=128, sig=False):
    'Build the 64^3 voxel generator: dense projection + four 3D deconvolutions.'
    # Spatial sizes at each stage: 4 -> 8 -> 16 -> 32 -> 64.
    (output_size, half, forth, eighth, sixteenth) = (64, 32, 16, 8, 4)
    gf_dim = 512
    with tf.variable_scope('gen', reuse=reuse) as vs:
        net_0 = tl.layers.InputLayer(inputs, name='g/net_0/in')
        # Project the latent vector to a 4x4x4 x gf_dim volume.
        net_1 = tl.layers.DenseLayer(net_0, n_units=(((gf_dim * sixteenth) * sixteenth) * sixteenth), W_init=tf.random_normal_initializer(stddev=0.02), act=tf.identity, name='g/net_1/dense')
        net_1 = tl.layers.ReshapeLayer(net_1, shape=[(- 1), sixteenth, sixteenth, sixteenth, gf_dim], name='g/net_1/reshape')
        net_1 = tl.layers.BatchNormLayer(net_1, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_1/batch_norm')
        net_1.outputs = tf.nn.relu(net_1.outputs, name='g/net_1/relu')
        # Each Deconv doubles the spatial size and halves the feature maps.
        net_2 = Deconv(net_1, gf_dim, eighth, '2', batch_size)
        net_2 = tl.layers.BatchNormLayer(net_2, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_2/batch_norm')
        net_2.outputs = tf.nn.relu(net_2.outputs, name='g/net_2/relu')
        net_3 = Deconv(net_2, int((gf_dim / 2)), forth, '3', batch_size)
        net_3 = tl.layers.BatchNormLayer(net_3, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_3/batch_norm')
        net_3.outputs = tf.nn.relu(net_3.outputs, name='g/net_3/relu')
        net_4 = Deconv(net_3, int((gf_dim / 4)), half, '4', batch_size)
        net_4 = tl.layers.BatchNormLayer(net_4, is_train=is_train, gamma_init=tf.random_normal_initializer(1.0, 0.02), name='g/net_4/batch_norm')
        net_4.outputs = tf.nn.relu(net_4.outputs, name='g/net_4/relu')
        # Final stage outputs one channel, squeezed to (B, 64, 64, 64).
        net_5 = Deconv(net_4, int((gf_dim / 8)), output_size, '5', batch_size, f_dim_out=1)
        net_5.outputs = tf.reshape(net_5.outputs, [batch_size, output_size, output_size, output_size])
        # NOTE(review): unlike the other generator_64 variants in this file,
        # no tanh is applied when sig is False — outputs stay unbounded.
        # Presumably intentional for this (IWGAN-style) variant; confirm.
        if sig:
            net_5.outputs = tf.nn.sigmoid(net_5.outputs)
        return (net_5, net_5.outputs)
def discriminator(inputs, output_size, sig=False, is_train=True, reuse=False, batch_size=128, output_units=1):
    'Build the 3D-GAN discriminator: four strided 3D convolutions + dense head.'
    inputs = tf.reshape(inputs, [batch_size, output_size, output_size, output_size, 1])
    # Base number of feature maps scales with the voxel resolution.
    df_dim = output_size
    with tf.variable_scope('dis', reuse=reuse) as vs:
        net_0 = tl.layers.InputLayer(inputs, name='d/net_0/in')
        # Each Conv3D halves the spatial size; feature maps double per stage.
        net_1 = Conv3D(net_0, df_dim, '1', f_dim_in=1, batch_norm=False)
        net_1.outputs = tf.nn.leaky_relu(net_1.outputs, alpha=0.2, name='d/net_1/lrelu')
        net_2 = Conv3D(net_1, int((df_dim * 2)), '2', batch_norm=True, is_train=is_train)
        net_2.outputs = tf.nn.leaky_relu(net_2.outputs, alpha=0.2, name='d/net_2/lrelu')
        net_3 = Conv3D(net_2, int((df_dim * 4)), '3', batch_norm=True, is_train=is_train)
        net_3.outputs = tf.nn.leaky_relu(net_3.outputs, alpha=0.2, name='d/net_3/lrelu')
        net_4 = Conv3D(net_3, int((df_dim * 8)), '4', batch_norm=True, is_train=is_train)
        net_4.outputs = tf.nn.leaky_relu(net_4.outputs, alpha=0.2, name='d/net_4/lrelu')
        # Flatten and project to the logit(s).
        net_5 = FlattenLayer(net_4, name='d/net_5/flatten')
        net_5 = tl.layers.DenseLayer(net_5, n_units=output_units, act=tf.identity, W_init=tf.random_normal_initializer(stddev=0.02), name='d/net_5/dense')
        # Return raw logits, or probabilities when sig=True.
        if sig:
            return (net_5, tf.nn.sigmoid(net_5.outputs))
        else:
            return (net_5, net_5.outputs)
def make_inputs_raw(file_batch):
    """Load a batch of raw 64^3 uint8 voxel files as a float32 array.

    Args:
        file_batch: iterable of paths to raw binary voxel files.

    Returns:
        (models, start_time): the (N, 64, 64, 64) float32 batch and a
        timestamp taken right after loading.
    """
    voxel_dtype = np.dtype((np.uint8, (64, 64, 64)))
    grids = [np.fromfile(path, dtype=voxel_dtype).reshape((64, 64, 64)) for path in file_batch]
    batch = np.array(grids).astype(np.float32)
    start_time = time.time()
    return (batch, start_time)
def load_networks(checkpoint_dir, sess, net_g, net_d, epoch=''):
    """Restore generator/discriminator weights from `.npz` checkpoints.

    An optional epoch suffix selects a per-epoch snapshot
    (`net_g_<epoch>.npz`); the bare names hold the latest weights.
    Prints a failure notice if either file is missing.
    """
    print('[*] Loading checkpoints...')
    if len(epoch) >= 1:
        epoch = '_' + epoch
    net_g_name = os.path.join(checkpoint_dir, 'net_g' + epoch + '.npz')
    net_d_name = os.path.join(checkpoint_dir, 'net_d' + epoch + '.npz')
    if not (os.path.exists(net_g_name) and os.path.exists(net_d_name)):
        print('[!] Loading checkpoints failed!')
    else:
        tl.files.assign_params(sess, tl.files.load_npz(name=net_g_name), net_g)
        tl.files.assign_params(sess, tl.files.load_npz(name=net_d_name), net_d)
        print('[*] Loading Generator and Discriminator checkpoints SUCCESS!')
def save_networks(checkpoint_dir, sess, net_g, net_d, epoch):
    """Snapshot generator/discriminator weights to `.npz` files.

    Writes both the 'latest' files (net_g.npz / net_d.npz) and per-epoch
    archives (net_g_<epoch>.npz / net_d_<epoch>.npz), creating the
    checkpoint directory if needed.
    """
    print('[*] Saving checkpoints...')
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    targets = [
        (net_g, 'net_g.npz'),
        (net_d, 'net_d.npz'),
        (net_g, 'net_g_%d.npz' % epoch),
        (net_d, 'net_d_%d.npz' % epoch),
    ]
    for (net, fname) in targets:
        tl.files.save_npz(net.all_params, name=os.path.join(checkpoint_dir, fname), sess=sess)
    print('[*] Saving checkpoints SUCCESS!')
def save_voxels(save_dir, models, epock):
    """Persist the first voxel model of the batch as `<save_dir><epock>.npy`.

    Note: `save_dir` is used as a raw path prefix (it should end with a
    separator or a filename stem).
    """
    print('Saving the model')
    target = save_dir + str(epock)
    np.save(target, models[0])
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    """Smooth (and optionally differentiate) `y` with a Savitzky-Golay filter.

    Fits a polynomial of degree `order` over a sliding window of
    `window_size` points; the signal edges are extended by mirrored values so
    the output has the same length as the input.

    Args:
        y: 1-D array-like signal.
        window_size: odd, positive window length.
        order: polynomial order (< window_size - 1).
        deriv: order of the derivative to compute (0 = plain smoothing).
        rate: sample rate used to scale derivative output.

    Returns:
        np.ndarray of the smoothed (or differentiated) signal, same length as y.

    Raises:
        ValueError: if window_size/order cannot be converted to int.
        TypeError: if window_size is even, < 1, or too small for `order`.
    """
    from math import factorial
    try:
        # Fix: np.int was removed in NumPy >= 1.24; use the builtin int.
        window_size = np.abs(int(window_size))
        order = np.abs(int(order))
    except ValueError:
        raise ValueError('window_size and order have to be of type int')
    if (window_size % 2) != 1 or window_size < 1:
        raise TypeError('window_size size must be a positive odd number')
    if window_size < (order + 2):
        raise TypeError('window_size is too small for the polynomials order')
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # Vandermonde-style design matrix over the window offsets.
    b = np.array([[k ** i for i in order_range]
                  for k in range(-half_window, half_window + 1)])
    # np.mat/.A is deprecated; pinv on a plain array gives the same row.
    m = np.linalg.pinv(b)[deriv] * (rate ** deriv) * factorial(deriv)
    # Mirror-pad both ends so the convolution keeps the original length.
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
def render_graphs(save_dir, epoch, track_g_loss, track_d_loss, epoch_arr):
    """Plot raw and Savitzky-Golay-smoothed G/D loss curves and save the
    figure as `<save_dir>/plots/<epoch>.png`.

    Plotting only happens once more than 51 points exist, because the
    smoothing window spans 51 samples.
    """
    plots_dir = save_dir + '/plots/'
    if not os.path.exists(plots_dir):
        os.makedirs(plots_dir)
    if len(track_d_loss) > 51:
        smoothed_d_loss = savitzky_golay(track_d_loss, 51, 3)
        smoothed_g_loss = savitzky_golay(track_g_loss, 51, 3)
        plt.plot(epoch_arr, track_d_loss, color='cornflowerblue', alpha=0.5)
        plt.plot(epoch_arr, smoothed_d_loss, color='navy', alpha=0.5)
        plt.plot(epoch_arr, track_g_loss, color='indianred', alpha=0.5)
        plt.plot(epoch_arr, smoothed_g_loss, color='crimson', alpha=0.5)
        plt.legend(("Discriminator's loss", 'D-loss (Savitzky–Golay)', "Generator's loss", 'G-loss (Savitzky–Golay)'), loc='upper right')
        plt.title(('64-3D-RSGAN [lrG=%.5f, lrD=%.5f]' % (args.generator_learning_rate, args.discriminator_learning_rate)))
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.grid(True)
        plt.savefig(plots_dir + str(epoch) + '.png')
        plt.clf()
def save_values(save_dir, track_g_loss, track_d_loss, epoch_arr):
    """Persist loss histories and epoch indices as `.npy` files under
    `<save_dir>/plots/` (the directory must already exist)."""
    series = {
        'track_g_loss': track_g_loss,
        'track_d_loss': track_d_loss,
        'epochs': epoch_arr,
    }
    for (name, values) in series.items():
        np.save(save_dir + '/plots/' + name, values)
def load_values(save_dir):
    """Load the loss/epoch histories written by `save_values`.

    Returns:
        [g_loss_list, d_loss_list, epoch_list] in that order.
    """
    return [
        list(np.load(save_dir + '/plots/' + name + '.npy'))
        for name in ('track_g_loss', 'track_d_loss', 'epochs')
    ]
def continue_download(is40=False):
    """Ask the user to confirm a large dataset download.

    Args:
        is40: True to show the ModelNet40 prompt instead of ModelNet10.

    Returns:
        True to proceed, False to abort. Re-prompts until the reply starts
        with 'y' or 'n'.
    """
    queryStr = 'The ModelNet10.zip file is over 450 MB. Proceed to download (y/n): '
    if is40:
        queryStr = 'The ModelNet40.tar file is 2 GB and over 9 GB uncompressed. Proceed to download (y/n): '
    while True:
        reply = str(input(queryStr)).lower().strip()
        # Fix: an empty reply used to raise IndexError on reply[0];
        # now it simply re-prompts.
        if reply and reply[0] == 'y':
            return True
        if reply and reply[0] == 'n':
            return False
        print('please reply with y or n')
def query_dataset():
    """Ask the user which dataset to use.

    Returns:
        True for ModelNet10, False for the manually aligned ModelNet40
        subset. Re-prompts until the reply starts with '1' or '2'.
    """
    prompt = 'Choose dataset, ModelNet10 (1) or manually aligned subset of the ModelNet40 (2):'
    while True:
        reply = str(input(prompt)).lower().strip()
        # Fix: an empty reply used to raise IndexError on reply[0];
        # now it simply re-prompts.
        if reply and reply[0] == '1':
            return True
        if reply and reply[0] == '2':
            return False
        print('please reply with 1 or 2')
def camPosToQuaternion(cx, cy, cz):
    """Quaternion (w, x, y, z) orienting a camera at (cx, cy, cz).

    The position is normalised onto the unit sphere; the rotation tilts away
    from the y-axis about an axis in the xz-plane, composed with a fixed
    45-degree half-angle factor.
    """
    dist = math.sqrt(cx * cx + cy * cy + cz * cz)
    cx, cy, cz = cx / dist, cy / dist, cz / dist
    axis = (-cz, 0, cx)        # rotation axis lies in the xz-plane
    angle = math.acos(cy)      # angular offset from the y-axis
    a = math.sqrt(2) / 2
    b = math.sqrt(2) / 2
    w1, w2, w3 = axis
    c = math.cos(angle / 2)
    d = math.sin(angle / 2)
    q1 = a * c - b * d * w1
    q2 = b * c + a * d * w1
    q3 = a * d * w2 + b * d * w3
    q4 = -b * d * w2 + a * d * w3
    return (q1, q2, q3, q4)
def quaternionFromYawPitchRoll(yaw, pitch, roll):
    """Quaternion (w, x, y, z) from yaw/pitch/roll angles in radians."""
    c1, c2, c3 = math.cos(yaw / 2.0), math.cos(pitch / 2.0), math.cos(roll / 2.0)
    s1, s2, s3 = math.sin(yaw / 2.0), math.sin(pitch / 2.0), math.sin(roll / 2.0)
    return (
        c1 * c2 * c3 + s1 * s2 * s3,
        c1 * c2 * s3 - s1 * s2 * c3,
        c1 * s2 * c3 + s1 * c2 * s3,
        s1 * c2 * c3 - c1 * s2 * s3,
    )
def camRotQuaternion(cx, cy, cz, theta):
    """Quaternion for a `theta`-degree rotation about the axis pointing from
    (cx, cy, cz) towards the origin."""
    theta = (theta / 180.0) * math.pi
    dist = math.sqrt(cx * cx + cy * cy + cz * cz)
    # Unit axis pointing back at the origin.
    ux, uy, uz = -cx / dist, -cy / dist, -cz / dist
    s = math.sin(theta * 0.5)
    return (math.cos(theta * 0.5), -ux * s, -uy * s, -uz * s)
def quaternionProduct(qx, qy):
    """Hamilton product qx * qy of two (w, x, y, z) quaternions."""
    (a, b, c, d) = qx
    (e, f, g, h) = qy
    return (
        a * e - b * f - c * g - d * h,
        a * f + b * e + c * h - d * g,
        a * g - b * h + c * e + d * f,
        a * h + b * g - c * f + d * e,
    )
def obj_centened_camera_pos(dist, azimuth_deg, elevation_deg):
    """Cartesian camera position at spherical coordinates
    (dist, azimuth, elevation); angles in degrees."""
    phi = (float(elevation_deg) / 180) * math.pi
    theta = (float(azimuth_deg) / 180) * math.pi
    x = dist * math.cos(theta) * math.cos(phi)
    y = dist * math.sin(theta) * math.cos(phi)
    z = dist * math.sin(phi)
    return (x, y, z)
def _get_version():
    """Return the installed phantom package's version string."""
    import phantom

    return phantom.__version__
@ph.msg_payload()
class ImpressionRequest():
    """A user visiting a website who might be interested in an ad offer.

    Attributes:
    -----------
    timestamp (float): the time of the impression
    user_id (int): the unique, anonymous identifier of the user
    """
    timestamp: float
    user_id: int

    @classmethod
    def generate_random(cls):
        """Create an impression at the current time for a random user (1 or 2)."""
        return cls(timestamp=datetime.datetime.now().timestamp(), user_id=np.random.choice([1, 2]))
@ph.msg_payload()
class Bid():
    """Advertiser -> exchange: an attempt to win the impression.

    Attributes:
    -----------
    bid (float): the amount offered
    theme (str): the theme of the ad to display if the bid wins
    user_id (int): the targeted user's identifier
    """
    bid: float
    theme: str
    user_id: int
@ph.msg_payload()
class AuctionResult():
    """Exchange -> advertiser: the outcome of the auction.

    Attributes:
    -----------
    cost (float): the amount charged to this advertiser
    winning_bid (float): the highest bid in the auction
    """
    cost: float
    winning_bid: float
@ph.msg_payload()
class Ads():
    """Advertiser's ad to show to the user (reduced here to just a theme).

    Attributes:
    -----------
    advertiser_id (str): the advertiser whose ad will be displayed
    theme (str): the theme of the ad
    user_id (int): the user that will receive the ad
    """
    advertiser_id: str
    theme: str
    user_id: int
@ph.msg_payload()
class ImpressionResult():
    """Outcome of the ad display.

    Attributes:
    -----------
    clicked (bool): True if the user clicked on the ad
    """
    clicked: bool
class PublisherPolicy(ph.Policy):
    """Fixed publisher policy: always emits the single no-op action 0."""

    def compute_action(self, obs: np.ndarray) -> np.ndarray:
        return np.array([0])
class PublisherAgent(ph.Agent):
    """Generates `ImpressionRequest`s (website real-estate rented to
    advertisers) and simulates the user's click behaviour when an ad is shown.

    Attributes:
    -----------
    _USER_CLICK_PROBABILITIES (dict): per-user click probability for each ad
        theme. Hardcoded demo values; a custom mapping can be injected via
        the `user_click_proba` constructor argument.
    """

    _USER_CLICK_PROBABILITIES = {
        1: {'sport': 0.0, 'travel': 1.0, 'science': 0.2, 'tech': 0.8},
        2: {'sport': 1.0, 'travel': 0.0, 'science': 0.7, 'tech': 0.1},
    }

    def __init__(self, agent_id: str, exchange_id: str, user_click_proba: dict=None):
        super().__init__(agent_id)
        self.exchange_id = exchange_id
        # Fall back to the hardcoded table when no mapping is supplied.
        self.user_click_proba = user_click_proba or self._USER_CLICK_PROBABILITIES

    def generate_messages(self, ctx: ph.Context):
        # One random impression per step, sent to the ad exchange.
        return [(self.exchange_id, ImpressionRequest.generate_random())]

    @ph.agents.msg_handler(Ads)
    def handle_ads(self, _ctx: ph.Context, msg: ph.Message):
        """Simulate the ad display and report the click outcome.

        Draws a Bernoulli click from the user's probability for the ad's
        theme and sends an `ImpressionResult` back to the winning advertiser.

        Params:
        -------
        _ctx (ph.Context): partially observable context for the agent
        msg (ph.Message): the received `Ads` message

        Returns:
        --------
        [(receiver_id, payload)]: the `ImpressionResult` for the advertiser
        """
        logger.debug('PublisherAgent %s ads: %s', self.id, msg.payload)
        click_p = self.user_click_proba[msg.payload.user_id][msg.payload.theme]
        clicked = np.random.binomial(1, click_p)
        return [(msg.payload.advertiser_id, ImpressionResult(clicked=clicked))]
class AdvertiserAgent(ph.StrategicAgent):
    """Learning advertiser: bids on impressions, within its budget limit, to
    maximise the number of clicks its ads receive. Each advertiser carries a
    `theme` which influences the user's probability of clicking.

    Observation space: sampled supertype, budget left, user id.
    Action space: bid amount as a fraction of the episode budget.
    """

    @dataclass
    class Supertype(ph.Supertype):
        # Episode budget available for bidding.
        budget: float

    def __init__(self, agent_id: str, exchange_id: str, theme: str='generic'):
        self.exchange_id = exchange_id
        self.theme = theme
        # Single continuous action in [0, 1]: fraction of budget to bid.
        self.action_space = gym.spaces.Box(low=np.array([0.0]), high=np.array([1.0]))
        super().__init__(agent_id)

    @property
    def observation_space(self):
        # A property because it depends on self.type (the sampled supertype).
        return gym.spaces.Dict({'type': self.type.to_obs_space(), 'budget_left': gym.spaces.Box(low=0.0, high=1.0, shape=(1,), dtype=np.float64), 'user_id': gym.spaces.Discrete(2)})

    def pre_message_resolution(self, _ctx: ph.Context):
        """@override
        Called at the beginning of each step; clears the per-step click and
        win counters.
        """
        self.step_clicks = 0
        self.step_wins = 0

    @ph.agents.msg_handler(ImpressionRequest)
    def handle_impression_request(self, ctx: ph.Context, msg: ph.Message):
        """Cache the information about the user behind the impression.

        The user id arrives in the message; age/zipcode are pulled from the
        exchange's view through the `ctx` object.
        """
        logger.debug('AdvertiserAgent %s impression request: %s', self.id, msg.payload)
        self._current_user_id = msg.payload.user_id
        self._current_age = ctx[self.exchange_id].users_info[self._current_user_id]['age']
        self._current_zipcode = ctx[self.exchange_id].users_info[self._current_user_id]['zipcode']
        self.total_requests[self._current_user_id] += 1

    @ph.agents.msg_handler(AuctionResult)
    def handle_auction_result(self, _ctx: ph.Context, msg: ph.MsgPayload):
        """Update budget and win counters after an auction.

        A non-zero cost indicates this agent won (the exchange charges losers
        0.0).
        """
        logger.debug('AdvertiserAgent %s auction result: %s', self.id, msg.payload)
        self.step_wins += int((msg.payload.cost != 0.0))
        self.total_wins[self._current_user_id] += int((msg.payload.cost != 0.0))
        self.left -= msg.payload.cost

    @ph.agents.msg_handler(ImpressionResult)
    def handle_impression_result(self, _ctx: ph.Context, msg: ph.MsgPayload):
        """Record whether the displayed ad was clicked."""
        logger.debug('AdvertiserAgent %s impression result: %s', self.id, msg.payload)
        self.step_clicks += int(msg.payload.clicked)
        self.total_clicks[self._current_user_id] += int(msg.payload.clicked)

    def encode_observation(self, _ctx: ph.Context):
        """@override
        Observation: sampled supertype, normalised budget left and the
        0-based current user id.

        NOTE(review): implicitly returns None until the first impression is
        seen (_current_user_id == 0) — presumably the framework treats a
        None observation as "no action this step"; confirm against phantom.
        """
        if (self._current_user_id != 0):
            return {'type': self.type.to_obs_space_compatible_type(), 'budget_left': np.array([(self.left / self.type.budget)], dtype=np.float64), 'user_id': (self._current_user_id - 1)}

    def decode_action(self, ctx: ph.Context, action: np.ndarray):
        """@override
        Convert the policy output (fraction of budget) into a `Bid` message,
        capped by the budget still available. No message is sent for a
        zero bid.
        """
        logger.debug('AdvertiserAgent %s decode action: %s', self.id, action)
        msgs = []
        self.bid = min((action[0] * self.type.budget), self.left)
        if (self.bid > 0.0):
            msg = Bid(bid=self.bid, theme=self.theme, user_id=self._current_user_id)
            msgs.append((self.exchange_id, msg))
        return msgs

    def compute_reward(self, _ctx: ph.Context) -> float:
        """@override
        Per-step reward: clicks this step, optionally blended with the
        remaining budget via `risk_aversion` (currently 0, i.e. clicks only).
        """
        risk_aversion = 0.0
        return (((1 - risk_aversion) * self.step_clicks) + ((risk_aversion * self.left) / self.type.budget))

    def is_terminated(self, _ctx: ph.Context) -> bool:
        """@override
        This agent cannot perform any more bids once its budget is exhausted.
        """
        return (self.left <= 0)

    def reset(self):
        """@override
        Clear all per-episode state (budget left, counters, cached user)
        before each episode.
        """
        super().reset()
        self.left = self.type.budget
        self.step_clicks = 0
        self.step_wins = 0
        self.total_clicks = defaultdict(int)
        self.total_requests = defaultdict(int)
        self.total_wins = defaultdict(int)
        self.bid = 0.0
        self._current_user_id = 0.0
        self._current_age = 0.0
        self._current_zipcode = 0.0
class AdExchangeAgent(ph.Agent):
    """Reactive ad-exchange actor: forwards impression requests to the
    advertisers and runs first- or second-price auctions over their bids.
    It performs no actions of its own.
    """

    @dataclass(frozen=True)
    class AdExchangeView(ph.AgentView):
        """View exposing additional information to other actors, accessible
        via the `ph.Context` object passed to the appropriate methods.

        For this use case it exposes user information to the advertisers to
        help them decide on their bid.
        """
        # Mapping user_id -> {'age': ..., 'zipcode': ...}.
        users_info: dict

    def __init__(self, agent_id: str, publisher_id: str, advertiser_ids: Iterable=tuple(), strategy: str='first'):
        super().__init__(agent_id)
        self.publisher_id = publisher_id
        self.advertiser_ids = advertiser_ids
        self.strategy = strategy  # 'first' or 'second' price auction

    def view(self, neighbour_id=None) -> ph.View:
        """@override
        Provide extra user information, but only to advertisers (ids prefixed
        'ADV'); everyone else gets the default view. The data is pulled by
        the advertiser explicitly via the `ctx` object if it wants it.
        """
        if (neighbour_id and neighbour_id.startswith('ADV')):
            # Hard-coded demo user profiles.
            return self.AdExchangeView(users_info={1: {'age': 18, 'zipcode': 94025}, 2: {'age': 40, 'zipcode': 90250}})
        else:
            return super().view(neighbour_id)

    @ph.agents.msg_handler(ImpressionRequest)
    def handle_impression_request(self, _ctx: ph.Context, msg: ph.Message[ImpressionRequest]):
        """Fan an incoming impression request out to every advertiser.

        The exchange acts purely as an intermediary between the publisher
        and the advertisers here.
        """
        logger.debug('AdExchange impression request %s', msg)
        return [(adv_id, msg.payload) for adv_id in self.advertiser_ids]

    def handle_batch(self, ctx: ph.Context, batch: Sequence[ph.Message]):
        """@override
        Consume the whole message batch at once so that all `Bid` messages
        can be auctioned together; all other message types fall back to the
        normal per-message handlers.

        Note:
        -----
        The default phantom logic consumes each message individually.
        """
        bids = []
        msgs = []
        for message in batch:
            if isinstance(message.payload, Bid):
                bids.append(message)
            else:
                msgs += self.handle_message(ctx, message)
        if (len(bids) > 0):
            msgs += self.auction(bids)
        return msgs

    def auction(self, bids: Sequence[ph.Message[Bid]]):
        """Run the configured auction over `bids`.

        The highest bid always wins. 'first' charges the winning bid itself;
        'second' charges the second-highest bid. The winner's ad is sent to
        the publisher and every bidder receives an `AuctionResult` (losers
        with cost 0.0).

        Raises:
        -------
        ValueError: for an unknown `self.strategy`.
        """
        if (self.strategy == 'first'):
            (winner, cost) = self._first_price_auction(bids)
        elif (self.strategy == 'second'):
            (winner, cost) = self._second_price_auction(bids)
        else:
            raise ValueError(f'Unknown auction strategy: {self.strategy}')
        logger.debug('AdExchange auction done winner: %s cost: %s', winner, cost)
        msgs = []
        advertiser_ids = [m.sender_id for m in bids]
        msgs.append((self.publisher_id, Ads(advertiser_id=winner.sender_id, theme=winner.payload.theme, user_id=winner.payload.user_id)))
        for adv_id in advertiser_ids:
            # Only the winner is charged; losers receive cost 0.0.
            adv_cost = (cost if (adv_id == winner.sender_id) else 0.0)
            msgs.append((adv_id, AuctionResult(cost=adv_cost, winning_bid=winner.payload.bid)))
        return msgs

    def _first_price_auction(self, bids: Sequence[ph.Message[Bid]]):
        # Winner pays exactly what they bid.
        sorted_bids = sorted(bids, key=(lambda m: m.payload.bid), reverse=True)
        winner = sorted_bids[0]
        cost = sorted_bids[0].payload.bid
        return (winner, cost)

    def _second_price_auction(self, bids: Sequence[ph.Message[Bid]]):
        # Winner pays the runner-up's bid (their own if they bid alone).
        sorted_bids = sorted(bids, key=(lambda m: m.payload.bid), reverse=True)
        winner = sorted_bids[0]
        cost = (sorted_bids[1].payload.bid if (len(bids) > 1) else sorted_bids[0].payload.bid)
        return (winner, cost)
class DigitalAdsEnv(ph.FiniteStateMachineEnv):
    """Two-stage FSM environment for the digital-ads auction game.

    Stage 'publisher_step': the publisher emits an impression request.
    Stage 'advertiser_step': the advertisers bid on the impression.

    Args:
        num_steps: episode length.
        num_agents_theme: mapping theme -> number of advertiser agents.
        **kwargs: forwarded to `ph.FiniteStateMachineEnv`.
    """

    def __init__(self, num_steps=20, num_agents_theme=None, **kwargs):
        self.exchange_id = 'ADX'
        self.publisher_id = 'PUB'
        # Fix: the default `num_agents_theme=None` previously crashed with a
        # TypeError on `.items()`; treat it as "no advertisers".
        num_agents_theme = num_agents_theme or {}
        USER_CLICK_PROBABILITIES = {
            1: {'sport': 0.0, 'travel': 1.0, 'science': 0.2, 'tech': 0.5},
            2: {'sport': 1.0, 'travel': 0.0, 'science': 0.7, 'tech': 0.5},
        }
        publisher_agent = PublisherAgent(self.publisher_id, exchange_id=self.exchange_id, user_click_proba=USER_CLICK_PROBABILITIES)
        # One advertiser per requested (theme, count) pair with globally
        # unique ids ADV_1, ADV_2, ...
        advertiser_agents = []
        i = 1
        for (theme, num_agents) in num_agents_theme.items():
            for _ in range(num_agents):
                advertiser_agents.append(AdvertiserAgent(f'ADV_{i}', self.exchange_id, theme=theme))
                i += 1
        self.advertiser_ids = [a.id for a in advertiser_agents]
        exchange_agent = AdExchangeAgent(self.exchange_id, publisher_id=self.publisher_id, advertiser_ids=self.advertiser_ids)
        actors = ([exchange_agent, publisher_agent] + advertiser_agents)
        # Batched message resolution so the exchange can auction all bids at once.
        network = ph.StochasticNetwork(actors, ph.resolvers.BatchResolver(round_limit=5), ignore_connection_errors=True)
        network.add_connections_between([self.exchange_id], [self.publisher_id])
        network.add_connections_between([self.exchange_id], self.advertiser_ids)
        network.add_connections_between([self.publisher_id], self.advertiser_ids)
        super().__init__(
            num_steps=num_steps,
            network=network,
            initial_stage='publisher_step',
            stages=[
                ph.FSMStage(stage_id='publisher_step', next_stages=['advertiser_step'], acting_agents=[self.publisher_id], rewarded_agents=[self.publisher_id]),
                ph.FSMStage(stage_id='advertiser_step', next_stages=['publisher_step'], acting_agents=self.advertiser_ids, rewarded_agents=self.advertiser_ids),
            ],
            **kwargs,
        )
class AdvertiserBidUser(ph.metrics.Metric[float]):
    """Tracks an advertiser's bid, restricted to steps targeting one user."""

    def __init__(self, agent_id: str, user_id: int) -> None:
        self.agent_id: str = agent_id
        self.user_id: int = user_id

    def extract(self, env: ph.PhantomEnv) -> float:
        """@override
        Per-step value: the agent's bid when the current impression targets
        the tracked user, NaN otherwise.
        """
        agent = env[self.agent_id]
        if agent._current_user_id != self.user_id:
            return np.nan
        return agent.bid

    def reduce(self, values, mode=None) -> float:
        """@override
        Episode value: the average bid (NaN steps excluded), rather than the
        default last-step value.
        """
        return np.nanmean(values)
class AdvertiserAverageHitRatioUser(ph.metrics.Metric[float]):
    """Clicks per auction win for one (advertiser, user) pair."""

    def __init__(self, agent_id: str, user_id: int) -> None:
        self.agent_id: str = agent_id
        self.user_id: int = user_id

    def extract(self, env: ph.PhantomEnv) -> float:
        """@override
        Per-step value: the running clicks/wins ratio; NaN before the first win.
        """
        agent = env[self.agent_id]
        wins = agent.total_wins[self.user_id]
        if wins == 0.0:
            return np.nan
        return agent.total_clicks[self.user_id] / wins

    def reduce(self, values, mode=None) -> float:
        """@override
        Episode value: the last step's running ratio (counters are cumulative).
        """
        return values[-1]
class AdvertiserAverageWinProbaUser(ph.metrics.Metric[float]):
    """Auction wins per impression request for one (advertiser, user) pair."""

    def __init__(self, agent_id: str, user_id: int) -> None:
        self.agent_id: str = agent_id
        self.user_id: int = user_id

    def extract(self, env: ph.PhantomEnv) -> float:
        """@override
        Per-step value: the running wins/requests ratio; NaN before the
        first request.
        """
        agent = env[self.agent_id]
        requests = agent.total_requests[self.user_id]
        if requests == 0.0:
            return np.nan
        return agent.total_wins[self.user_id] / requests

    def reduce(self, values, mode=None) -> float:
        """@override
        Episode value: the last step's running ratio (counters are cumulative).
        """
        return values[-1]
class AdvertiserTotalRequests(ph.metrics.Metric[float]):
    """Cumulative impression requests seen for one (advertiser, user) pair."""

    def __init__(self, agent_id: str, user_id: int) -> None:
        self.agent_id: str = agent_id
        self.user_id: int = user_id

    def extract(self, env: ph.PhantomEnv) -> float:
        # The agent keeps a cumulative per-user counter.
        return env[self.agent_id].total_requests[self.user_id]

    def reduce(self, values, mode=None) -> float:
        # Cumulative counter: the last value is the episode total.
        return values[-1]
class AdvertiserTotalWins(ph.metrics.Metric[float]):
    """Cumulative auction wins for one (advertiser, user) pair."""

    def __init__(self, agent_id: str, user_id: int) -> None:
        self.agent_id: str = agent_id
        self.user_id: int = user_id

    def extract(self, env: ph.PhantomEnv) -> float:
        # The agent keeps a cumulative per-user counter.
        return env[self.agent_id].total_wins[self.user_id]

    def reduce(self, values, mode=None) -> float:
        # Cumulative counter: the last value is the episode total.
        return values[-1]
def BuyerPolicy(obs):
    """Deterministic hand-written buyer policy.

    `obs` is [min_price, demand, value]: buy (return the demand volume) only
    when there is demand and the best ask does not exceed the buyer's
    private value; otherwise do nothing (return 0).
    """
    min_price, demand, value = obs[0], obs[1], obs[2]
    return demand if (demand and min_price <= value) else 0
def SellerPolicy(obs):
    """Random hand-written seller policy: price drawn uniformly from [0, 1).

    The observation is ignored.
    """
    return np.random.uniform()
def rollout(env):
    """Run one episode of `env` with the hand-written buyer/seller policies,
    printing observations, rewards and actions at every step."""
    (observations, _) = env.reset()
    rewards = {}
    while env.current_step < env.num_steps:
        print(env.current_step)
        print('\nobservations:')
        print(observations)
        print('\nrewards:')
        print(rewards)
        # Route each agent's observation through the matching policy.
        actions = {}
        for (aid, obs) in observations.items():
            agent = env.agents[aid]
            if isinstance(agent, BuyerAgent):
                actions[aid] = BuyerPolicy(obs)
            elif isinstance(agent, SellerAgent):
                actions[aid] = SellerPolicy(obs)
        print('\nactions:')
        print(actions)
        step = env.step(actions)
        observations = step.observations
        rewards = step.rewards
@dataclass(frozen=True)
class Leak():
    """Message carrying a victim seller's leaked price.

    Attributes:
        victim_id: id of the seller whose price was leaked.
        price: the leaked price.
    """
    victim_id: str
    price: float
class MaybeSneakySeller(SellerAgent):
    """Seller that can receive leaked price information about a victim seller
    and includes the victim's latest price in its observation."""

    def __init__(self, agent_id: ph.AgentID, victim_id=None):
        super().__init__(agent_id)
        self.victim_id = victim_id
        self.victims_price = 0
        # Observation: [own transactions, market average price, victim's price].
        # Fix: np.Inf was removed in NumPy 2.0; np.inf is the supported
        # spelling (and matches SellerAgent's observation space).
        self.observation_space = Box(np.array([0, 0, 0]), np.array([np.inf, 1, 1]))

    def encode_observation(self, ctx):
        obs = np.array([self.current_tx, ctx.env_view.avg_price, self.victims_price])
        self.current_tx = 0  # the transaction counter is per-step
        return obs

    @ph.agents.msg_handler(Order)
    def handle_order_message(self, ctx, message):
        # Book revenue at the currently quoted price.
        self.current_revenue += (self.current_price * message.payload.vol)
        self.current_tx += message.payload.vol

    @ph.agents.msg_handler(Leak)
    def handle_leak_message(self, ctx, message):
        # Remember the victim's most recent quoted price.
        self.victims_price = message.payload.price
        print('received leak message')
        print(message)
class MaybeLeakyBuyer(BuyerAgent):
    """Buyer that, when configured with a victim, forwards the victim
    seller's quoted price to an adversarial seller."""

    def __init__(self, agent_id, demand_prob, supertype, victim_id=None, adv_id=None):
        super().__init__(agent_id, demand_prob, supertype)
        self.victim_id = victim_id
        self.adv_id = adv_id

    @ph.agents.msg_handler(Price)
    def handle_price_message(self, ctx, message):
        self.seller_prices[message.sender_id] = message.payload.price
        # Only the victim's quotes are leaked onwards.
        if message.sender_id == self.victim_id:
            responses = [(self.adv_id, Leak(victim_id=self.victim_id, price=message.payload.price))]
            print(responses)
            return responses
@dataclass(frozen=True)
class AdversarialSetup():
    """Wires together the three parties of the price-leakage attack."""
    leaky_buyer: ph.AgentID      # buyer that forwards the victim's price
    victim_seller: ph.AgentID    # seller whose price is leaked
    adv_seller: ph.AgentID       # seller that receives the leak
class LeakySimpleMarketEnv(SimpleMarketEnv):
    """Market env variant where one buyer may leak a victim seller's price to
    an adversarial seller, whose reward is then re-shaped against the victim."""

    def __init__(self, num_steps, network, adv_setup=None):
        super().__init__(num_steps, network)
        self.leaky = False
        if adv_setup:
            self.adversarial_setup(adv_setup.leaky_buyer, adv_setup.adv_seller, adv_setup.victim_seller)

    def adversarial_setup(self, leaky_buyer, adv_seller, victim_seller, victim_reward_coeff=1.0, adv_reward_coeff=1.0):
        """Register the attack participants on the relevant agents and enable
        adversarial reward shaping."""
        self.leaky = True
        self.leaky_buyer = leaky_buyer
        self.adv_seller = adv_seller
        self.victim_seller = victim_seller
        # Point the leaky buyer and the adversary at the victim.
        self.agents[leaky_buyer].victim_id = victim_seller
        self.agents[leaky_buyer].adv_id = adv_seller
        self.agents[adv_seller].victim_id = victim_seller
        self.victim_coeff = victim_reward_coeff
        self.adv_coeff = adv_reward_coeff

    def compute_adv_reward(self, attacker_reward, victim_reward):
        """Adversarial reward: the attacker's own reward minus the victim's,
        weighted by the configured coefficients."""
        return (-self.victim_coeff * victim_reward) + (self.adv_coeff * attacker_reward)

    def step(self, actions, verbose=False):
        """@override
        After a sellers stage, replace the adversary's reward with the shaped
        adversarial reward."""
        step = super().step(actions)
        if self.leaky and (self.current_stage == 'Sellers'):
            step.rewards[self.adv_seller] = self.compute_adv_reward(step.rewards[self.adv_seller], step.rewards[self.victim_seller])
        return step
@dataclass(frozen=True)
class Price(ph.MsgPayload):
    """Seller -> buyers: the seller's current unit price."""
    price: float
@dataclass(frozen=True)
class Order(ph.MsgPayload):
    """Buyer -> seller: an order for `vol` units."""
    vol: int
@dataclass
class BuyerSupertype(ph.Supertype):
    """Per-episode buyer parameters sampled by the framework."""
    # Private value the buyer derives from one unit of the good.
    value: float
class BuyerAgent(ph.StrategicAgent):
    """Buyer that, when it has demand, orders one unit from the cheapest
    seller (ties broken at random), earning its private value minus the
    price paid.
    """

    def __init__(self, agent_id, demand_prob, supertype):
        super().__init__(agent_id, supertype=supertype)
        self.seller_prices = {}          # latest quoted price per seller id
        self.demand_prob = demand_prob   # per-step probability of wanting a unit
        self.current_reward = 0
        self.action_space = Discrete(2)  # 0 = do nothing, 1 = buy one unit
        # Observation: [min seller price, demand flag, private value].
        self.observation_space = Box(low=0, high=1, shape=(3,))

    def decode_action(self, ctx, action):
        # Buy from one of the cheapest sellers when the policy says to buy.
        msgs = []
        min_price = min(self.seller_prices.values())
        if action:
            min_sellers = [k for (k, v) in self.seller_prices.items() if (v == min_price)]
            seller = random.choice(min_sellers)
            msgs.append((seller, Order(action)))
            # Trade surplus: private value minus the price paid.
            self.current_reward += (((- action) * min_price) + self.type.value)
        return msgs

    def encode_observation(self, ctx):
        min_price = min(self.seller_prices.values())
        # Demand is redrawn each step as a Bernoulli(demand_prob) draw.
        demand = np.random.binomial(1, self.demand_prob)
        return np.array([min_price, demand, self.type.value])

    def compute_reward(self, ctx):
        # Consume and clear the reward accumulated in decode_action.
        reward = self.current_reward
        self.current_reward = 0
        return reward

    @ph.agents.msg_handler(Price)
    def handle_price_message(self, ctx, message):
        # Record the seller's latest quote.
        self.seller_prices[message.sender_id] = message.payload.price

    def reset(self):
        super().reset()
        self.seller_prices = {}
        self.current_reward = 0
class SellerAgent(ph.StrategicAgent):
    """Seller that quotes a price each step and earns revenue from the
    orders it receives at that price."""

    def __init__(self, agent_id: ph.AgentID):
        super().__init__(agent_id)
        self.current_price = 0
        self.current_revenue = 0
        self.current_tx = 0
        # Action: a single price in [0, 1].
        self.action_space = Box(low=0, high=1, shape=(1,))
        # Observation: [transactions last step, market average price].
        self.observation_space = Box(np.array([0, 0]), np.array([np.inf, 1]))

    def decode_action(self, ctx, action):
        # Quote the new price to every connected agent.
        self.current_price = action
        return [(nid, Price(action)) for nid in ctx.neighbour_ids]

    def encode_observation(self, ctx):
        obs = np.array([self.current_tx, ctx.env_view.avg_price])
        self.current_tx = 0  # the transaction counter is per-step
        return obs

    def compute_reward(self, ctx):
        # Consume and clear the revenue booked by handle_order_message.
        reward = self.current_revenue
        self.current_revenue = 0
        return reward

    def reset(self):
        # NOTE(review): unlike BuyerAgent.reset, this does not call
        # super().reset() — confirm whether the base-class reset is needed.
        self.current_price = self.action_space.sample()
        self.current_revenue = 0
        self.current_tx = 0

    @ph.agents.msg_handler(Order)
    def handle_order_message(self, ctx, message):
        # Book revenue at the currently quoted price.
        self.current_revenue += self.current_price * message.payload.vol
        self.current_tx += message.payload.vol
class SimpleMarketEnv(ph.FiniteStateMachineEnv):
    """Two-stage market FSM: sellers quote first, then buyers and sellers
    alternate; the average seller price is exposed via the env view."""

    @dataclass(frozen=True)
    class View(ph.fsm.FSMEnvView):
        # Mean of all sellers' current prices, visible to every agent.
        avg_price: float

    def __init__(self, num_steps, network):
        buyers = [aid for (aid, agent) in network.agents.items() if isinstance(agent, BuyerAgent)]
        sellers = [aid for (aid, agent) in network.agents.items() if isinstance(agent, SellerAgent)]
        stages = [
            ph.FSMStage(stage_id='Buyers', next_stages=['Sellers'], acting_agents=buyers, rewarded_agents=buyers),
            ph.FSMStage(stage_id='Sellers', next_stages=['Buyers'], acting_agents=sellers, rewarded_agents=sellers),
        ]
        self.avg_price = 0.0
        super().__init__(num_steps, network, stages=stages, initial_stage='Sellers')

    def view(self, neighbour_id=None) -> 'SimpleMarketEnv.View':
        # Extend the base FSM view with the current average price.
        return self.View(avg_price=self.avg_price, **super().view({}).__dict__)

    def post_message_resolution(self):
        super().post_message_resolution()
        # Refresh the market average from all sellers' quoted prices.
        prices = [agent.current_price for agent in self.agents.values() if isinstance(agent, SellerAgent)]
        self.avg_price = np.mean(prices)
@ph.msg_payload('CustomerAgent', 'ShopAgent')
class OrderRequest():
    """Customer -> shop: the quantity the customer wants to buy."""
    size: int
@ph.msg_payload('ShopAgent', 'CustomerAgent')
class OrderResponse():
    """Shop -> customer: the quantity actually sold (may be a partial fill)."""
    size: int
@ph.msg_payload('ShopAgent', 'FactoryAgent')
class StockRequest():
    """Shop -> factory: the quantity of new stock requested."""
    size: int
@ph.msg_payload('FactoryAgent', 'ShopAgent')
class StockResponse():
    """Factory -> shop: the quantity of stock delivered."""
    size: int
class FactoryAgent(ph.Agent):
    """Upstream factory: fulfils every stock request in full, immediately."""

    def __init__(self, agent_id: str):
        super().__init__(agent_id)

    @ph.agents.msg_handler(StockRequest)
    def handle_stock_request(self, ctx: ph.Context, message: ph.Message):
        # Ship exactly the quantity requested back to the shop.
        return [(message.sender_id, StockResponse(message.payload.size))]
class CustomerAgent(ph.Agent):
    """Customer that places a random-sized order with its shop every step."""

    def __init__(self, agent_id: ph.AgentID, shop_id: ph.AgentID):
        super().__init__(agent_id)
        self.shop_id: str = shop_id

    @ph.agents.msg_handler(OrderResponse)
    def handle_order_response(self, ctx: ph.Context, message: ph.Message):
        # The delivery itself needs no reaction.
        return

    def generate_messages(self, ctx: ph.Context):
        # Order a random quantity in [0, CUSTOMER_MAX_ORDER_SIZE).
        order_size = np.random.randint(CUSTOMER_MAX_ORDER_SIZE)
        return [(self.shop_id, OrderRequest(order_size))]
class ShopAgent(ph.StrategicAgent):
    """Learning shop: sells stock to customers and restocks from the factory."""

    def __init__(self, agent_id: str, factory_id: str):
        super().__init__(agent_id)
        self.factory_id: str = factory_id
        self.stock: int = 0
        self.sales: int = 0
        self.missed_sales: int = 0
        # Observation: normalised [stock, sales, missed sales].
        self.observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=(3,))
        # Action: quantity of new stock to request from the factory.
        self.action_space = gym.spaces.Box(low=0.0, high=SHOP_MAX_STOCK, shape=(1,))

    def pre_message_resolution(self, ctx: ph.Context):
        # Sales counters are per-step.
        self.sales = 0
        self.missed_sales = 0

    @ph.agents.msg_handler(StockResponse)
    def handle_stock_response(self, ctx: ph.Context, message: ph.Message):
        self.delivered_stock = message.payload.size
        # Warehouse capacity is capped at SHOP_MAX_STOCK.
        self.stock = min(self.stock + self.delivered_stock, SHOP_MAX_STOCK)

    @ph.agents.msg_handler(OrderRequest)
    def handle_order_request(self, ctx: ph.Context, message: ph.Message):
        requested = message.payload.size
        if requested > self.stock:
            # Partial fill: sell what we have and record the shortfall.
            self.missed_sales += requested - self.stock
            sold = self.stock
            self.stock = 0
        else:
            sold = requested
            self.stock -= requested
        self.sales += sold
        return [(message.sender_id, OrderResponse(sold))]

    def encode_observation(self, ctx: ph.Context):
        max_sales_per_step = NUM_CUSTOMERS * CUSTOMER_MAX_ORDER_SIZE
        return np.array(
            [
                self.stock / SHOP_MAX_STOCK,
                self.sales / max_sales_per_step,
                self.missed_sales / max_sales_per_step,
            ],
            dtype=np.float32,
        )

    def decode_action(self, ctx: ph.Context, action: np.ndarray):
        # Never request more than fits in the warehouse.
        stock_to_request = min(int(round(action[0])), SHOP_MAX_STOCK - self.stock)
        return [(self.factory_id, StockRequest(stock_to_request))]

    def compute_reward(self, ctx: ph.Context) -> float:
        # Reward sales, penalise held stock (storage cost).
        return self.sales - 0.1 * self.stock

    def reset(self):
        # NOTE(review): only stock is cleared here; the sales counters are
        # reset every step in pre_message_resolution.
        self.stock = 0