Commit ·
a1b3d87
1
Parent(s): 10a629a
Upload param.py
Browse files
param.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Central hyper-parameter / pipeline configuration for the project.

Importing this module has two side effects:
  1. seeds `random`, `torch` (CPU + all CUDA devices), and `numpy` for
     reproducibility, and
  2. defines the module-level `settings` dict consumed by the rest of the
     pipeline.
"""
import random

import numpy as np
import torch

# Single source of truth for the RNG seed: change it once here to vary (or
# keep) the reproducible run, instead of editing four hard-coded literals.
seed = 0

random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)  # safe no-op when CUDA is unavailable
np.random.seed(seed)

settings = {
    'model': {
        'baseline': {
            'random': {
                'b': 128  # batch size
            },
            'fnn': {
                'l': [128],         # list of number of nodes in each layer
                'lr': 0.1,          # learning rate
                'b': 128,           # batch size
                'e': 20,            # epoch
                'nns': 3,           # number of negative samples
                'ns': 'unigram_b',  # 'uniform', 'unigram', 'unigram_b'
            },
            'bnn': {
                'l': [128],         # list of number of nodes in each layer
                'lr': 0.1,          # learning rate
                'b': 128,           # batch size
                'e': 20,            # epoch
                'nns': 3,           # number of negative samples
                'ns': 'unigram_b',  # 'uniform', 'unigram', 'unigram_b'
                's': 1              # sample_elbo for bnn
            },
            'nmt': {
                'base_config': './mdl/nmt_config.yaml'
            },
            'caser': {},
            'rrn': {
                'with_zero': True
            },
            'emb': {
                'd': 100,  # embedding dimension
                'e': 100,  # epoch
                'dm': 1,   # training algorithm. 1: distributed memory (PV-DM), 0: distributed bag of words (PV-DBOW)
                'w': 1     # cooccurrence window
            }
        },
        'cmd': ['train', 'test', 'eval', 'fair'],  # 'train', 'test', 'eval', 'plot', 'agg', 'adila'
        'nfolds': 5,
        'train_test_split': 0.85,
        'step_ahead': 2,  # for now, it means that whatever are in the last [step_ahead] time interval will be the test set!
    },
    'data': {
        'domain': {
            'dblp': {},
            'uspt': {},
            'imdb': {},
        },
        # should be one of 'city', 'state', 'country' and represents the
        # location of members in teams (not the location of teams)
        'location_type': 'country',
        'filter': {
            'min_nteam': 75,
            'min_team_size': 3,
        },
        'parallel': 1,
        'ncore': 0,  # <= 0 for all
        'bucket_size': 500
    },
    'fair': {
        'np_ratio': None,
        'fairness': ['det_greedy'],
        'k_max': None,
        'fairness_metrics': {'ndkl'},
        'utility_metrics': {'map_cut_2,5,10'},
        'eq_op': False,
        'mode': 0,
        'core': -1,
        'attribute': ['gender', 'popularity']
    },
}
|