-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathhyperparams.yml
More file actions
32 lines (32 loc) · 2.45 KB
/
hyperparams.yml
File metadata and controls
32 lines (32 loc) · 2.45 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
batch_size: 1000 # Batch size for graph sampling
epochs: 500 # Number of epochs for training
test_epochs: 500 # Number of epochs for testing
weight_decay: 0.00001 # Weight decay for ...
warmup: 50 # Number of warmup steps for ...
g_temperature: 1.0 # Temperature of augmentation model g (?)
g_hidden: 64 # Number of hidden units for augmentation model g (?)
g_dropout: 0.1 # Dropout for encoder for augmentation model g
g_nlayer: 1 # Number of layers for encoder for augmentation model g
mlpx_dropout: 0.1 # Dropout for MLPX model in augmentation model g
f_hidden: 64 # Number of hidden units for encoder model f (?)
f_output_features: 64 # Number of output features for encoder model f (?)
f_dropout: 0.1 # Dropout for encoder model f (?)
f_layers: 2 # Number of layers for encoder model f (?)
k_hidden: 64 # Number of hidden units for adversary model k (?)
k_output_features: 64 # Number of output features for adversary model k (?)
k_dropout: 0.1 # Dropout in GCN for adversary model k
k_nlayer: 2 # Number of layers in GCN for adversary model k
c_hidden: 64 # Number of hidden units for classifier model (?)
c_input: 64 # Number of input units for classifier model (?)
alpha: 10 # Loss alpha weight ...
beta: 0.1 # Loss beta weight ...
gamma: 0.1 # Loss gamma weight ...
lam: 0.5 # Loss lambda weight ...
k_lr: 0.001 # Learning rate for adversary model k
c_lr: 0.001 # Learning rate for classifier
g_lr: 0.0001 # Learning rate for joint f and g models optimization
g_warmup_lr: 0.001 # Learning rate for a warmup optimizer for augmentation model g
f_lr: 0.001 # Learning rate for encoder model f
graphair_temperature: 0.07 # Temperature for the whole graphair model
edge_perturbation: true # Whether to use edge perturbation
node_feature_masking: true # Whether to use feature masking