---
# Description: Configuration file for running any of the experiments.
# All settings can be overwritten by the experiment configuration file,
# specified for defaults (or on the command line).
# ==============================================================================

# Defaults (Hydra defaults list; `_self_` first so the experiment file,
# merged at `_here_`, can override the values below)
defaults:
  - _self_
  - experiment@_here_: expertise_sensitivity

# EXPERIMENT SETTINGS
# ==============================================================================
seeds: [0, 1, 2, 3, 4]
n_splits: 5  # How many splits to perform on data for error bars in results
model_names:
  - "Torch_TARNet"
  - "Torch_DragonNet"
  - "Torch_DragonNet_2"
  - "Torch_DragonNet_4"
  - "Torch_CFRNet_0.001"
  - "Torch_TLearner"
  - "Torch_SLearner"
  - "Torch_XLearner"
  - "EconML_TLearner_Lasso"
  - "EconML_SLearner_Lasso"
  - "Torch_ActionNet"
# ==============================================================================

# DATA
# ==============================================================================
results_dictionary_prefix: "toy"
dataset: "toy_data"
repo_path: "PATH_TO_REPO"  # NOTE(review): placeholder — set to the repo root
directory_path: "${repo_path}/data"
results_path: "${repo_path}/results"
# ==============================================================================

# Evaluation
# ==============================================================================
evaluate_inference: false
evaluate_explanations: false
evaluate_prog_explanations: false
evaluate_in_context_variability: false
train_baseline_learner: true
# ==============================================================================

# FEATURE ATTRIBUTION
# ==============================================================================
explainer_limit: 20  # Number of features to be explained for feature attribution-based scores
explainer_econml: "shap"
explainer_torch: "integrated_gradients"
# ==============================================================================

# COMPUTE
# ==============================================================================
n_jobs: -1  # Number of jobs to run in parallel, -1 means using all processors
# ==============================================================================

# PLOTTING RESULTS
# ==============================================================================
# Metrics plots
plot_results: true

metrics_to_plot:
  - "Policy Precision"
  - "Pred Precision"
  - "GT In-context Var"
  - "GT Total Expertise"
  - "GT Prog Expertise"
  - "GT Tre Expertise"
  - "GT Pred Expertise"
  - "RMSE Y0"
  - "RMSE Y1"
  - "PEHE"
  - "Upd. GT Prog Expertise"
  - "Upd. GT Tre Expertise"
  - "Upd. GT Pred Expertise"
  - "Factual RMSE Y0"
  - "CF RMSE Y0"
  - "Factual RMSE Y1"
  - "CF RMSE Y1"
  - "Factual RMSE"
  - "CF RMSE"
  - "Normalized F-RMSE"
  - "Normalized CF-RMSE"
  - "Normalized PEHE"
  - "Swap AUROC@all"
  - "Swap AUPRC@all"
  - "FC PEHE"
  - "FC CF-RMSE"
  - "FC Swap AUROC"
  - "FC Swap AUPRC"
  - "Pred: Pred features ACC"
  - "Pred: Prog features ACC"
  - "Prog: Prog features ACC"
  - "Prog: Pred features ACC"
  - "GT Expertise Ratio"
  - "GT-ES Pred Expertise Diff"
  - "GT-ES Prog Expertise Diff"
  - "GT-ES Total Expertise Diff"
  - "T Distribution: Train"
  - "T Distribution: Test"
  - "Training Duration"

# Dim reduction plots
dim_reduction_method: "umap"  # alternative: "pca"
dim_reduction_on_important_features: false
top_k_shap_features: 5
num_levels: 20
# ==============================================================================

# LOGGING AND W&B
# ==============================================================================
log_level: "INFO"  # INFO, DEBUG, WARNING, ERROR, CRITICAL
debug: false
verbose: 1
# ==============================================================================

# MODELS
# ==============================================================================
diffpo_path: "${repo_path}/catenets/models/diffpo"
# ==============================================================================

# MULTIRUN SETTINGS
# ==============================================================================
hydra:
  mode: MULTIRUN
  sweeper:
    params:
      experiment@_global_: propensity_scale_sensitivity_toy
      simulator: ty_simulator_toy
      # dataset: str(toy_data), str(), ...
# ==============================================================================