
Commit

Added a VAE class that returns the latent instead of Y; this class has an inverse_transform that runs the decoder.
marjanfamili committed Mar 7, 2025
1 parent 015b41d commit 25dfb88
Showing 4 changed files with 418 additions and 353 deletions.
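
The new class itself lives in autoemulate/preprocess_target.py, which did not render on this page. For orientation only, here is a minimal sketch of what a VAE output preprocessor with the described interface could look like: fit() trains the VAE on the outputs, transform() returns the latent representation instead of Y, and inverse_transform() runs the decoder. It assumes PyTorch and the scikit-learn transformer API; apart from the constructor arguments visible in the diff below (latent_dim, epochs, batch_size, hidden_dims), every name and default here is hypothetical and not the committed implementation.

import numpy as np
import torch
import torch.nn as nn
from sklearn.base import BaseEstimator, TransformerMixin
from torch.utils.data import DataLoader


class VAESketch(nn.Module):
    """Hypothetical VAE: encoder -> (mu, log_var), decoder mirrors the encoder."""

    def __init__(self, output_dim, hidden_dims, latent_dim):
        super().__init__()
        layers, d = [], output_dim
        for h in hidden_dims:                      # encoder: Y -> hidden stack
            layers += [nn.Linear(d, h), nn.ReLU()]
            d = h
        self.encoder = nn.Sequential(*layers)
        self.fc_mu = nn.Linear(d, latent_dim)
        self.fc_log_var = nn.Linear(d, latent_dim)
        layers, d = [], latent_dim
        for h in reversed(hidden_dims):            # decoder: latent -> Y
            layers += [nn.Linear(d, h), nn.ReLU()]
            d = h
        layers.append(nn.Linear(d, output_dim))
        self.decoder = nn.Sequential(*layers)

    def encode(self, y):
        h = self.encoder(y)
        return self.fc_mu(h), self.fc_log_var(h)

    def decode(self, z):
        return self.decoder(z)

    def forward(self, y):
        mu, log_var = self.encode(y)
        z = mu + torch.exp(0.5 * log_var) * torch.randn_like(mu)  # reparameterisation
        return self.decode(z), mu, log_var


class VAEOutputPreprocessorSketch(BaseEstimator, TransformerMixin):
    """Hypothetical wrapper: transform() -> latent, inverse_transform() -> decoded Y."""

    def __init__(self, latent_dim=4, epochs=150, batch_size=8, hidden_dims=(32, 16, 8)):
        self.latent_dim = latent_dim
        self.epochs = epochs
        self.batch_size = batch_size
        self.hidden_dims = hidden_dims

    def fit(self, y, X=None):
        # Train the VAE on the outputs, not the inputs.
        y = torch.as_tensor(np.asarray(y), dtype=torch.float32)
        self.vae_ = VAESketch(y.shape[1], list(self.hidden_dims), self.latent_dim)
        optimiser = torch.optim.Adam(self.vae_.parameters(), lr=1e-3)
        loader = DataLoader(y, batch_size=self.batch_size, shuffle=True)
        for _ in range(self.epochs):
            for batch in loader:
                recon, mu, log_var = self.vae_(batch)
                kl = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
                loss = nn.functional.mse_loss(recon, batch, reduction="sum") + kl
                optimiser.zero_grad()
                loss.backward()
                optimiser.step()
        return self

    def transform(self, y):
        # Return the latent representation instead of Y.
        y = torch.as_tensor(np.asarray(y), dtype=torch.float32)
        with torch.no_grad():
            mu, _ = self.vae_.encode(y)
        return mu.numpy()

    def inverse_transform(self, z):
        # Run the decoder to map latent predictions back to output space.
        z = torch.as_tensor(np.asarray(z), dtype=torch.float32)
        with torch.no_grad():
            return self.vae_.decode(z).numpy()
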
1 change: 1 addition & 0 deletions autoemulate/compare.py
@@ -475,6 +475,7 @@ def evaluate(self, model=None, multioutput="uniform_average"):
methods=self.preprocess_outputs
).inverse_transform(y_pred)


y_true = self.y[self.test_idxs]

scores = {}
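
The inverse_transform call above is where latent-space predictions are mapped back to the original output space before scoring. A minimal illustration of that flow (not the repository's code; the helper, its arguments, and the choice of R² are hypothetical):

from sklearn.metrics import r2_score

def score_in_original_space(model, output_preprocessor, X_test, y_true):
    """Predict in latent space, decode, then score against the raw targets."""
    z_pred = model.predict(X_test)                           # predictions in latent space
    y_pred = output_preprocessor.inverse_transform(z_pred)   # decoder maps back to y space
    return r2_score(y_true, y_pred, multioutput="uniform_average")
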
19 changes: 12 additions & 7 deletions autoemulate/model_processing.py
@@ -2,7 +2,7 @@
from sklearn.multioutput import MultiOutputRegressor
from sklearn.pipeline import Pipeline

-from autoemulate.preprocess_target import OutputOnlyPreprocessor
+from autoemulate.preprocess_target import OutputOnlyPreprocessor, VAEOutputPreprocessor


def _turn_models_into_multioutput(models, y):
@@ -59,12 +59,17 @@ def _wrap_models_in_pipeline(
    for model in models:
        steps = []
        if preprocess_outputs:
-            steps.append(
-                (
-                    "output_preprocessor",
-                    OutputOnlyPreprocessor(methods=preprocess_outputs),
-                )
-            )
+            # If preprocess_outputs is 'vae', use our custom VAE preprocessor
+            if preprocess_outputs == 'vae':
+                steps.append(("output_preprocessor", VAEOutputPreprocessor(
+                    latent_dim=4,  # Dimension of latent space
+                    epochs=150,  # Number of training epochs
+                    batch_size=8,  # Batch size for training
+                    hidden_dims=[32, 16, 8]  # Architecture of encoder/decoder
+                )))
+            else:
+                # Otherwise use the original OutputOnlyPreprocessor
+                steps.append(("output_preprocessor", OutputOnlyPreprocessor(methods=preprocess_outputs)))
        if scale:
            steps.append(("scaler", scaler))
        if reduce_dim:
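
For readers wiring this idea up outside AutoEmulate: the closest standard scikit-learn analogue of transforming targets before fitting and inverting them at predict time is TransformedTargetRegressor. The sketch below uses PCA as a stand-in for the VAE preprocessor, since the committed class is not shown on this page; it is illustrative and not how the library itself composes the pipeline.

import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.decomposition import PCA
from sklearn.linear_model import Ridge

rng = np.random.default_rng(0)
X = rng.random((100, 5))
Y = rng.random((100, 10))

# check_inverse=False because projecting 10 outputs onto 4 components is lossy.
reg = TransformedTargetRegressor(
    regressor=Ridge(),
    transformer=PCA(n_components=4),
    check_inverse=False,
)
reg.fit(X, Y)            # targets are reduced to 4 dimensions before fitting
Y_pred = reg.predict(X)  # predictions are mapped back to the original 10 dimensions
print(Y_pred.shape)      # (100, 10)
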
