Skip to content

Commit

Permalink
chore: train leak
Browse files Browse the repository at this point in the history
  • Loading branch information
WLM1ke committed Jan 10, 2025
1 parent 559f173 commit dc6ccd3
Show file tree
Hide file tree
Showing 5 changed files with 153 additions and 36 deletions.
24 changes: 12 additions & 12 deletions poptimizer/domain/evolve/evolve.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,23 +36,23 @@ class Model(domain.Entity):
cov: list[list[FiniteFloat]] = Field(default_factory=list)
risk_tolerance: FiniteFloat = Field(default=0, ge=0, le=1)

@model_validator(mode="after")
def _match_length(self) -> Self:
n = len(self.tickers)
# @model_validator(mode="after")
# def _match_length(self) -> Self:
# n = len(self.tickers)

if len(self.mean) != n:
raise ValueError("invalid mean")
# if len(self.mean) != n:
# raise ValueError("invalid mean")

if any(len(row) != 1 for row in self.mean):
raise ValueError("invalid mean")
# if any(len(row) != 1 for row in self.mean):
# raise ValueError("invalid mean")

if len(self.cov) != n:
raise ValueError("invalid cov")
# if len(self.cov) != n:
# raise ValueError("invalid cov")

if any(len(row) != n for row in self.cov):
raise ValueError("invalid cov")
# if any(len(row) != n for row in self.cov):
# raise ValueError("invalid cov")

return self
# return self

def __str__(self) -> str:
genes = genotype.Genotype.model_validate(self.genes)
Expand Down
113 changes: 113 additions & 0 deletions poptimizer/leak.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor


def prepare_data_loaders():
    """Build train/test DataLoaders over FashionMNIST (downloaded into ./data).

    Returns:
        (train_dataloader, test_dataloader), both batched at 64.
    """
    batch_size = 64
    common = {"root": "data", "download": True, "transform": ToTensor()}

    train_split = datasets.FashionMNIST(train=True, **common)
    test_split = datasets.FashionMNIST(train=False, **common)

    return (
        DataLoader(train_split, batch_size=batch_size),
        DataLoader(test_split, batch_size=batch_size),
    )


def get_device():
    """Pick the best available accelerator: CUDA, then Apple MPS, then CPU."""
    if torch.cuda.is_available():
        device = "cuda"
    elif torch.backends.mps.is_available():
        device = "mps"
    else:
        device = "cpu"

    print(f"Using {device} device")
    return device


def prepare_net(device):
    """Create the MLP classifier, its loss criterion, and an AdamW optimizer.

    Args:
        device: target device string for the model parameters.

    Returns:
        (model, loss_fn, optimizer) ready for the train/test loops.
    """

    class NeuralNetwork(nn.Module):
        """28x28 image -> flatten -> 512 -> 512 -> 10 class logits."""

        def __init__(self):
            super().__init__()
            self.flatten = nn.Flatten()
            layers = [
                nn.Linear(28 * 28, 512),
                nn.ReLU(),
                nn.Linear(512, 512),
                nn.ReLU(),
                nn.Linear(512, 10),
            ]
            self.linear_relu_stack = nn.Sequential(*layers)

        def forward(self, x):
            return self.linear_relu_stack(self.flatten(x))

    model = NeuralNetwork().to(device)

    return model, nn.CrossEntropyLoss(), torch.optim.AdamW(model.parameters())


def train(dataloader, model, loss_fn, optimizer, device):
    """Run one training epoch over *dataloader*, logging loss every 100 batches.

    Args:
        dataloader: yields (X, y) batches; must wrap a sized dataset.
        model: module producing logits for X.
        loss_fn: criterion comparing logits to y.
        optimizer: optimizer built over model.parameters().
        device: device the batches are moved to before the forward pass.
    """
    size = len(dataloader.dataset)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # Compute prediction error
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if batch % 100 == 0:
            # Fix: `loss`/`current` were computed here but never used (the
            # progress print had been deleted), which also left `size` dead.
            loss, current = loss.item(), (batch + 1) * len(X)
            print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")


def test(dataloader, model, loss_fn, device):
size = len(dataloader.dataset)
num_batches = len(dataloader)
model.eval()
test_loss, correct = 0, 0
with torch.no_grad():
for X, y in dataloader:
X, y = X.to(device), y.to(device)
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= num_batches
correct /= size

return pred.cpu().numpy()


def run():
    """Train for 5 epochs and return whatever `test` produced on the last one."""
    device = get_device()
    model, loss_fn, optimizer = prepare_net(device)
    train_dataloader, test_dataloader = prepare_data_loaders()

    com = None
    for _ in range(5):
        train(train_dataloader, model, loss_fn, optimizer, device)
        com = test(test_dataloader, model, loss_fn, device)

    # Last-epoch predictions from `test` (a numpy array) — kept alive by the
    # caller as part of the leak experiment.
    return com


# Leak-repro driver: train/evaluate forever while watching MPS allocator growth.
# NOTE(review): `run()` returns a numpy array (last-batch logits), so after the
# first iteration `com` stops being an int and `+=` becomes array broadcasting —
# presumably this exists only to keep the arrays reachable between iterations;
# confirm the intent.
com = 0
while True:
    com += run()
    torch.mps.empty_cache()
    # Driver-allocated MPS memory, divided by 1024 (KiB, assuming the API
    # reports bytes — TODO confirm). Should plateau if nothing leaks.
    print(torch.mps.driver_allocated_memory() // 1024)

# 420 — presumably the observed steady-state figure from a run; confirm.
16 changes: 10 additions & 6 deletions poptimizer/use_cases/dl/trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import time
from typing import Literal, cast

import numpy as np
import torch
import tqdm
from pydantic import BaseModel
Expand Down Expand Up @@ -116,8 +117,11 @@ def _run(
net = self._prepare_net(cfg)
self._train(net, cfg.scheduler, data, cfg.batch.size)

model.alfas = self._test(net, cfg, forecast_days, data)
model.mean, model.cov = self._forecast(net, forecast_days, data)
# model.alfas = self._test(net, cfg, forecast_days, data)
# model.mean, model.cov = self._forecast(net, forecast_days, data)

torch.mps.empty_cache()
print(torch.mps.driver_allocated_memory() // 1024)

def _train(
self,
Expand Down Expand Up @@ -183,10 +187,10 @@ def _test(

loss, mean, std = net.loss_and_forecast_mean_and_std(self._batch_to_device(batch))
rez = risk.optimize(
mean,
std,
batch[features.FeatTypes.LABEL].cpu().numpy() - 1,
batch[features.FeatTypes.RETURNS].cpu().numpy(),
np.copy(mean),
np.copy(std),
np.copy(batch[features.FeatTypes.LABEL].cpu().numpy()) - 1,
np.copy(batch[features.FeatTypes.RETURNS].cpu().numpy()),
cfg.risk,
forecast_days,
)
Expand Down
14 changes: 7 additions & 7 deletions poptimizer/use_cases/evolve/evolve.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,19 +64,19 @@ async def __call__(
) -> handler.ModelDeleted | handler.ModelEvaluated:
await self._init_evolution(ctx)
evolution = await self._init_step(ctx, msg)
model = await self._get_model(ctx, evolution)
model = await ctx.next_model_for_update()
self._lgr.info("Day %s step %d: %s - %s", evolution.day, evolution.step, evolution.state, model)

try:
await self._update_model_metrics(ctx, evolution, model)
except* errors.DomainError as err:
await self._delete_model_on_error(ctx, evolution, model, err)
# await self._delete_model_on_error(ctx, evolution, model, err)
pass
# event = handler.ModelDeleted(day=evolution.day, portfolio_ver=evolution.portfolio_ver, uid=model.uid)
# else:
# return await self._eval_model(ctx, evolution, model)

event = handler.ModelDeleted(day=evolution.day, portfolio_ver=evolution.portfolio_ver, uid=model.uid)
else:
return await self._eval_model(ctx, evolution, model)

return event
return handler.ModelDeleted(day=msg.day, portfolio_ver=msg.portfolio_ver, uid=model.uid)

async def _init_evolution(self, ctx: Ctx) -> None:
if not await ctx.count_models():
Expand Down
22 changes: 11 additions & 11 deletions poptimizer/use_cases/portfolio/forecasts.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,20 +23,20 @@ async def __call__(
ctx: handler.Ctx,
msg: handler.ModelDeleted | handler.ModelEvaluated,
) -> handler.ForecastsAnalyzed | None:
forecast = await ctx.get_for_update(forecasts.Forecast)
if forecast.day < msg.day:
forecast.init_day()
# forecast = await ctx.get_for_update(forecasts.Forecast)
# if forecast.day < msg.day:
# forecast.init_day()

match msg:
case handler.ModelDeleted():
forecast.models -= {msg.uid}
case handler.ModelEvaluated():
forecast.models.add(msg.uid)
# match msg:
# case handler.ModelDeleted():
# forecast.models -= {msg.uid}
# case handler.ModelEvaluated():
# forecast.models.add(msg.uid)

if forecast.update_required(msg.portfolio_ver):
await self._update(ctx, forecast)
# if forecast.update_required(msg.portfolio_ver):
# await self._update(ctx, forecast)

return handler.ForecastsAnalyzed(day=forecast.day, portfolio_ver=forecast.portfolio_ver)
return handler.ForecastsAnalyzed(day=msg.day, portfolio_ver=msg.portfolio_ver)

async def _update(
self,
Expand Down

0 comments on commit dc6ccd3

Please sign in to comment.