
Commit

modified: losses.py
chiragnagpal committed Nov 26, 2020
1 parent 3078049 commit 7590995
Showing 1 changed file with 153 additions and 2 deletions.
155 changes: 153 additions & 2 deletions dsm/losses.py
@@ -39,6 +39,30 @@
import torch
import torch.nn as nn

def _normal_loss(model, t, e):

  # shape holds the component means, scale the log standard deviations.
  shape, scale = model.get_shape_scale()

  k_ = shape.expand(t.shape[0], -1)
  b_ = scale.expand(t.shape[0], -1)

  ll = 0.
  for g in range(model.k):

    mu = k_[:, g]
    sigma = b_[:, g]

    # Log-density of Normal(mu, exp(sigma)) at t ...
    f = - sigma - 0.5*np.log(2*np.pi)
    f = f - 0.5*torch.div((t - mu)**2, torch.exp(2*sigma))
    # ... and log-survival via the error function.
    s = torch.div(t - mu, torch.exp(sigma)*np.sqrt(2))
    s = 0.5 - 0.5*torch.erf(s)
    s = torch.log(s)

    # Uncensored samples contribute the density, censored ones the survival.
    uncens = np.where(e == 1)[0]
    cens = np.where(e == 0)[0]
    ll += f[uncens].sum() + s[cens].sum()

  return -ll.mean()
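
For reference, the two per-component quantities built above are the Normal log-density f and the log-survival s, with shape playing the role of the mean and scale the log of the standard deviation (hence the torch.exp(sigma) calls):

\[
f = -\sigma - \tfrac{1}{2}\log(2\pi) - \frac{(t-\mu)^2}{2e^{2\sigma}},
\qquad
s = \log\left(\tfrac{1}{2} - \tfrac{1}{2}\operatorname{erf}\!\left(\frac{t-\mu}{e^{\sigma}\sqrt{2}}\right)\right).
\]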


def _lognormal_loss(model, t, e):

@@ -96,10 +120,64 @@ def unconditional_loss(model, t, e):
    return _weibull_loss(model, t, e)
  elif model.dist == 'LogNormal':
    return _lognormal_loss(model, t, e)
  elif model.dist == 'Normal':
    return _normal_loss(model, t, e)
  else:
    raise NotImplementedError('Distribution: '+model.dist+
                              ' not implemented yet.')
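
A minimal sketch of exercising the new dispatch path, assuming the module is importable as dsm.losses and that the model object only needs to expose dist, k, and get_shape_scale() for this code path; the toy class below is hypothetical and not part of the package:

# Hypothetical usage sketch, not part of this commit.
import torch
from dsm.losses import unconditional_loss

class _ToyNormalModel:
  dist = 'Normal'
  k = 2
  def get_shape_scale(self):
    # component means and log standard deviations
    return (torch.zeros(self.k).double(),
            torch.zeros(self.k).double())

t = torch.tensor([0.5, 1.0, 2.0]).double()  # event / censoring times
e = torch.tensor([1, 0, 1])                 # 1 = event observed, 0 = censored
loss = unconditional_loss(_ToyNormalModel(), t, e)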

def _conditional_normal_loss(model, x, t, e, elbo=True):

  alpha = model.discount
  shape, scale, logits = model.forward(x)

  lossf = []  # per-component log-densities
  losss = []  # per-component log-survivals

  k_ = shape
  b_ = scale

  for g in range(model.k):

    mu = k_[:, g]
    sigma = b_[:, g]

    f = - sigma - 0.5*np.log(2*np.pi)
    f = f - 0.5*torch.div((t - mu)**2, torch.exp(2*sigma))
    s = torch.div(t - mu, torch.exp(sigma)*np.sqrt(2))
    s = 0.5 - 0.5*torch.erf(s)
    s = torch.log(s)

    lossf.append(f)
    losss.append(s)

  losss = torch.stack(losss, dim=1)
  lossf = torch.stack(lossf, dim=1)

  if elbo:

    lossg = nn.Softmax(dim=1)(logits)
    losss = lossg*losss
    lossf = lossg*lossf

    losss = losss.sum(dim=1)
    lossf = lossf.sum(dim=1)

  else:

    lossg = nn.LogSoftmax(dim=1)(logits)
    losss = lossg + losss
    lossf = lossg + lossf

    losss = torch.logsumexp(losss, dim=1)
    lossf = torch.logsumexp(lossf, dim=1)

  uncens = np.where(e.cpu().data.numpy() == 1)[0]
  cens = np.where(e.cpu().data.numpy() == 0)[0]
  ll = lossf[uncens].sum() + alpha*losss[cens].sum()

  return -ll/float(len(uncens)+len(cens))
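
The elbo flag changes how the k mixture components are combined: with elbo=True the softmax weights multiply the per-component log-likelihoods (a lower bound on the mixture log-likelihood, by Jensen's inequality), while with elbo=False the exact mixture log-likelihood is computed with a log-sum-exp over log-weights plus component terms:

\[
\text{ELBO:}\;\; \sum_{g=1}^{k} w_g(x)\,\ell_g(t),
\qquad
\text{exact:}\;\; \log \sum_{g=1}^{k} \exp\big(\log w_g(x) + \ell_g(t)\big),
\]

where \(w_g(x)\) is the softmax of the logits and \(\ell_g\) stands for the component log-density f (used for uncensored samples) or log-survival s (used for censored samples, discounted by alpha).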

def _conditional_lognormal_loss(model, x, t, e, elbo=True):

  alpha = model.discount
@@ -150,7 +228,7 @@ def _conditional_lognormal_loss(model, x, t, e, elbo=True):
  cens = np.where(e.cpu().data.numpy() == 0)[0]
  ll = lossf[uncens].sum() + alpha*losss[cens].sum()

-  return -ll.mean()
+  return -ll/float(len(uncens)+len(cens))


def _conditional_weibull_loss(model, x, t, e, elbo=True):
@@ -199,7 +277,7 @@ def _conditional_weibull_loss(model, x, t, e, elbo=True):
  cens = np.where(e.cpu().data.numpy() == 0)[0]
  ll = lossf[uncens].sum() + alpha*losss[cens].sum()

-  return -ll.mean()
+  return -ll/float(len(uncens)+len(cens))
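
In both existing conditional losses the only change is the return value: -ll.mean() applied to a scalar is just -ll, so the new form turns the objective into an average per-sample (discounted) negative log-likelihood,

\[
\mathcal{L} = -\frac{1}{n}\Big(\sum_{i:\,e_i=1} \text{lossf}_i \;+\; \alpha \sum_{i:\,e_i=0} \text{losss}_i\Big),
\qquad n = \#\{e_i=1\} + \#\{e_i=0\},
\]

which keeps the loss on a comparable scale across batch sizes.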


def conditional_loss(model, x, t, e, elbo=True):
@@ -208,6 +286,8 @@ def conditional_loss(model, x, t, e, elbo=True):
    return _conditional_weibull_loss(model, x, t, e, elbo)
  elif model.dist == 'LogNormal':
    return _conditional_lognormal_loss(model, x, t, e, elbo)
  elif model.dist == 'Normal':
    return _conditional_normal_loss(model, x, t, e, elbo)
  else:
    raise NotImplementedError('Distribution: '+model.dist+
                              ' not implemented yet.')
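
A minimal sketch of the conditional dispatcher, assuming only that the model exposes dist, k, discount, and a forward(x) returning per-sample (shape, scale, logits) tensors of size (n, k); the toy class below is hypothetical and not part of the package:

# Hypothetical usage sketch, not part of this commit.
import torch
from dsm.losses import conditional_loss

class _ToyConditionalModel:
  dist = 'Normal'
  k = 2
  discount = 0.5  # weight on the censored (survival) terms
  def forward(self, x):
    n = x.shape[0]
    shape = torch.zeros(n, self.k).double()   # component means
    scale = torch.zeros(n, self.k).double()   # component log std devs
    logits = torch.zeros(n, self.k).double()  # mixture logits
    return shape, scale, logits

x = torch.randn(4, 3).double()
t = torch.tensor([0.5, 1.0, 2.0, 1.5]).double()
e = torch.tensor([1, 0, 1, 1])

lower_bound = conditional_loss(_ToyConditionalModel(), x, t, e, elbo=True)
exact_nll = conditional_loss(_ToyConditionalModel(), x, t, e, elbo=False)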
@@ -284,13 +364,84 @@ def _lognormal_cdf(model, x, t_horizon):

  return cdfs

def _normal_cdf(model, x, t_horizon):

  squish = nn.LogSoftmax(dim=1)

  shape, scale, logits = model.forward(x)
  logits = squish(logits)

  k_ = shape
  b_ = scale

  t_horz = torch.tensor(t_horizon).double()
  t_horz = t_horz.repeat(shape.shape[0], 1)

  cdfs = []

  for j in range(len(t_horizon)):

    t = t_horz[:, j]
    lcdfs = []

    for g in range(model.k):

      mu = k_[:, g]
      sigma = b_[:, g]

      # Log-survival of the g-th Normal component at horizon t.
      s = torch.div(t - mu, torch.exp(sigma)*np.sqrt(2))
      s = 0.5 - 0.5*torch.erf(s)
      s = torch.log(s)
      lcdfs.append(s)

    # Mixture log-survival: log-sum-exp of component terms plus log-weights.
    lcdfs = torch.stack(lcdfs, dim=1)
    lcdfs = lcdfs+logits
    lcdfs = torch.logsumexp(lcdfs, dim=1)
    cdfs.append(lcdfs.detach().numpy())

  return cdfs
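
Despite the name, the values collected in cdfs are log survival probabilities of the mixture at each requested horizon, one array per horizon:

\[
\log S(t \mid x) = \log \sum_{g=1}^{k} \exp\big(\log w_g(x) + \log S_g(t)\big),
\qquad
S_g(t) = \tfrac{1}{2} - \tfrac{1}{2}\operatorname{erf}\!\left(\frac{t-\mu_g}{e^{\sigma_g}\sqrt{2}}\right).
\]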

def _normal_mean(model, x):

  squish = nn.Softmax(dim=1)
  shape, scale, logits = model.forward(x)

  logits = squish(logits)
  k_ = shape
  b_ = scale

  lmeans = []
  for g in range(model.k):

    mu = k_[:, g]
    sigma = b_[:, g]  # unused: the mean of a Normal does not depend on its scale
    lmeans.append(mu)

  lmeans = torch.stack(lmeans, dim=1)
  lmeans = lmeans*logits
  lmeans = torch.sum(lmeans, dim=1)

  return lmeans.detach().numpy()
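
The predicted mean is the softmax-weighted average of the component means; the scale parameters are read but not needed here:

\[
\mathbb{E}[T \mid x] = \sum_{g=1}^{k} w_g(x)\,\mu_g(x).
\]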

def predict_mean(model, x):
  # Wrap prediction in a no-grad context so no autograd graph is built.
  with torch.no_grad():
    if model.dist == 'Normal':
      return _normal_mean(model, x)
    else:
      raise NotImplementedError('Mean of Distribution: '+model.dist+
                                ' not implemented yet.')




def predict_cdf(model, x, t_horizon):
  # Wrap prediction in a no-grad context so no autograd graph is built.
  with torch.no_grad():
    if model.dist == 'Weibull':
      return _weibull_cdf(model, x, t_horizon)
    elif model.dist == 'LogNormal':
      return _lognormal_cdf(model, x, t_horizon)
    elif model.dist == 'Normal':
      return _normal_cdf(model, x, t_horizon)
    else:
      raise NotImplementedError('Distribution: '+model.dist+
                                ' not implemented yet.')
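
A hedged sketch of calling the two prediction helpers end to end, again with a hypothetical toy model (any object with dist == 'Normal', k, and a forward(x) returning (n, k) tensors would do); note that predict_cdf returns log survival values, so they must be exponentiated to obtain probabilities:

# Hypothetical usage sketch, not part of this commit.
import numpy as np
import torch
from dsm.losses import predict_cdf, predict_mean

class _ToyModel:
  dist = 'Normal'
  k = 2
  def forward(self, x):
    n = x.shape[0]
    return (torch.zeros(n, self.k).double(),  # means
            torch.zeros(n, self.k).double(),  # log std devs
            torch.zeros(n, self.k).double())  # mixture logits

x = torch.randn(4, 3).double()
log_surv = predict_cdf(_ToyModel(), x, [0.5, 1.0, 2.0])
surv = [np.exp(s) for s in log_surv]  # survival probabilities per horizon
means = predict_mean(_ToyModel(), x)  # expected event times per sample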
