見出し画像

pytorch_lightning==1.2でaccuracy計算時のエラー対応について

pytorch_lightningを使ってaccuracyを計算すると下記エラーが発生しました。

ValueError: The `preds` should be probabilities, but values were detected outside of [0,1] range.

Netクラスは下記の様にしています。

from pytorch_lightning.metrics.functional import  accuracy


class Net(pl.LightningModule):
 """Small CNN classifier.

 NOTE(review): this version fails under pytorch_lightning==1.2 —
 ``accuracy(y, t)`` receives raw logits, which lie outside [0, 1], so
 the metric raises:
 ``ValueError: The `preds` should be probabilities, but values were
 detected outside of [0,1] range.``
 See the corrected version below, which passes argmax class labels.
 """

 def __init__(self):
   super().__init__()

   # 1 input channel -> 3 feature maps; padding=1 keeps spatial size.
   self.conv = nn.Conv2d(in_channels=1, out_channels=3, kernel_size=3, padding=1)
   self.bn = nn.BatchNorm2d(3)
   # 588 = 3 * 14 * 14 — presumably a 28x28 input halved by the
   # 2x2 max-pool below; TODO confirm against the dataloader.
   self.fc = nn.Linear(588, 10)

 def forward(self, x):
   # Returns raw logits of shape (batch, 10) — no softmax applied.
   h = self.conv(x)
   h = F.relu(h)
   h = self.bn(h)
   h = F.max_pool2d(h, kernel_size=2, stride=2)
   h = h.view(-1, 588)
   h = self.fc(h)
   return h

 def training_step(self, batch, batch_idx):
   x, t = batch
   y = self.forward(x)
   # cross_entropy accepts raw logits...
   loss = F.cross_entropy(y, t)
   self.log('train_loss', loss, on_step=False, on_epoch=True, prog_bar=True)
   # ...but accuracy() in PL 1.2 does NOT: `y` here is unnormalized
   # logits, which triggers the ValueError described above.
   self.log('train_acc', accuracy(y, t), on_step=False, on_epoch=True, prog_bar=True)
   return loss

 def validation_step(self, batch, batch_idx):
   x, t = batch
   y = self.forward(x)
   loss = F.cross_entropy(y, t)
   self.log('val_loss', loss, on_step=False, on_epoch=True, prog_bar=True)
   # Same problem as training_step: raw logits passed to accuracy().
   self.log('val_acc', accuracy(y, t), on_step=False, on_epoch=True, prog_bar=True)
   return loss

 def test_step(self, batch, batch_idx):
   x, t = batch
   y = self.forward(x)
   loss = F.cross_entropy(y, t)
   self.log('test_loss', loss, on_step=False, on_epoch=True, prog_bar=True)
   # Same problem as training_step: raw logits passed to accuracy().
   self.log('test_acc', accuracy(y, t), on_step=False, on_epoch=True, prog_bar=True)
   return loss

 def configure_optimizers(self):
   # Plain SGD, fixed learning rate.
   optimizer = torch.optim.SGD(self.parameters(), lr=0.01)
   return optimizer

pytorch_lightning==1.1まではaccuracyを計算する際にy(ロジットのまま)とtを引数で渡すとよかったですが、1.2からはyをそのまま渡せず、[0,1]の確率か、torch.argmaxで求めたクラスラベルとして渡す必要があるようです。

yをtorch.argmaxで求めたクラスラベル(予測クラスの整数インデックス)で渡すように修正するとうまく動きました。

class Net(pl.LightningModule):
 """Small CNN classifier, fixed for pytorch_lightning>=1.2.

 ``accuracy`` no longer accepts raw logits, so each step converts the
 logits to hard class labels with ``torch.argmax`` before computing the
 metric. The three step methods were identical except for the logging
 prefix, so the shared logic lives in ``_shared_step``.
 """

 def __init__(self):
   super().__init__()

   # 1 input channel -> 3 feature maps; padding=1 keeps spatial size.
   self.conv = nn.Conv2d(in_channels=1, out_channels=3, kernel_size=3, padding=1)
   self.bn = nn.BatchNorm2d(3)
   # 588 = 3 * 14 * 14 — presumably a 28x28 input halved by the
   # 2x2 max-pool below; TODO confirm against the dataloader.
   self.fc = nn.Linear(588, 10)

 def forward(self, x):
   """Return raw class logits of shape (batch, 10)."""
   h = self.conv(x)
   h = F.relu(h)
   h = self.bn(h)
   h = F.max_pool2d(h, kernel_size=2, stride=2)
   h = h.view(-1, 588)
   h = self.fc(h)
   return h

 def _shared_step(self, batch, stage):
   """Compute loss and accuracy for one batch; log both under *stage*.

   *stage* is one of 'train' / 'val' / 'test' and only affects the
   metric names ('<stage>_loss', '<stage>_acc'). Returns the loss.
   """
   x, t = batch
   y = self.forward(x)
   # cross_entropy takes raw logits directly.
   loss = F.cross_entropy(y, t)
   # accuracy() in PL>=1.2 rejects logits outside [0,1]; pass the
   # predicted integer class labels instead.
   y_label = torch.argmax(y, dim=1)
   self.log(stage + '_loss', loss, on_step=False, on_epoch=True, prog_bar=True)
   self.log(stage + '_acc', accuracy(y_label, t), on_step=False, on_epoch=True, prog_bar=True)
   return loss

 def training_step(self, batch, batch_idx):
   return self._shared_step(batch, 'train')

 def validation_step(self, batch, batch_idx):
   return self._shared_step(batch, 'val')

 def test_step(self, batch, batch_idx):
   return self._shared_step(batch, 'test')

 def configure_optimizers(self):
   # Plain SGD, fixed learning rate.
   optimizer = torch.optim.SGD(self.parameters(), lr=0.01)
   return optimizer

自分用のメモで残しました。



この記事が気に入ったらサポートをしてみませんか?