
I am a beginner in this area; please help me find a solution. I have tokenized the texts and the summaries with RobertaTokenizerFast (max_token_length of 200 and 50, respectively). The plan is to use RoBERTa as the first layer, then compress its output with conv2d, maxpool2d and a dense layer so that it matches the target summary. The output of the last dense layer is a float vector, so I normalized the target vectors (which contain long input_ids) to float values between 0 and 1. Finally, I used CrossEntropyLoss to compute the loss.
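For reference, this is roughly how the text/summary pairs are tokenized; the column names ('text', 'summary') and the encode helper are only illustrative, not my exact code:

from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained('roberta-base')

def encode(example):
  # text -> at most 200 tokens, summary -> at most 50 tokens, both padded to max length
  text_enc = tokenizer(example['text'], max_length=200,
                       padding='max_length', truncation=True, return_tensors='pt')
  summary_enc = tokenizer(example['summary'], max_length=50,
                          padding='max_length', truncation=True, return_tensors='pt')
  return {
    'text_input_ids': text_enc['input_ids'].squeeze(0),
    'text_attention_mask': text_enc['attention_mask'].squeeze(0),
    'labels': summary_enc['input_ids'].squeeze(0),
    'labels_attention_mask': summary_enc['attention_mask'].squeeze(0),
  }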

import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from transformers import RobertaModel, AdamW

BATCH_SIZE = 20

class Summarizer(pl.LightningModule):
  def __init__(self):
    super().__init__()
    self.roberta = RobertaModel.from_pretrained('roberta-base', return_dict = True, is_decoder=True, use_cache=False) 
    self.convlayer = torch.nn.Conv2d(in_channels=BATCH_SIZE, out_channels=1, kernel_size=4)  ## BATCH_SIZE=20
    self.relu = torch.nn.ReLU()
    self.fc = torch.nn.Linear(in_features=97*381, out_features=50)
    self.cross_entropy_loss = torch.nn.CrossEntropyLoss()

  def forward(self, input_ids, attention_mask, labels):
    output = self.roberta(input_ids=input_ids, attention_mask=attention_mask)
    x = output['last_hidden_state']                # (BATCH_SIZE, 200, 768)
    x = torch.unsqueeze(x, 0)                      # (1, BATCH_SIZE, 200, 768)
    x = self.convlayer(x)                          # batch dim treated as channels -> (1, 1, 197, 765)
    x = self.relu(x)
    x = F.max_pool2d(x, kernel_size=4, stride=2)   # (1, 1, 97, 381)
    x = x.squeeze().flatten()                      # (97*381,)
    x = self.fc(x)                                 # (50,)
    output = self.relu(x)
    crossent_loss = self.cross_entropy_loss(labels, output)
    return crossent_loss, output
  
  def training_step(self, batch, batch_idx):
    input_ids = batch['text_input_ids']
    attention_mask = batch['text_attention_mask']

    l = batch['labels'].float()
    l = torch.tensor(l/torch.linalg.norm(l))

    labels = l # normalized labels in (0,1)
    labels_attention_mask = batch['labels_attention_mask']


    loss, outputs = self(
                         input_ids = input_ids,
                         attention_mask = attention_mask,
                         labels = labels
                         )
    self.log('train_loss', loss, prog_bar = True, logger = True)
    return loss

  def validation_step(self, batch, batch_idx): 
    input_ids = batch['text_input_ids']
    attention_mask = batch['text_attention_mask']

    l = batch['labels'].float()
    l = torch.tensor(l/torch.linalg.norm(l))
    
    labels = l
    labels_attention_mask = batch['labels_attention_mask']

    loss, outputs = self(
                         input_ids = input_ids,
                         attention_mask = attention_mask,
                         labels = labels
                         )
    self.log('val_loss', loss, prog_bar = True, logger = True)
    return loss

  def test_step(self, batch, batch_idx):
    input_ids = batch['text_input_ids']
    attention_mask = batch['text_attention_mask']
    
    l = batch['labels'].float()
    l = torch.tensor(l/torch.linalg.norm(l))
    
    labels = l
    labels_attention_mask = batch['labels_attention_mask']

    loss, outputs = self(
                         input_ids = input_ids,
                         attention_mask = attention_mask,
                         labels = labels
                         )
    self.log('test_loss', loss, prog_bar = True, logger = True)
    return loss

  def configure_optimizers(self):
    return AdamW(self.parameters(), lr=0.0001)
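
This is roughly how the training is launched; the toy data built with the encode helper from the tokenization sketch above only stands in for my real dataset, so it is schematic:

from torch.utils.data import DataLoader
import pytorch_lightning as pl

BATCH_SIZE = 20

# toy examples just to show the plumbing; the real dataset is built the same way from my corpus
train_data = [encode({'text': 'some article text', 'summary': 'a short summary'}) for _ in range(40)]
train_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)

model = Summarizer()
trainer = pl.Trainer(max_epochs=1)
trainer.fit(model, train_loader)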

Training with pl.Trainer fails with ValueError: Expected input batch_size (20) to match target batch_size (50). I cannot figure out where this error comes from.
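To narrow it down I reproduced the loss call in isolation with random tensors of the same shapes (20 is my BATCH_SIZE, 50 the summary length); this is only a sketch of the shapes, not my actual data:

import torch

batch_size, summary_len = 20, 50

output = torch.rand(summary_len)               # what forward() returns after fc + relu: shape (50,)
labels = torch.rand(batch_size, summary_len)   # the normalized labels from the batch: shape (20, 50)

loss_fn = torch.nn.CrossEntropyLoss()
loss = loss_fn(labels, output)                 # same argument order as in forward() -> raises the same ValueError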
