# roberta.py (forked from QuoQA-NLP/MRC_Baseline)
import math
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss
from dataclasses import dataclass
from typing import Optional, Tuple, Union
from transformers.models.roberta.modeling_roberta import RobertaPreTrainedModel, RobertaModel
from models.output import QuestionAnsweringV2ModelOutput
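
# Note: QuestionAnsweringV2ModelOutput is defined in models/output.py, which is not shown here.
# Based on how it is constructed at the end of forward() below, it is presumably a ModelOutput
# dataclass with the fields loss, is_impossible_logits, start_logits, end_logits, hidden_states,
# and attentions.
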
class RobertaForV2QuestionAnswering(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.roberta = RobertaModel(config, add_pooling_layer=False)
        # Span head: per-token start/end logits (num_labels is expected to be 2).
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        # Answerability head: a single "is impossible" logit from the first (<s>) token.
        self.impossible_outputs = nn.Sequential(nn.Dropout(p=0.1), nn.Linear(config.hidden_size, 1))

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        is_impossibles: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], QuestionAnsweringV2ModelOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]  # (batch_size, seq_size, hidden_size)
        sentence_output = sequence_output[:, 0, :]  # (batch_size, hidden_size)
        sentence_logits = self.impossible_outputs(sentence_output)

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, squeeze away the extra dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # Sometimes the start/end positions fall outside the model inputs; clamp them and
            # ignore those terms in the loss.
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct_start = CrossEntropyLoss(ignore_index=ignored_index)
            loss_fct_end = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct_start(start_logits, start_positions)
            end_loss = loss_fct_end(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

            if is_impossibles is not None:
                # Binary answerability loss on the <s>-token logit, down-weighted so that the
                # span loss dominates.
                loss_fct = BCEWithLogitsLoss()
                is_impossible_loss = loss_fct(
                    sentence_logits.view(-1), is_impossibles.float().view(-1)
                )
                total_loss += is_impossible_loss * 0.1

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringV2ModelOutput(
            loss=total_loss,
            is_impossible_logits=sentence_logits,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
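

# Usage sketch (not part of the original file): a minimal, hedged example of how this model
# could be exercised with random tensors. The tiny config below and the label/batch values are
# assumptions for illustration only; the actual training script presumably loads a pretrained
# checkpoint via from_pretrained(). Running this requires the repo's models.output module to be
# importable (e.g. run from the repository root).
if __name__ == "__main__":
    from transformers import RobertaConfig

    # num_labels=2 -> one start logit and one end logit per token, as qa_outputs expects.
    config = RobertaConfig(num_labels=2, num_hidden_layers=2, hidden_size=64,
                           num_attention_heads=2, intermediate_size=128)
    model = RobertaForV2QuestionAnswering(config)
    model.eval()

    batch_size, seq_len = 2, 16
    input_ids = torch.randint(0, config.vocab_size, (batch_size, seq_len))
    attention_mask = torch.ones_like(input_ids)
    start_positions = torch.tensor([3, 0])  # gold span start indices
    end_positions = torch.tensor([5, 0])    # gold span end indices
    is_impossibles = torch.tensor([0, 1])   # 1 marks an unanswerable example

    outputs = model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        start_positions=start_positions,
        end_positions=end_positions,
        is_impossibles=is_impossibles,
    )
    # total loss = mean of start/end cross-entropy + 0.1 * BCE on the answerability logit
    print(outputs.loss)
    print(outputs.start_logits.shape)          # (batch_size, seq_len)
    print(outputs.is_impossible_logits.shape)  # (batch_size, 1)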