-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodeling.py
More file actions
68 lines (62 loc) · 2.82 KB
/
modeling.py
File metadata and controls
68 lines (62 loc) · 2.82 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
import torch
import torch.nn as nn
from transformers import BertPreTrainedModel, BertModel, AlbertPreTrainedModel, AlbertModel, DistilBertPreTrainedModel, DistilBertModel
class BertForSentimentClassification(BertPreTrainedModel):
    """BERT encoder topped with a single-logit head for binary sentiment classification.

    The [CLS] token's final hidden state is projected to one logit; callers are
    expected to apply a sigmoid / BCE-with-logits loss downstream.
    """

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        # Linear head mapping the [CLS] representation to a single sentiment logit.
        self.cls_layer = nn.Linear(config.hidden_size, 1)

    def forward(self, input_ids, attention_mask):
        """Compute sentiment logits.

        Args:
            input_ids: [B, T] tensor of token ids (B = batch size, T = sequence length).
            attention_mask: [B, T] mask that zeroes out contributions from PAD tokens.

        Returns:
            [B, 1] tensor of raw (pre-sigmoid) logits.
        """
        encoded = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        # First position of the last hidden layer holds the [CLS] representation.
        cls_vectors = encoded.last_hidden_state[:, 0]
        return self.cls_layer(cls_vectors)
class AlbertForSentimentClassification(AlbertPreTrainedModel):
    """ALBERT encoder topped with a single-logit head for binary sentiment classification.

    The [CLS] token's final hidden state is projected to one logit; callers are
    expected to apply a sigmoid / BCE-with-logits loss downstream.
    """

    def __init__(self, config):
        super().__init__(config)
        self.albert = AlbertModel(config)
        # The classification layer that takes the [CLS] representation and outputs the logit.
        # Use config.hidden_size (not a hard-coded 768) so ALBERT variants with a
        # different hidden size (e.g. albert-large/-xlarge) work; for albert-base
        # hidden_size is 768, so this is backward-compatible.
        self.cls_layer = nn.Linear(config.hidden_size, 1)

    def forward(self, input_ids, attention_mask):
        """Compute sentiment logits.

        Args:
            input_ids: [B, T] tensor of token ids (B = batch size, T = sequence length).
            attention_mask: [B, T] mask that zeroes out contributions from PAD tokens.

        Returns:
            [B, 1] tensor of raw (pre-sigmoid) logits.
        """
        # Feed the input to the ALBERT model to obtain contextual representations.
        outputs = self.albert(input_ids=input_ids, attention_mask=attention_mask)
        # Obtain the representation of the [CLS] head (first token position).
        cls_reps = outputs.last_hidden_state[:, 0]
        logits = self.cls_layer(cls_reps)
        return logits
class DistilBertForSentimentClassification(DistilBertPreTrainedModel):
    """DistilBERT encoder topped with a single-logit head for binary sentiment classification.

    The [CLS] token's final hidden state is projected to one logit; callers are
    expected to apply a sigmoid / BCE-with-logits loss downstream.
    """

    def __init__(self, config):
        super().__init__(config)
        self.distilbert = DistilBertModel(config)
        # The classification layer that takes the [CLS] representation and outputs the logit.
        # DistilBertConfig exposes the hidden dimension as `dim` (768 for
        # distilbert-base, so this is backward-compatible) — avoid hard-coding 768
        # so non-base variants work too.
        self.cls_layer = nn.Linear(config.dim, 1)

    def forward(self, input_ids, attention_mask):
        """Compute sentiment logits.

        Args:
            input_ids: [B, T] tensor of token ids (B = batch size, T = sequence length).
            attention_mask: [B, T] mask that zeroes out contributions from PAD tokens.

        Returns:
            [B, 1] tensor of raw (pre-sigmoid) logits.
        """
        # Feed the input to the DistilBERT model to obtain contextual representations.
        outputs = self.distilbert(input_ids=input_ids, attention_mask=attention_mask)
        # Obtain the representation of the [CLS] head (first token position).
        cls_reps = outputs.last_hidden_state[:, 0]
        logits = self.cls_layer(cls_reps)
        return logits