-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathattention.py
More file actions
85 lines (71 loc) · 2.63 KB
/
attention.py
File metadata and controls
85 lines (71 loc) · 2.63 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
import torch
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm
from fc import FCNet
from bc import BCNet
class Attention(nn.Module):
    """Top-down attention over k object features, conditioned on a question.

    Each visual feature is concatenated with the (tiled) question embedding,
    fed through a one-layer FCNet, then projected to a scalar score per
    object; scores are softmax-normalized across the k objects.
    """

    def __init__(self, v_dim, q_dim, num_hid):
        super(Attention, self).__init__()
        # Joint v+q representation -> hidden; weight-normed scalar scorer.
        self.nonlinear = FCNet([v_dim + q_dim, num_hid])
        self.linear = weight_norm(nn.Linear(num_hid, 1), dim=None)

    def forward(self, v, q):
        """
        v: [batch, k, vdim]
        q: [batch, qdim]

        Returns attention weights of shape [batch, k, 1], normalized over k.
        """
        return nn.functional.softmax(self.logits(v, q), 1)

    def logits(self, v, q):
        # Tile the question over the k objects so it pairs with each feature.
        k = v.size(1)
        q_tiled = q.unsqueeze(1).repeat(1, k, 1)
        fused = torch.cat((v, q_tiled), 2)
        return self.linear(self.nonlinear(fused))
class NewAttention(nn.Module):
    """Product-fusion attention: project v and q into a shared num_hid space,
    fuse by elementwise product, and score each of the k objects.

    Args:
        v_dim: dimensionality of each visual feature.
        q_dim: dimensionality of the question embedding.
        num_hid: shared projection size for both modalities.
        dropout: dropout probability applied to the fused representation.
    """

    def __init__(self, v_dim, q_dim, num_hid, dropout=0.2):
        super(NewAttention, self).__init__()
        self.v_proj = FCNet([v_dim, num_hid])
        self.q_proj = FCNet([q_dim, num_hid])
        self.dropout = nn.Dropout(dropout)
        # BUG FIX: the scorer consumes the fused representation, which is
        # num_hid-dimensional (not q_dim). The original nn.Linear(q_dim, 1)
        # only worked when num_hid == q_dim and raised a shape error
        # otherwise; using num_hid is correct for all configurations and
        # identical to the old layer whenever num_hid == q_dim.
        self.linear = weight_norm(nn.Linear(num_hid, 1), dim=None)

    def forward(self, v, q):
        """
        v: [batch, k, vdim]
        q: [batch, qdim]

        Returns unnormalized attention logits of shape [batch, k, 1].
        """
        logits = self.logits(v, q)
        return logits

    def logits(self, v, q):
        batch, k, _ = v.size()
        v_proj = self.v_proj(v)  # [batch, k, num_hid]
        # Tile the projected question across the k objects before fusing.
        q_proj = self.q_proj(q).unsqueeze(1).repeat(1, k, 1)
        joint_repr = v_proj * q_proj
        joint_repr = self.dropout(joint_repr)
        logits = self.linear(joint_repr)
        return logits
class BiAttention(nn.Module):
    """Bilinear co-attention producing `glimpse` attention maps over every
    (object, question-token) pair, scored by a weight-normed BCNet.
    """

    def __init__(self, x_dim, y_dim, z_dim, glimpse, dropout=[.2,.5]):
        super(BiAttention, self).__init__()
        self.glimpse = glimpse
        self.logits = weight_norm(
            BCNet(x_dim, y_dim, z_dim, glimpse, dropout=dropout, k=3),
            name='h_mat', dim=None)

    def forward(self, v, q, v_mask=True):
        """
        v: [batch, k, vdim]
        q: [batch, qdim]

        Returns (softmaxed attention maps, raw logits).
        """
        p, logits = self.forward_all(v, q, v_mask)
        return p, logits

    def forward_all(self, v, q, v_mask=True, logit=False, mask_with=-float('inf')):
        """Score all pairs; optionally mask padded objects and skip the softmax."""
        n_obj = v.size(1)
        n_tok = q.size(1)
        scores = self.logits(v, q)  # b x g x v x q
        if v_mask:
            # All-zero feature rows are padding: force their scores to
            # mask_with so the softmax assigns them zero weight. The in-place
            # write on .data deliberately bypasses autograd (original design).
            pad = (0 == v.abs().sum(2)).unsqueeze(1).unsqueeze(3).expand(scores.size())
            scores.data.masked_fill_(pad.data, mask_with)
        if logit:
            return scores
        # Normalize jointly over the full (object x token) grid per glimpse.
        p = nn.functional.softmax(scores.view(-1, self.glimpse, n_obj * n_tok), 2)
        return p.view(-1, self.glimpse, n_obj, n_tok), scores