Module fast_transformers.recurrent.attention.cross_attention.full_attention
Implement the typical softmax attention as a recurrent cross attention module to speed up autoregressive decoding.
#
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>
#

"""Implement the typical softmax attention as a recurrent cross attention
module to speed up autoregressive decoding."""

from math import sqrt

import torch
from torch.nn import Dropout, Module

from ....attention_registry import RecurrentCrossAttentionRegistry, Optional, \
    Float, EventDispatcherInstance
from ....events import EventDispatcher


class RecurrentCrossFullAttention(Module):
    """Implement autoregressive softmax cross attention as a recurrent
    module.

    Arguments
    ---------
        softmax_temp: The temperature to use for the softmax attention.
                      (default: 1/sqrt(d_keys) where d_keys is computed at
                      runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.1)
        event_dispatcher: str or EventDispatcher instance to be used by this
                          module for dispatching events (default: the default
                          global dispatcher)
    """
    def __init__(self, softmax_temp=None, attention_dropout=0.1,
                 event_dispatcher=""):
        super(RecurrentCrossFullAttention, self).__init__()
        self.softmax_temp = softmax_temp
        self.dropout = Dropout(attention_dropout)
        self.event_dispatcher = EventDispatcher.get(event_dispatcher)

    def forward(self, query, keys, values, key_lengths, state=None):
        # Extract some shapes and compute the temperature
        N, H, E = query.shape
        softmax_temp = self.softmax_temp or 1./sqrt(E)

        # Extract the keys and values either from the arguments or the state
        if state is not None:
            keys, values = state

        # Compute the unnormalized attention and apply the key length mask
        QK = torch.einsum("nhe,nshe->nsh", query, keys)
        QK = QK + key_lengths.additive_matrix[:, :, None]

        # Compute the attention and the weighted average
        A = self.dropout(torch.softmax(softmax_temp * QK, dim=1))
        V = torch.einsum("nsh,nshd->nhd", A, values)

        # Make sure that we return a contiguous value
        return V.contiguous(), [keys, values]


# Register the attention implementation so that it becomes available in our
# builders
RecurrentCrossAttentionRegistry.register(
    "full", RecurrentCrossFullAttention,
    [
        ("softmax_temp", Optional(Float)),
        ("attention_dropout", Optional(Float, 0.1)),
        ("event_dispatcher", Optional(EventDispatcherInstance, ""))
    ]
)
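Because of this registration, the string "full" selects this implementation wherever a recurrent cross attention type is requested from the library's builders. The snippet below is a minimal sketch, not taken from the library documentation: it assumes a RecurrentDecoderBuilder in fast_transformers.builders with a cross_attention_type attribute analogous to the other builder parameters; verify the exact names against your installed version.

# Sketch only: RecurrentDecoderBuilder and cross_attention_type are assumed here.
from fast_transformers.builders import RecurrentDecoderBuilder

builder = RecurrentDecoderBuilder()
builder.n_layers = 2
builder.n_heads = 4
builder.query_dimensions = 64
builder.value_dimensions = 64
builder.cross_attention_type = "full"  # resolves to RecurrentCrossFullAttention
decoder = builder.get()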
Classes
class RecurrentCrossFullAttention (softmax_temp=None, attention_dropout=0.1, event_dispatcher='')
Implement autoregressive softmax cross attention as a recurrent module.
Arguments
softmax_temp: The temperature to use for the softmax attention.
    (default: 1/sqrt(d_keys) where d_keys is computed at runtime)
attention_dropout: The dropout rate to apply to the attention
    (default: 0.1)
event_dispatcher: str or EventDispatcher instance to be used by this
    module for dispatching events (default: the default global dispatcher)
Initializes internal Module state, shared by both nn.Module and ScriptModule.
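A minimal usage sketch (an illustration, not from the library documentation): the module is called with a single query per decoding step together with the full set of keys and values, and a LengthMask from fast_transformers.masking is assumed here as the source of the additive key-length mask. The tensor shapes follow the einsum signatures in the source above.

import torch
from fast_transformers.masking import LengthMask
from fast_transformers.recurrent.attention.cross_attention.full_attention import \
    RecurrentCrossFullAttention

N, H, E, D, S = 2, 4, 32, 32, 10  # batch, heads, key dim, value dim, source length

attention = RecurrentCrossFullAttention(attention_dropout=0.0)

query = torch.randn(N, H, E)      # query for one decoding step: (N, H, E)
keys = torch.randn(N, S, H, E)    # memory keys:                 (N, S, H, E)
values = torch.randn(N, S, H, D)  # memory values:               (N, S, H, D)
key_lengths = LengthMask(torch.tensor([10, 7]), max_len=S)

new_value, state = attention(query, keys, values, key_lengths)
print(new_value.shape)  # torch.Size([2, 4, 32])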
Ancestors
- torch.nn.modules.module.Module
Methods
def forward(self, query, keys, values, key_lengths, state=None)
Defines the computation performed at every call.
Should be overridden by all subclasses.
Note
Although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.
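To illustrate the recurrent interface, the sketch below (again an illustration, not from the library documentation) runs several decoding steps and feeds the returned state back in. For this implementation the state is simply the list [keys, values]; when it is passed back, it replaces the keys and values arguments of the call.

import torch
from fast_transformers.masking import LengthMask
from fast_transformers.recurrent.attention.cross_attention.full_attention import \
    RecurrentCrossFullAttention

attention = RecurrentCrossFullAttention().eval()  # eval() disables the dropout

N, H, E, D, S = 2, 4, 32, 32, 10
keys = torch.randn(N, S, H, E)
values = torch.randn(N, S, H, D)
key_lengths = LengthMask(torch.full((N,), S, dtype=torch.long), max_len=S)

state = None
for step in range(5):
    query = torch.randn(N, H, E)  # in practice, the projected query for this step
    out, state = attention(query, keys, values, key_lengths, state=state)
    # out: (N, H, D); state holds [keys, values] for the next call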