I have been trying to learn TrainAD, but I can't get it to run because every attempt fails with the error TransformerEncoderLayer.forward() got an unexpected keyword argument 'is_causal'. This is the encoder layer class:
import torch.nn as nn

class TransformerEncoderLayer(nn.Module):
    def __init__(self, d_model, nhead, dim_feedforward=16, dropout=0):
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = nn.LeakyReLU(True)

    def forward(self, src, src_mask=None, src_key_padding_mask=None, **kwargs):
        # self-attention block, then a position-wise feed-forward block,
        # each with a residual connection
        src2 = self.self_attn(src, src, src)[0]
        src = src + self.dropout1(src2)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = src + self.dropout2(src2)
        return src
What should I fix here? I was told I could add **kwargs to the forward signature, like this:

    def forward(self, src, src_mask=None, src_key_padding_mask=None, **kwargs):

but that didn't work for me.
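From reading around, my guess (which may be wrong) is that on newer PyTorch versions nn.TransformerEncoder passes an is_causal argument to every layer, so the layer's forward would have to accept it. Below is a sketch of what I think that would look like; the class name PatchedEncoderLayer is just my own, and the sizes in the smoke test (d_model=8, nhead=2, sequence length 10, batch 2) are made-up values:

import torch
import torch.nn as nn

class PatchedEncoderLayer(nn.Module):
    def __init__(self, d_model, nhead, dim_feedforward=16, dropout=0):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = nn.LeakyReLU(True)

    def forward(self, src, src_mask=None, src_key_padding_mask=None,
                is_causal=False, **kwargs):
        # is_causal is accepted (and ignored) so that the call made by
        # nn.TransformerEncoder on newer PyTorch versions does not raise
        src2 = self.self_attn(src, src, src)[0]
        src = src + self.dropout1(src2)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = src + self.dropout2(src2)
        return src

# quick smoke test with made-up sizes
layer = PatchedEncoderLayer(d_model=8, nhead=2)
encoder = nn.TransformerEncoder(layer, num_layers=1)
out = encoder(torch.randn(10, 2, 8))  # (seq_len, batch, d_model)
print(out.shape)  # torch.Size([10, 2, 8])

Is accepting is_causal explicitly like this the right direction, or should the **kwargs version above already cover it and my problem is somewhere else (for example, an old copy of the file still being imported)?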