clean up embeddings.py (#7)
Commit 6d5377580042f614ac525febd19c641c4d456d9b
Co-authored-by: Bo Wang <[email protected]>
embedding.py: +0 -102
embedding.py
CHANGED
@@ -10,59 +10,6 @@ import torch.nn as nn
 from torch import Tensor
 
 
-class GPT2Embeddings(nn.Module):
-    def __init__(
-        self,
-        embed_dim,
-        vocab_size,
-        max_position_embeddings,
-        padding_idx=None,
-        word_embed_proj_dim=None,
-        device=None,
-        dtype=None,
-    ):
-        """
-        If max_position_embeddings <= 0, there's no position embeddings
-        If word_embe_proj_dim is not None (e.g., OPT-350m), we embed to that dimension
-            the project up to embed_dim
-        """
-        factory_kwargs = {"device": device, "dtype": dtype}
-        super().__init__()
-        if word_embed_proj_dim is None:
-            self.word_embeddings = nn.Embedding(
-                vocab_size, embed_dim, padding_idx=padding_idx, **factory_kwargs
-            )
-            self.project_in = None
-        else:
-            self.word_embeddings = nn.Embedding(
-                vocab_size, word_embed_proj_dim, padding_idx=padding_idx, **factory_kwargs
-            )
-            self.project_in = nn.Linear(
-                word_embed_proj_dim, embed_dim, bias=False, **factory_kwargs
-            )
-        self.max_position_embeddings = max_position_embeddings
-        if self.max_position_embeddings > 0:
-            self.position_embeddings = nn.Embedding(
-                max_position_embeddings, embed_dim, **factory_kwargs
-            )
-
-    def forward(self, input_ids, position_ids=None):
-        """
-        input_ids: (batch, seqlen)
-        position_ids: (batch, seqlen)
-        """
-        batch_size, seqlen = input_ids.shape
-        embeddings = self.word_embeddings(input_ids)
-        if self.project_in is not None:
-            embeddings = self.project_in(embeddings)
-        if self.max_position_embeddings > 0:
-            if position_ids is None:
-                position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
-            position_embeddings = self.position_embeddings(position_ids)
-            embeddings = embeddings + position_embeddings
-        return embeddings
-
-
 class BertEmbeddings(nn.Module):
     def __init__(
         self,
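Note: the removed GPT2Embeddings module summed word embeddings with learned position embeddings and, when word_embed_proj_dim was set (as in OPT-350m), projected the word embeddings up to embed_dim first. A minimal usage sketch of the deleted class; the sizes and the import path below are illustrative assumptions, not taken from this repo:

    import torch

    # Hypothetical import: assumes a pre-cleanup revision of embedding.py is importable.
    from embedding import GPT2Embeddings

    emb = GPT2Embeddings(embed_dim=768, vocab_size=50257, max_position_embeddings=1024)
    input_ids = torch.randint(0, 50257, (2, 16))  # (batch, seqlen)
    out = emb(input_ids)                          # word embeddings + broadcast position embeddings
    print(out.shape)                              # torch.Size([2, 16, 768])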
@@ -111,52 +58,3 @@ class BertEmbeddings(nn.Module):
         token_type_embeddings = self.token_type_embeddings(token_type_ids)
         embeddings = embeddings + token_type_embeddings
         return embeddings
-
-
-class VocabParallelEmbedding(nn.Embedding):
-    def __init__(self, num_embeddings, *args, process_group=None, padding_idx=None, **kwargs):
-        self.process_group = process_group
-        if process_group is not None:
-            world_size = torch.distributed.get_world_size(process_group)
-            if num_embeddings % world_size != 0:
-                raise ValueError(
-                    f"num_embeddings ({num_embeddings}) must be divisible by "
-                    f"world_size ({world_size})"
-                )
-            if world_size > 1 and padding_idx is not None:
-                raise RuntimeError("ParallelEmbedding does not support padding_idx")
-        else:
-            world_size = 1
-        super().__init__(num_embeddings // world_size, *args, padding_idx=padding_idx, **kwargs)
-
-    def forward(self, input: Tensor) -> Tensor:
-        if self.process_group is None:
-            return super().forward(input)
-        else:
-            rank = torch.distributed.get_rank(self.process_group)
-            vocab_size = self.num_embeddings
-            vocab_start_index, vocab_end_index = rank * vocab_size, (rank + 1) * vocab_size
-            # Create a mask of valid vocab ids (1 means it needs to be masked).
-            input_ids_mask = (input < vocab_start_index) | (input >= vocab_end_index)
-            input = input - vocab_start_index
-            input[input_ids_mask] = 0
-            embeddings = super().forward(input)
-            embeddings[input_ids_mask] = 0.0
-            return embeddings
-
-
-class ColumnParallelEmbedding(nn.Embedding):
-    def __init__(self, num_embeddings, embedding_dim, *args, process_group=None, **kwargs):
-        self.process_group = process_group
-        if process_group is not None:
-            world_size = torch.distributed.get_world_size(process_group)
-            if embedding_dim % world_size != 0:
-                raise ValueError(
-                    f"embedding_dim ({embedding_dim}) must be divisible by "
-                    f"world_size ({world_size})"
-                )
-        else:
-            world_size = 1
-        super().__init__(num_embeddings, embedding_dim // world_size, *args, **kwargs)
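Note: the removed VocabParallelEmbedding sharded the vocabulary evenly across the ranks of a process group; each rank embedded only the token ids it owned and zeroed the rows for out-of-shard ids (the partial results are presumably combined across ranks elsewhere in the model). ColumnParallelEmbedding instead split the embedding dimension across ranks. A single-process sketch of the vocab-shard masking logic; the sizes (vocab of 8, world_size 2, rank 1) are made up for illustration:

    import torch
    import torch.nn as nn

    # Illustrative shard: rank 1 of 2 owns vocab ids 4..7 out of a vocab of 8.
    vocab_size, world_size, rank, embed_dim = 8, 2, 1, 4
    shard_size = vocab_size // world_size
    shard = nn.Embedding(shard_size, embed_dim)

    input_ids = torch.tensor([[0, 5, 7, 3]])
    start, end = rank * shard_size, (rank + 1) * shard_size

    mask = (input_ids < start) | (input_ids >= end)       # token ids this rank does not own
    local_ids = (input_ids - start).masked_fill(mask, 0)  # clamp to a valid local index
    out = shard(local_ids)
    out[mask] = 0.0    # zero the out-of-shard rows; other ranks supply those tokens
    print(out.shape)   # torch.Size([1, 4, 4])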